from langchain_community.chat_models import ChatOpenAI
from langchain.agents import create_agent
from dotenv import load_dotenv
import os

load_dotenv()

basic_model = ChatOpenAI(model="deepseek-chat", api_key=os.getenv("DEEPSEEK_API_KEY"))

agent = create_agent(
    model=basic_model,
    tools=[execute_sql],
    system_prompt=SYSTEM_PROMPT,
    context_schema=RuntimeContext,
)
Using DeepSeek to run LangChain 1.0:
question = "Which table has the largest number of entries?"

for step in agent.stream(
    {"messages": question},
    context=RuntimeContext(db=db),
    stream_mode="values",
):
    step["messages"][-1].pretty_print()
The code above fails with the following error:
================================ Human Message =================================
Which table has the largest number of entries?
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
Cell In[12], line 3
1 question = "Which table has the largest number of entries?"
----> 3 for step in agent.stream(
4 {"messages": question},
5 context=RuntimeContext(db=db),
6 stream_mode="values",
7 ):
8 step["messages"][-1].pretty_print()
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langgraph\pregel\main.py:2633, in Pregel.stream(self, input, config, context, stream_mode, print_mode, output_keys, interrupt_before, interrupt_after, durability, subgraphs, debug, **kwargs)
2631 for task in loop.match_cached_writes():
2632 loop.output_writes(task.id, task.writes, cached=True)
-> 2633 for _ in runner.tick(
2634 [t for t in loop.tasks.values() if not t.writes],
2635 timeout=self.step_timeout,
2636 get_waiter=get_waiter,
2637 schedule_task=loop.accept_push,
2638 ):
2639 # emit output
2640 yield from _output(
2641 stream_mode, print_mode, subgraphs, stream.get, queue.Empty
2642 )
2643 loop.after_tick()
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langgraph\pregel\_runner.py:167, in PregelRunner.tick(self, tasks, reraise, timeout, retry_policy, get_waiter, schedule_task)
165 t = tasks[0]
166 try:
--> 167 run_with_retry(
168 t,
169 retry_policy,
170 configurable={
171 CONFIG_KEY_CALL: partial(
172 _call,
173 weakref.ref(t),
174 retry_policy=retry_policy,
175 futures=weakref.ref(futures),
176 schedule_task=schedule_task,
177 submit=self.submit,
178 ),
179 },
180 )
181 self.commit(t, None)
182 except Exception as exc:
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langgraph\pregel\_retry.py:42, in run_with_retry(task, retry_policy, configurable)
40 task.writes.clear()
41 # run the task
---> 42 return task.proc.invoke(task.input, config)
43 except ParentCommand as exc:
44 ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langgraph\_internal\_runnable.py:656, in RunnableSeq.invoke(self, input, config, **kwargs)
654 # run in context
655 with set_config_context(config, run) as context:
--> 656 input = context.run(step.invoke, input, config, **kwargs)
657 else:
658 input = step.invoke(input, config)
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langgraph\_internal\_runnable.py:400, in RunnableCallable.invoke(self, input, config, **kwargs)
398 run_manager.on_chain_end(ret)
399 else:
--> 400 ret = self.func(*args, **kwargs)
401 if self.recurse and isinstance(ret, Runnable):
402 return ret.invoke(input, config)
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langchain\agents\factory.py:1095, in create_agent.<locals>.model_node(state, runtime)
1082 request = ModelRequest(
1083 model=model,
1084 tools=default_tools,
(...) 1090 runtime=runtime,
1091 )
1093 if wrap_model_call_handler is None:
1094 # No handlers - execute directly
-> 1095 response = _execute_model_sync(request)
1096 else:
1097 # Call composed handler with base handler
1098 response = wrap_model_call_handler(request, _execute_model_sync)
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langchain\agents\factory.py:1063, in create_agent.<locals>._execute_model_sync(request)
1057 """Execute model and return response.
1058
1059 This is the core model execution logic wrapped by `wrap_model_call` handlers.
1060 Raises any exceptions that occur during model invocation.
1061 """
1062 # Get the bound model (with auto-detection if needed)
-> 1063 model_, effective_response_format = _get_bound_model(request)
1064 messages = request.messages
1065 if request.system_prompt:
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langchain\agents\factory.py:1049, in create_agent.<locals>._get_bound_model(request)
1046 # No structured output - standard model binding
1047 if final_tools:
1048 return (
-> 1049 request.model.bind_tools(
1050 final_tools, tool_choice=request.tool_choice, **request.model_settings
1051 ),
1052 None,
1053 )
1054 return request.model.bind(**request.model_settings), None
File d:\AIConda\env\004Langchain-v1-notebook\Lib\site-packages\langchain_core\language_models\chat_models.py:1507, in BaseChatModel.bind_tools(self, tools, tool_choice, **kwargs)
1488 def bind_tools(
1489 self,
1490 tools: Sequence[
(...) 1495 **kwargs: Any,
1496 ) -> Runnable[LanguageModelInput, AIMessage]:
1497 """Bind tools to the model.
1498
1499 Args:
(...) 1505
1506 """
-> 1507 raise NotImplementedError
NotImplementedError:
During task with name 'model' and id 'd78c1994-29b6-07ae-4ea4-9764027ff940'
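
The traceback points at the cause: `create_agent` tries to bind the `execute_sql` tool to the model via `bind_tools`, and the call falls through to `BaseChatModel.bind_tools`, which simply raises `NotImplementedError`. The legacy `ChatOpenAI` in `langchain_community.chat_models` is deprecated and never implemented tool binding (it is also constructed here without a `base_url`, so it would target OpenAI's endpoint with a DeepSeek key even if binding succeeded). A likely fix is to use the `ChatOpenAI` from the `langchain-openai` partner package, which does implement `bind_tools`, pointed at DeepSeek's OpenAI-compatible API. A minimal sketch, assuming `langchain-openai` is installed and that `https://api.deepseek.com` is the correct base URL for your account (check DeepSeek's docs); `execute_sql`, `SYSTEM_PROMPT`, and `RuntimeContext` are assumed to be defined elsewhere in the notebook as in the original code:

import os

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI  # partner package: implements bind_tools()
from langchain.agents import create_agent

load_dotenv()

# DeepSeek serves an OpenAI-compatible API, so ChatOpenAI can target it via base_url.
# (The base_url value is an assumption; verify it against DeepSeek's documentation.)
basic_model = ChatOpenAI(
    model="deepseek-chat",
    api_key=os.getenv("DEEPSEEK_API_KEY"),
    base_url="https://api.deepseek.com",
)

# The rest of the setup is unchanged: create_agent can now bind execute_sql to the model.
agent = create_agent(
    model=basic_model,
    tools=[execute_sql],
    system_prompt=SYSTEM_PROMPT,
    context_schema=RuntimeContext,
)

Alternatively, the dedicated `langchain-deepseek` package ships a `ChatDeepSeek` class that supports tool calling out of the box. Either way, the key requirement is that the model class passed to `create_agent` actually implements `bind_tools`.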