"""Module implementing an agent that uses OpenAI's function-calling API."""
from typing import Any, List, Optional, Sequence, Tuple, Type, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import (
BaseMessage,
SystemMessage,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
BaseMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.pydantic_v1 import root_validator
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.agents import BaseSingleActionAgent
from langchain.agents.format_scratchpad.openai_functions import (
format_to_openai_function_messages,
)
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
@deprecated("0.1.0", alternative="create_openai_functions_agent", removal="0.3.0")
class OpenAIFunctionsAgent(BaseSingleActionAgent):
    """Agent driven by OpenAI's function-calling API.

    Args:
        llm: This should be an instance of ChatOpenAI, specifically a model
            that supports using `functions`.
        tools: The tools this agent has access to.
        prompt: The prompt for this agent; it should support
            `agent_scratchpad` as one of its variables. For an easy way to
            construct this prompt, use `OpenAIFunctionsAgent.create_prompt(...)`.
    """

    llm: BaseLanguageModel
    tools: Sequence[BaseTool]
    prompt: BasePromptTemplate
    # NOTE: this is the parser *class*, not an instance — `_parse_ai_message`
    # is invoked on it directly below.
    output_parser: Type[
        OpenAIFunctionsAgentOutputParser
    ] = OpenAIFunctionsAgentOutputParser

    # pydantic v1 requires `skip_on_failure=True` for post-validators
    # (bare `@root_validator` with the default pre=False raises a ConfigError).
    @root_validator(pre=False, skip_on_failure=True)
    def validate_prompt(cls, values: dict) -> dict:
        """Validate that the prompt exposes an ``agent_scratchpad`` variable."""
        prompt: BasePromptTemplate = values["prompt"]
        if "agent_scratchpad" not in prompt.input_variables:
            raise ValueError(
                "`agent_scratchpad` should be one of the variables in the prompt, "
                f"got {prompt.input_variables}"
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Get input keys. Input refers to user input here."""
        return ["input"]

    @property
    def functions(self) -> List[dict]:
        """The tools converted to the OpenAI function-calling schema."""
        return [dict(convert_to_openai_function(t)) for t in self.tools]

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        with_functions: bool = True,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken so far,
                along with observations.
            callbacks: Callbacks to pass to the LLM call.
            with_functions: Whether to pass the function definitions to the
                model (disabled for the final "generate" pass on early stop).
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use, or an AgentFinish.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        # Only forward the prompt's own variables; the scratchpad is injected
        # separately from the formatted intermediate steps.
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        if with_functions:
            predicted_message = self.llm.predict_messages(
                messages,
                functions=self.functions,
                callbacks=callbacks,
            )
        else:
            predicted_message = self.llm.predict_messages(
                messages,
                callbacks=callbacks,
            )
        agent_decision = self.output_parser._parse_ai_message(predicted_message)
        return agent_decision

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        """Async version of ``plan``: given input, decide what to do.

        Args:
            intermediate_steps: Steps the LLM has taken so far,
                along with observations.
            callbacks: Callbacks to pass to the LLM call.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use, or an AgentFinish.
        """
        agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
        selected_inputs = {
            k: kwargs[k] for k in self.prompt.input_variables if k != "agent_scratchpad"
        }
        full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
        prompt = self.prompt.format_prompt(**full_inputs)
        messages = prompt.to_messages()
        predicted_message = await self.llm.apredict_messages(
            messages, functions=self.functions, callbacks=callbacks
        )
        agent_decision = self.output_parser._parse_ai_message(predicted_message)
        return agent_decision

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: List[Tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when the agent has been stopped due to max iterations.

        Raises:
            ValueError: If ``early_stopping_method`` is not "force" or
                "generate", or if the final "generate" pass still returns an
                AgentAction instead of finishing.
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."}, ""
            )
        elif early_stopping_method == "generate":
            # Generate does one final forward pass, without function
            # definitions so the model is forced to produce a final answer.
            agent_decision = self.plan(
                intermediate_steps, with_functions=False, **kwargs
            )
            if isinstance(agent_decision, AgentFinish):
                return agent_decision
            else:
                raise ValueError(
                    f"got AgentAction with no functions provided: {agent_decision}"
                )
        else:
            raise ValueError(
                "early_stopping_method should be one of `force` or `generate`, "
                f"got {early_stopping_method}"
            )

    @classmethod
    def create_prompt(
        cls,
        system_message: Optional[SystemMessage] = SystemMessage(
            content="You are a helpful AI assistant."
        ),
        extra_prompt_messages: Optional[List[BaseMessagePromptTemplate]] = None,
    ) -> ChatPromptTemplate:
        """Create prompt for this agent.

        Args:
            system_message: Message to use as the system message; it will be
                placed first in the prompt. Pass ``None`` to omit it.
            extra_prompt_messages: Prompt messages that will be placed between
                the system message and the new human input.

        Returns:
            A prompt template to pass into this agent.
        """
        _prompts = extra_prompt_messages or []
        messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]
        if system_message:
            messages = [system_message]
        else:
            messages = []
        messages.extend(
            [
                *_prompts,
                HumanMessagePromptTemplate.from_template("{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )
        return ChatPromptTemplate(messages=messages)  # type: ignore[arg-type, call-arg]
def create_openai_functions_agent(
    llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate
) -> Runnable:
    """Create an agent that uses OpenAI function calling.

    Args:
        llm: LLM to use as the agent. Should work with OpenAI function calling,
            so either be an OpenAI model that supports that or a wrapper of
            a different model that adds in equivalent support.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See Prompt section below for more.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the
        same input variables as the prompt passed in does. It returns as output
        either an AgentAction or AgentFinish.

    Raises:
        ValueError: If the prompt is missing an ``agent_scratchpad`` variable.

    Example:

        Creating an agent with no memory

        .. code-block:: python

            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_openai_functions_agent
            from langchain import hub

            prompt = hub.pull("hwchase17/openai-functions-agent")
            model = ChatOpenAI()
            tools = ...

            agent = create_openai_functions_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)

            agent_executor.invoke({"input": "hi"})

            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )

    Prompt:

        The agent prompt must have an `agent_scratchpad` key that is a
        ``MessagesPlaceholder``. Intermediate agent actions and tool output
        messages will be passed in here.

        Here's an example:

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are a helpful assistant"),
                    MessagesPlaceholder("chat_history", optional=True),
                    ("human", "{input}"),
                    MessagesPlaceholder("agent_scratchpad"),
                ]
            )
    """
    # `agent_scratchpad` may be satisfied either as an input or a partial.
    if "agent_scratchpad" not in (
        prompt.input_variables + list(prompt.partial_variables)
    ):
        raise ValueError(
            "Prompt must have input variable `agent_scratchpad`, but wasn't found. "
            f"Found {prompt.input_variables} instead."
        )
    # Bind the tool schemas to the model once so every call advertises them.
    llm_with_tools = llm.bind(functions=[convert_to_openai_function(t) for t in tools])
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            )
        )
        | prompt
        | llm_with_tools
        | OpenAIFunctionsAgentOutputParser()
    )
    return agent