# Source code for langchain.agents.agent_toolkits.conversational_retrieval.openai_functions
from typing import Any, List, Optional # noqa: E501
from langchain_core.language_models import BaseLanguageModel
from langchain_core.memory import BaseMemory
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain.agents.agent import AgentExecutor
from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
    """Build the fallback system message used when the caller provides none."""
    default_instructions = (
        "Do your best to answer the questions. "
        "Feel free to use any tools available to look up "
        "relevant information, only if necessary"
    )
    return SystemMessage(content=default_instructions)
def create_conversational_retrieval_agent(
    llm: BaseLanguageModel,
    tools: List[BaseTool],
    remember_intermediate_steps: bool = True,
    memory_key: str = "chat_history",
    system_message: Optional[SystemMessage] = None,
    verbose: bool = False,
    max_token_limit: int = 2000,
    **kwargs: Any,
) -> AgentExecutor:
    """Convenience method for creating a conversational retrieval agent.

    Args:
        llm: The language model to use; should be a ChatOpenAI model.
        tools: A list of tools the agent has access to.
        remember_intermediate_steps: Whether the agent should remember
            intermediate steps (the action/observation pairs from previous
            questions). The benefit of remembering these is that the agent can
            reuse any relevant information when answering follow-up questions;
            the downside is that they consume more tokens.
        memory_key: The name of the memory key in the prompt.
        system_message: The system message to use. If not provided, a basic
            default message is used.
        verbose: Whether the final AgentExecutor should be verbose.
            Defaults to False.
        max_token_limit: The maximum number of tokens to keep in memory.
            Defaults to 2000.
        **kwargs: Additional keyword arguments forwarded to AgentExecutor.

    Returns:
        An appropriately initialized AgentExecutor.
    """
    # Choose the memory implementation: AgentTokenBufferMemory additionally
    # stores the intermediate action/observation pairs.
    if remember_intermediate_steps:
        memory: BaseMemory = AgentTokenBufferMemory(
            memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
        )
    else:
        memory = ConversationTokenBufferMemory(
            memory_key=memory_key,
            return_messages=True,
            output_key="output",
            llm=llm,
            max_token_limit=max_token_limit,
        )

    _system_message = system_message or _get_default_system_message()
    # Insert a placeholder so stored history is injected under `memory_key`.
    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=_system_message,
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
    )
    agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
    return AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=verbose,
        # Only return intermediate steps when we are also remembering them.
        return_intermediate_steps=remember_intermediate_steps,
        **kwargs,
    )