Source code for langchain.agents.structured_chat.base

import re
from typing import Any, List, Optional, Sequence, Tuple, Union

from langchain_core._api import deprecated
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.pydantic_v1 import Field
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool

from langchain.agents.agent import Agent, AgentOutputParser
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import JSONAgentOutputParser
from langchain.agents.structured_chat.output_parser import (
    StructuredChatOutputParserWithRetries,
)
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX
from langchain.chains.llm import LLMChain
from langchain.tools.render import ToolsRenderer, render_text_description_and_args

HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}"


@deprecated("0.1.0", alternative="create_structured_chat_agent", removal="0.3.0")
class StructuredChatAgent(Agent):
    """Structured Chat Agent."""

    output_parser: AgentOutputParser = Field(
        default_factory=StructuredChatOutputParserWithRetries
    )
    """Output parser for the agent."""

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought:"

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> str:
        agent_scratchpad = super()._construct_scratchpad(intermediate_steps)
        if not isinstance(agent_scratchpad, str):
            raise ValueError("agent_scratchpad should be of type string.")
        if agent_scratchpad:
            return (
                f"This was your previous work "
                f"(but I haven't seen any of it! I only see what "
                f"you return as final answer):\n{agent_scratchpad}"
            )
        else:
            return agent_scratchpad

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        pass

    @classmethod
    def _get_default_output_parser(
        cls, llm: Optional[BaseLanguageModel] = None, **kwargs: Any
    ) -> AgentOutputParser:
        return StructuredChatOutputParserWithRetries.from_llm(llm=llm)

    @property
    def _stop(self) -> List[str]:
        return ["Observation:"]
    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        memory_prompts: Optional[List[BasePromptTemplate]] = None,
    ) -> BasePromptTemplate:
        tool_strings = []
        for tool in tools:
            args_schema = re.sub("}", "}}", re.sub("{", "{{", str(tool.args)))
            tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}")
        formatted_tools = "\n".join(tool_strings)
        tool_names = ", ".join([tool.name for tool in tools])
        format_instructions = format_instructions.format(tool_names=tool_names)
        template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix])
        if input_variables is None:
            input_variables = ["input", "agent_scratchpad"]
        _memory_prompts = memory_prompts or []
        messages = [
            SystemMessagePromptTemplate.from_template(template),
            *_memory_prompts,
            HumanMessagePromptTemplate.from_template(human_message_template),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)  # type: ignore[arg-type]
    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        output_parser: Optional[AgentOutputParser] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        human_message_template: str = HUMAN_MESSAGE_TEMPLATE,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        memory_prompts: Optional[List[BasePromptTemplate]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            human_message_template=human_message_template,
            format_instructions=format_instructions,
            input_variables=input_variables,
            memory_prompts=memory_prompts,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser(llm=llm)
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )
    @property
    def _agent_type(self) -> str:
        raise ValueError
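
# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The double-brace escaping in `create_prompt` above protects the rendered args
# schema from `str.format`, which would otherwise treat `{` and `}` as
# placeholders. The schema below is a hypothetical example:
_example_schema = str({"query": {"title": "Query", "type": "string"}})
_example_escaped = re.sub("}", "}}", re.sub("{", "{{", _example_schema))
# _example_escaped == "{{'query': {{'title': 'Query', 'type': 'string'}}}}"
#
# A minimal sketch of the deprecated construction path, kept in comments so the
# listing stays importable; `ChatOpenAI` and the tools list are assumptions:
#
#     from langchain.agents import AgentExecutor
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI()
#     tools = [...]  # any sequence of BaseTool instances
#     agent = StructuredChatAgent.from_llm_and_tools(llm=llm, tools=tools)
#     executor = AgentExecutor(agent=agent, tools=tools)
#     executor.invoke({"input": "hi"})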

def create_structured_chat_agent(
    llm: BaseLanguageModel,
    tools: Sequence[BaseTool],
    prompt: ChatPromptTemplate,
    tools_renderer: ToolsRenderer = render_text_description_and_args,
    *,
    stop_sequence: Union[bool, List[str]] = True,
) -> Runnable:
    """Create an agent aimed at supporting tools with multiple inputs.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use. See the Prompt section below for more information.
        stop_sequence: bool or list of str.
            If True, adds a stop token of "Observation:" to avoid hallucinations.
            If False, does not add a stop token.
            If a list of str, uses the provided list as the stop tokens.

            Default is True. You may want to set this to False if the LLM you are
            using does not support stop sequences.
        tools_renderer: This controls how the tools are converted into a string and
            then passed into the LLM. Default is `render_text_description_and_args`.

    Returns:
        A Runnable sequence representing an agent. It takes as input all the same
        input variables as the prompt passed in does. It returns as output either
        an AgentAction or AgentFinish.

    Examples:

        .. code-block:: python

            from langchain import hub
            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_structured_chat_agent

            prompt = hub.pull("hwchase17/structured-chat-agent")
            model = ChatOpenAI()
            tools = ...

            agent = create_structured_chat_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)

            agent_executor.invoke({"input": "hi"})

            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )

    Prompt:

        The prompt must have input keys:
            * `tools`: contains descriptions and arguments for each tool.
            * `tool_names`: contains all tool names.
            * `agent_scratchpad`: contains previous agent actions and tool outputs
              as a string.

        Here's an example:

        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

            system = '''Respond to the human as helpfully and accurately as possible. You have access to the following tools:

            {tools}

            Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).

            Valid "action" values: "Final Answer" or {tool_names}

            Provide only ONE action per $JSON_BLOB, as shown:

            ```
            {{
              "action": $TOOL_NAME,
              "action_input": $INPUT
            }}
            ```

            Follow this format:

            Question: input question to answer
            Thought: consider previous and subsequent steps
            Action:
            ```
            $JSON_BLOB
            ```
            Observation: action result
            ... (repeat Thought/Action/Observation N times)
            Thought: I know what to respond
            Action:
            ```
            {{
              "action": "Final Answer",
              "action_input": "Final response to human"
            }}

            Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation'''

            human = '''{input}

            {agent_scratchpad}

            (reminder to respond in a JSON blob no matter what)'''

            prompt = ChatPromptTemplate.from_messages(
                [
                    ("system", system),
                    MessagesPlaceholder("chat_history", optional=True),
                    ("human", human),
                ]
            )
    """  # noqa: E501
    missing_vars = {"tools", "tool_names", "agent_scratchpad"}.difference(
        prompt.input_variables + list(prompt.partial_variables)
    )
    if missing_vars:
        raise ValueError(f"Prompt missing required variables: {missing_vars}")

    prompt = prompt.partial(
        tools=tools_renderer(list(tools)),
        tool_names=", ".join([t.name for t in tools]),
    )
    if stop_sequence:
        stop = ["\nObservation"] if stop_sequence is True else stop_sequence
        llm_with_stop = llm.bind(stop=stop)
    else:
        llm_with_stop = llm

    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=lambda x: format_log_to_str(x["intermediate_steps"]),
        )
        | prompt
        | llm_with_stop
        | JSONAgentOutputParser()
    )
    return agent
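
# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How `format_log_to_str` builds the {agent_scratchpad} string that the
# RunnablePassthrough step above feeds into the prompt. The tool name, input,
# and observation here are hypothetical placeholders.
_example_steps = [
    (
        AgentAction(
            tool="search",
            tool_input={"query": "weather"},
            log="I should look up the weather.",
        ),
        "It is sunny.",
    )
]
_example_scratchpad = format_log_to_str(_example_steps)
# _example_scratchpad is now:
# "I should look up the weather.\nObservation: It is sunny.\nThought: "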