Source code for langchain.agents.conversational_chat.output_parser

from __future__ import annotations

from typing import Union

from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.utils.json import parse_json_markdown

from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS


# Define a class that parses output for conversational agents
class ConvoOutputParser(AgentOutputParser):
    """Output parser for the conversational agent."""

    format_instructions: str = FORMAT_INSTRUCTIONS
    """Default formatting instructions."""

    def get_format_instructions(self) -> str:
        """Returns formatting instructions for the given output parser."""
        return self.format_instructions

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Attempts to parse the given text into an AgentAction or AgentFinish.

        Raises an OutputParserException if parsing fails.
        """
        try:
            # Attempt to parse the text into a structured format (assumed to be JSON
            # stored as markdown)
            response = parse_json_markdown(text)

            # If the response contains an 'action' and 'action_input'
            if "action" in response and "action_input" in response:
                action, action_input = response["action"], response["action_input"]

                # If the action indicates a final answer, return an AgentFinish
                if action == "Final Answer":
                    return AgentFinish({"output": action_input}, text)
                else:
                    # Otherwise, return an AgentAction with the specified action and
                    # input
                    return AgentAction(action, action_input, text)
            else:
                # If the necessary keys aren't present in the response, raise an
                # exception
                raise OutputParserException(
                    f"Missing 'action' or 'action_input' in LLM output: {text}"
                )
        except Exception as e:
            # If any other exception is raised during parsing, also raise an
            # OutputParserException
            raise OutputParserException(f"Could not parse LLM output: {text}") from e

    @property
    def _type(self) -> str:
        return "conversational_chat"
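The sketch below (not part of the module source) illustrates how the parser handles the two response shapes: a tool invocation becomes an AgentAction, and a "Final Answer" becomes an AgentFinish. The JSON-in-markdown payloads and the "search" tool name are hypothetical examples of the format requested by FORMAT_INSTRUCTIONS.

# Illustrative usage example (assumed payloads, hypothetical tool name).
parser = ConvoOutputParser()

tool_call = """```json
{
    "action": "search",
    "action_input": "weather in Paris"
}
```"""
result = parser.parse(tool_call)
# -> AgentAction(tool="search", tool_input="weather in Paris", log=tool_call)

final_answer = """```json
{
    "action": "Final Answer",
    "action_input": "It is sunny in Paris today."
}
```"""
result = parser.parse(final_answer)
# -> AgentFinish(return_values={"output": "It is sunny in Paris today."}, log=final_answer)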