"""Source code for langchain.agents.output_parsers.json."""
from __future__ import annotations
import logging
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.json import parse_json_markdown
from langchain.agents.agent import AgentOutputParser
logger = logging.getLogger(__name__)
class JSONAgentOutputParser(AgentOutputParser):
    """Parses tool invocations and final answers in JSON format.

    Expects output to be in one of two formats.

    If the output signals that an action should be taken,
    should be in the below format. This will result in an
    AgentAction being returned.

    ```
    {
      "action": "search",
      "action_input": "2+2"
    }
    ```

    If the output signals that a final answer should be given,
    should be in the below format. This will result in an
    AgentFinish being returned.

    ```
    {
      "action": "Final Answer",
      "action_input": "4"
    }
    ```
    """

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Parse LLM output into an AgentAction or AgentFinish.

        Args:
            text: Raw LLM output, expected to contain a JSON (possibly
                markdown-fenced) object with "action" and "action_input" keys.

        Returns:
            AgentFinish when "action" is "Final Answer", otherwise an
            AgentAction for the named tool.

        Raises:
            OutputParserException: If the text cannot be parsed as JSON
                or does not have the expected structure.
        """
        try:
            response = parse_json_markdown(text)
            if isinstance(response, list):
                # gpt turbo frequently ignores the directive to emit a single action
                logger.warning("Got multiple action responses: %s", response)
                response = response[0]
            if response["action"] == "Final Answer":
                return AgentFinish({"output": response["action_input"]}, text)
            else:
                # "action_input" may be absent for tools that take no input.
                return AgentAction(
                    response["action"], response.get("action_input", {}), text
                )
        except Exception as e:
            raise OutputParserException(f"Could not parse LLM output: {text}") from e

    @property
    def _type(self) -> str:
        return "json-agent"