Source code for langchain_core.messages.tool

import json
from typing import Any, Dict, List, Literal, Optional, Tuple

from typing_extensions import TypedDict

from langchain_core.messages.base import (
    BaseMessage,
    BaseMessageChunk,
    merge_content,
)
from langchain_core.utils._merge import merge_dicts


class ToolMessage(BaseMessage):
    """Message for passing the result of executing a tool back to a model."""

    tool_call_id: str
    """Tool call that this message is responding to."""

    # TODO: Add is_error param?
    # is_error: bool = False
    # """Whether the tool errored."""

    type: Literal["tool"] = "tool"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]
ToolMessage.update_forward_refs()
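
A minimal usage sketch (not part of the original module): a ToolMessage carries a tool's output back to the model, keyed to the originating call via ``tool_call_id``. The content "42" and the id "call_abc123" are illustrative placeholders, not values from the source.

msg = ToolMessage(content="42", tool_call_id="call_abc123")
assert msg.type == "tool"
assert msg.tool_call_id == "call_abc123"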
class ToolMessageChunk(ToolMessage, BaseMessageChunk):
    """Tool Message chunk."""

    # Ignoring mypy re-assignment here since we're overriding the value
    # to make sure that the chunk variant can be discriminated from the
    # non-chunk variant.
    type: Literal["ToolMessageChunk"] = "ToolMessageChunk"  # type: ignore[assignment]

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "messages"]

    def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
        if isinstance(other, ToolMessageChunk):
            if self.tool_call_id != other.tool_call_id:
                raise ValueError(
                    "Cannot concatenate ToolMessageChunks with different tool_call_ids."
                )

            return self.__class__(
                tool_call_id=self.tool_call_id,
                content=merge_content(self.content, other.content),
                additional_kwargs=merge_dicts(
                    self.additional_kwargs, other.additional_kwargs
                ),
                response_metadata=merge_dicts(
                    self.response_metadata, other.response_metadata
                ),
                id=self.id,
            )

        return super().__add__(other)
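
An illustrative sketch (not in the original source) of how streamed chunks merge: when the ``tool_call_id`` values match, string content is concatenated and the metadata dicts are merged. The literal values below are assumptions for the example.

left = ToolMessageChunk(content="Hello", tool_call_id="call_1", id="msg_1")
right = ToolMessageChunk(content=" world", tool_call_id="call_1", id="msg_1")
merged = left + right
assert merged.content == "Hello world"
assert merged.tool_call_id == "call_1"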
class ToolCall(TypedDict):
    """Represents a request to call a tool.

    Attributes:
        name: (str) the name of the tool to be called
        args: (dict) the arguments to the tool call
        id: (str) if provided, an identifier associated with the tool call
    """

    name: str
    args: Dict[str, Any]
    id: Optional[str]
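
A brief sketch (not part of the original module) of building a ToolCall; the tool name "multiply", its arguments, and the id are made up for illustration.

call = ToolCall(name="multiply", args={"a": 2, "b": 3}, id="call_1")
assert call["args"]["b"] == 3  # a ToolCall is a plain dict at runtime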
class ToolCallChunk(TypedDict):
    """A chunk of a tool call (e.g., as part of a stream).

    When merging ToolCallChunks (e.g., via AIMessageChunk.__add__),
    all string attributes are concatenated. Chunks are only merged if
    their values of `index` are equal and not None.

    Example:

    .. code-block:: python

        left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
        right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]

        (
            AIMessageChunk(content="", tool_call_chunks=left_chunks)
            + AIMessageChunk(content="", tool_call_chunks=right_chunks)
        ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]

    Attributes:
        name: (str) if provided, a substring of the name of the tool to be called
        args: (str) if provided, a JSON substring of the arguments to the tool call
        id: (str) if provided, a substring of an identifier for the tool call
        index: (int) if provided, the index of the tool call in a sequence
    """

    name: Optional[str]
    args: Optional[str]
    id: Optional[str]
    index: Optional[int]
class InvalidToolCall(TypedDict):
    """Allowance for errors made by the LLM.

    Here we add an `error` key to surface errors made during generation
    (e.g., invalid JSON arguments).
    """

    name: Optional[str]
    args: Optional[str]
    id: Optional[str]
    error: Optional[str]
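
A sketch (not from the original source) of what an InvalidToolCall might hold when a model emits malformed arguments; every value below is an illustrative assumption.

bad_call = InvalidToolCall(
    name="multiply",
    args='{"a": 2,',  # truncated, unparseable JSON
    id="call_2",
    error="Malformed JSON in tool arguments",
)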
def default_tool_parser(
    raw_tool_calls: List[dict],
) -> Tuple[List[ToolCall], List[InvalidToolCall]]:
    """Best-effort parsing of tool calls."""
    tool_calls = []
    invalid_tool_calls = []
    for tool_call in raw_tool_calls:
        if "function" not in tool_call:
            continue
        else:
            function_name = tool_call["function"]["name"]
            try:
                function_args = json.loads(tool_call["function"]["arguments"])
                parsed = ToolCall(
                    name=function_name or "",
                    args=function_args or {},
                    id=tool_call.get("id"),
                )
                tool_calls.append(parsed)
            except json.JSONDecodeError:
                invalid_tool_calls.append(
                    InvalidToolCall(
                        name=function_name,
                        args=tool_call["function"]["arguments"],
                        id=tool_call.get("id"),
                        error=None,
                    )
                )
    return tool_calls, invalid_tool_calls
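
An illustrative run (not part of the original module), assuming the OpenAI-style payload shape this parser expects ("function" with "name" and JSON-string "arguments"); the tool names, ids, and arguments are placeholders. Well-formed calls land in the first list; calls whose arguments fail to parse land in the second.

raw = [
    {"id": "call_1", "function": {"name": "multiply", "arguments": '{"a": 2, "b": 3}'}},
    {"id": "call_2", "function": {"name": "multiply", "arguments": '{"a": 2,'}},  # bad JSON
]
valid, invalid = default_tool_parser(raw)
assert valid == [{"name": "multiply", "args": {"a": 2, "b": 3}, "id": "call_1"}]
assert invalid[0]["args"] == '{"a": 2,' and invalid[0]["error"] is None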
def default_tool_chunk_parser(raw_tool_calls: List[dict]) -> List[ToolCallChunk]:
    """Best-effort parsing of tool call chunks."""
    tool_call_chunks = []
    for tool_call in raw_tool_calls:
        if "function" not in tool_call:
            function_args = None
            function_name = None
        else:
            function_args = tool_call["function"]["arguments"]
            function_name = tool_call["function"]["name"]
        parsed = ToolCallChunk(
            name=function_name,
            args=function_args,
            id=tool_call.get("id"),
            index=tool_call.get("index"),
        )
        tool_call_chunks.append(parsed)
    return tool_call_chunks
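
A sketch (not from the original source) of parsing streamed, OpenAI-style tool-call deltas, where later chunks typically carry only a fragment of the arguments; all literal values are assumptions. Each raw delta becomes one ToolCallChunk, with the shared ``index`` later used to merge fragments.

raw_chunks = [
    {"index": 0, "id": "call_1", "function": {"name": "multiply", "arguments": '{"a":'}},
    {"index": 0, "id": None, "function": {"name": None, "arguments": ' 2, "b": 3}'}},
]
chunks = default_tool_chunk_parser(raw_chunks)
assert chunks[0]["name"] == "multiply" and chunks[0]["args"] == '{"a":'
assert chunks[1]["name"] is None and chunks[1]["index"] == 0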