import logging
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
logger = logging.getLogger(__name__)
def convert_message_to_dict(message: BaseMessage) -> dict:
    """Convert a message to a dict that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
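
# Illustrative usage sketch (not part of the original module): the helper
# below shows the role/content dicts ``convert_message_to_dict`` produces for
# a short, hypothetical conversation.
def _example_convert_messages() -> List[Dict[str, Any]]:
    """Convert a hypothetical conversation into Qianfan API dicts."""
    conversation: List[BaseMessage] = [
        HumanMessage(content="What's the weather in Beijing?"),
        AIMessage(content="Let me check that for you."),
        FunctionMessage(name="get_weather", content='{"temp_c": 25}'),
    ]
    # HumanMessage -> role "user", AIMessage -> role "assistant",
    # FunctionMessage -> role "function" (with the tool name attached).
    return [convert_message_to_dict(m) for m in conversation]
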
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
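    """Convert a raw Qianfan response dict into an ``AIMessage``."""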
content = _dict.get("result", "") or ""
additional_kwargs: Mapping[str, Any] = {}
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
if "thoughts" in additional_kwargs["function_call"]:
# align to api sample, which affects the llm function_call output
additional_kwargs["function_call"].pop("thoughts")
additional_kwargs = {**_dict.get("body", {}), **additional_kwargs}
return AIMessage(
content=content,
additional_kwargs=dict(
finish_reason=additional_kwargs.get("finish_reason", ""),
request_id=additional_kwargs["id"],
object=additional_kwargs.get("object", ""),
search_info=additional_kwargs.get("search_info", []),
function_call=additional_kwargs.get("function_call", {}),
tool_calls=[
{
"type": "function",
"function": additional_kwargs.get("function_call", {}),
}
],
),
)
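
# Illustrative parsing sketch (hypothetical payload, not a real API response):
# shows how ``_convert_dict_to_message`` folds a Qianfan response dict into an
# ``AIMessage``, surfacing ``body`` metadata in ``additional_kwargs``.
def _example_parse_response() -> AIMessage:
    """Parse a hypothetical Qianfan response payload."""
    payload = {
        "result": "It is sunny in Beijing today.",
        "body": {
            "id": "as-hypothetical-id",  # surfaced as ``request_id``
            "object": "chat.completion",
            "finish_reason": "normal",
        },
    }
    return _convert_dict_to_message(payload)
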
class QianfanChatEndpoint(BaseChatModel):
    """Baidu Qianfan chat model.

    To use, you should have the ``qianfan`` python package installed, and set
    the environment variables ``QIANFAN_AK`` and ``QIANFAN_SK`` with your API
    key and Secret Key.

    ak, sk are required parameters, which you can obtain from
    https://cloud.baidu.com/product/wenxinworkshop

    Example:
        .. code-block:: python

            from langchain_community.chat_models import QianfanChatEndpoint

            qianfan_chat = QianfanChatEndpoint(
                model="ERNIE-Bot",
                endpoint="your_endpoint",
                qianfan_ak="your_ak",
                qianfan_sk="your_sk",
            )
"""
init_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""初始化qianfan客户端的kwargs,例如`query_per_second`,它与qianfan资源对象相关联,用于限制QPS。"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""使用`do`时调用模型的额外参数。"""
client: Any
qianfan_ak: Optional[SecretStr] = None
qianfan_sk: Optional[SecretStr] = None
streaming: Optional[bool] = False
"""是否要流式传输结果。"""
request_timeout: Optional[int] = Field(60, alias="timeout")
"""聊天http请求的请求超时"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""模型参数,仅支持ERNIE-Bot和ERNIE-Bot-turbo。
在其他模型的情况下,传递这些参数不会影响结果。"""
model: str = "ERNIE-Bot-turbo"
"""模型名称。
您可以从https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu 获取
预设模型映射到一个端点。
如果设置了`endpoint`,则`model`将被忽略。
默认为ERNIE-Bot-turbo。"""
endpoint: Optional[str] = None
"""Qianfan LLM的端点,如果使用自定义模型则需要。"""
class Config:
"""此pydantic对象的配置。"""
allow_population_by_field_name = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
default="",
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
default="",
)
)
params = {
**values.get("init_kwargs", {}),
"model": values["model"],
"stream": values["streaming"],
}
if values["qianfan_ak"].get_secret_value() != "":
params["ak"] = values["qianfan_ak"].get_secret_value()
if values["qianfan_sk"].get_secret_value() != "":
params["sk"] = values["qianfan_sk"].get_secret_value()
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ImportError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""chat_model的返回类型。"""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""获取调用Qianfan API 的默认参数。"""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""将消息列表转换为包含消息内容和默认参数的字典。
参数:
messages (List[BaseMessage]): 消息列表。
**kwargs (Any): 可选参数,用于向结果字典添加额外参数。
返回:
Dict[str, Any]: 包含消息内容和默认参数的字典。
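
        A sketch of the resulting shape for hypothetical inputs; note that
        system messages are concatenated into a single ``system`` string
        instead of being kept in the message list:

        .. code-block:: python

            # [SystemMessage(content="Be brief."), HumanMessage(content="hi")]
            # becomes roughly:
            # {
            #     "messages": [{"role": "user", "content": "hi"}],
            #     "system": "Be brief.",  # with a trailing newline appended
            #     # ...plus the default params and any extra kwargs
            # }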
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += cast(str, messages[i].content) + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""调用一个前翻模型端点,为每一代生成一个提示。
参数:
messages: 传递给模型的消息。
stop: 在生成时可选的停用词列表。
返回:
模型生成的字符串。
示例:
.. code-block:: python
response = qianfan_model.invoke("Tell me a joke.")
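
        A hedged streaming sketch (assuming ``qianfan_chat`` is a configured
        instance of this class; with ``streaming=True`` the chunks below are
        accumulated into a single result):

        .. code-block:: python

            for chunk in qianfan_chat.stream("Tell me a joke."):
                print(chunk.content, end="")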
"""
if self.streaming:
completion = ""
token_usage = {}
chat_generation_info: Dict = {}
for chunk in self._stream(messages, stop, run_manager, **kwargs):
chat_generation_info = (
chunk.generation_info
if chunk.generation_info is not None
else chat_generation_info
)
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={
"token_usage": chat_generation_info.get("usage", {}),
"model_name": self.model,
},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
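        """Asynchronous version of ``_generate``: call the Qianfan chat
        endpoint and wrap the response in a ``ChatResult``."""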
if self.streaming:
completion = ""
token_usage = {}
chat_generation_info: Dict = {}
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
chat_generation_info = (
chunk.generation_info
if chunk.generation_info is not None
else chat_generation_info
)
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={
"token_usage": chat_generation_info.get("usage", {}),
"model_name": self.model,
},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
response_payload = await self.client.ado(**params)
lc_msg = _convert_dict_to_message(response_payload)
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
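        """Stream the response from the Qianfan chat endpoint, yielding one
        ``ChatGenerationChunk`` per received chunk."""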
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
params["stream"] = True
for res in self.client.do(**params):
if res:
msg = _convert_dict_to_message(res)
additional_kwargs = msg.additional_kwargs.get("function_call", {})
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk( # type: ignore[call-arg]
content=msg.content,
role="assistant",
additional_kwargs=additional_kwargs,
),
generation_info=msg.additional_kwargs,
)
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
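        """Asynchronous version of ``_stream``."""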
params = self._convert_prompt_msg_params(messages, **kwargs)
params["stop"] = stop
params["stream"] = True
async for res in await self.client.ado(**params):
if res:
msg = _convert_dict_to_message(res)
additional_kwargs = msg.additional_kwargs.get("function_call", {})
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk( # type: ignore[call-arg]
content=msg.content,
role="assistant",
additional_kwargs=additional_kwargs,
),
generation_info=msg.additional_kwargs,
)
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
    def with_structured_output(
self,
schema: Union[Dict, Type[BaseModel]],
*,
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
"""模型包装器,返回按照给定模式匹配的输出。
参数:
schema: 输出模式,可以是字典或Pydantic类。如果是Pydantic类,则模型输出将是该类的对象。如果是字典,则模型输出将是一个字典。对于Pydantic类,返回的属性将被验证,而对于字典则不会。如果`method`为"function_calling"且`schema`为字典,则该字典必须符合OpenAI的函数调用规范。
include_raw: 如果为False,则仅返回解析后的结构化输出。如果在模型输出解析过程中发生错误,将会被引发。如果为True,则原始模型响应(BaseMessage)和解析后的模型响应都将被返回。如果在输出解析过程中发生错误,也将被捕获并返回。最终输出始终是一个带有键"raw"、"parsed"和"parsing_error"的字典。
返回:
一个可运行的对象,接受任何ChatModel输入并返回输出:
如果include_raw为True,则返回一个带有键的字典:
raw: BaseMessage
parsed: Optional[_DictOrPydantic]
parsing_error: Optional[BaseException]
如果include_raw为False,则只返回_DictOrPydantic,其中_DictOrPydantic取决于模式:
如果schema是Pydantic类,则_DictOrPydantic为Pydantic类。
如果schema是字典,则_DictOrPydantic为字典。
示例:函数调用,Pydantic模式(method="function_calling",include_raw=False):
.. code-block:: python
from langchain_mistralai import QianfanChatEndpoint
from langchain_core.pydantic_v1 import BaseModel
class AnswerWithJustification(BaseModel):
'''用户问题的答案以及答案的理由。'''
answer: str
justification: str
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(AnswerWithJustification)
structured_llm.invoke("一磅砖头和一磅羽毛哪个重")
# -> AnswerWithJustification(
# answer='它们的重量相同',
# justification='一磅砖头和一磅羽毛都重一磅。重量相同,但物体的体积或密度可能不同。'
# )
示例:函数调用,Pydantic模式(method="function_calling",include_raw=True):
.. code-block:: python
from langchain_mistralai import QianfanChatEndpoint
from langchain_core.pydantic_v1 import BaseModel
class AnswerWithJustification(BaseModel):
'''用户问题的答案以及答案的理由。'''
answer: str
justification: str
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True)
structured_llm.invoke("一磅砖头和一磅羽毛哪个重")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"它们的重量相同。","justification":"一磅砖头和一磅羽毛都重一磅。重量相同,但物体的体积或密度可能不同。"}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='它们的重量相同。', justification='一磅砖头和一磅羽毛都重一磅。重量相同,但物体的体积或密度可能不同。'),
# 'parsing_error': None
# }
示例:函数调用,字典模式(method="function_calling",include_raw=False):
.. code-block:: python
from langchain_mistralai import QianfanChatEndpoint
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.utils.function_calling import convert_to_openai_tool
class AnswerWithJustification(BaseModel):
'''用户问题的答案以及答案的理由。'''
answer: str
justification: str
dict_schema = convert_to_openai_tool(AnswerWithJustification)
llm = QianfanChatEndpoint(endpoint="ernie-3.5-8k-0329")
structured_llm = llm.with_structured_output(dict_schema)
structured_llm.invoke("一磅砖头和一磅羽毛哪个重")
# -> {
# 'answer': '它们的重量相同',
# 'justification': '一磅砖头和一磅羽毛都重一磅。重量相同,但两种物质的体积和密度不同。'
# }
""" # noqa: E501
if kwargs:
raise ValueError(f"Received unsupported arguments {kwargs}")
is_pydantic_schema = isinstance(schema, type) and issubclass(schema, BaseModel)
llm = self.bind_tools([schema])
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
key_name = convert_to_openai_tool(schema)["function"]["name"]
output_parser = JsonOutputKeyToolsParser(
key_name=key_name, first_tool_only=True
)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
else:
return llm | output_parser