"""使用OpenAI函数调用API创建链的方法。"""
from typing import (
Any,
Callable,
Dict,
Optional,
Sequence,
Type,
Union,
)
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import (
BaseLLMOutputParser,
)
from langchain_core.output_parsers.openai_functions import (
PydanticAttrOutputFunctionsParser,
)
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.utils.function_calling import (
PYTHON_TO_JSON_TYPES,
convert_to_openai_function,
)
from langchain.chains import LLMChain
from langchain.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
get_openai_output_parser,
)
__all__ = [
"get_openai_output_parser",
"create_openai_fn_runnable",
"create_structured_output_runnable", # deprecated
"create_openai_fn_chain", # deprecated
"create_structured_output_chain", # deprecated
"PYTHON_TO_JSON_TYPES", # backwards compatibility
"convert_to_openai_function", # backwards compatibility
]
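
# `convert_to_openai_function` and `PYTHON_TO_JSON_TYPES` are re-exported above for
# backwards compatibility. As a rough sketch of the conversion (output abbreviated,
# not a verbatim guarantee): a pydantic model such as
#
#     class Dog(BaseModel):
#         """Identifying information about a dog."""
#         name: str
#
# is turned into an OpenAI function-schema dict along the lines of
#
#     {
#         "name": "Dog",
#         "description": "Identifying information about a dog.",
#         "parameters": {
#             "type": "object",
#             "properties": {"name": {"type": "string"}},
#             "required": ["name"],
#         },
#     }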
@deprecated(since="0.1.1", removal="0.3.0", alternative="create_openai_fn_runnable")
def create_openai_fn_chain(
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
*,
enforce_single_function_usage: bool = True,
output_key: str = "function",
output_parser: Optional[BaseLLMOutputParser] = None,
**kwargs: Any,
) -> LLMChain: # type: ignore[valid-type]
"""[传统] 创建一个使用OpenAI函数的LLM链。
参数:
functions: 一个序列,可以是字典、pydantic.BaseModels类或Python函数。如果传入字典,则假定它们已经是有效的OpenAI函数。如果只传入一个函数,则将强制模型使用该函数。pydantic.BaseModels和Python函数应该有描述函数功能的文档字符串。为了获得最佳结果,pydantic.BaseModels应该有参数描述,Python函数应该在文档字符串中使用Google Python风格的参数描述。此外,Python函数应该只使用原始类型(str、int、float、bool)或pydantic.BaseModels作为参数。
llm: 要使用的语言模型,假定支持OpenAI函数调用API。
prompt: 传递给模型的BasePromptTemplate。
enforce_single_function_usage: 仅在传入单个函数时使用。如果为True,则将强制模型使用给定的函数。如果为False,则模型将有选择使用给定函数或不使用的选项。
output_key: 在LLMChain.__call__中返回输出时使用的键。
output_parser: 用于解析模型输出的BaseLLMOutputParser。默认情况下将从函数类型推断。如果传入pydantic.BaseModels,则OutputParser将尝试使用这些来解析输出。否则,模型输出将简单地解析为JSON。如果传入多个函数且它们不是pydantic.BaseModels,则链输出将包括返回的函数名称和传递给函数的参数。
返回:
一个LLMChain,当运行时将传入给定的函数到模型中。
示例:
.. code-block:: python
from typing import Optional
from langchain.chains.openai_functions import create_openai_fn_chain
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class RecordPerson(BaseModel):
\"\"\"记录有关一个人的一些身份信息。\"\"\"
name: str = Field(..., description="人的姓名")
age: int = Field(..., description="人的年龄")
fav_food: Optional[str] = Field(None, description="人喜欢的食物")
class RecordDog(BaseModel):
\"\"\"记录有关一只狗的一些身份信息。\"\"\"
name: str = Field(..., description="狗的名字")
color: str = Field(..., description="狗的颜色")
fav_food: Optional[str] = Field(None, description="狗喜欢的食物")
llm = ChatOpenAI(model="gpt-4", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", "您是一个记录实体的世界级算法。"),
("human", "调用相关函数记录以下输入中的实体:{input}"),
("human", "提示:确保以正确的格式回答"),
]
)
chain = create_openai_fn_chain([RecordPerson, RecordDog], llm, prompt)
chain.run("Harry是一只胖乎乎的棕色比格犬,喜欢鸡肉")
# -> RecordDog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if not functions:
raise ValueError("Need to pass in at least one function. Received zero.")
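    # Normalize each entry (dict, pydantic.BaseModel class, or callable) into an
    # OpenAI function-schema dict.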
openai_functions = [convert_to_openai_function(f) for f in functions]
output_parser = output_parser or get_openai_output_parser(functions)
llm_kwargs: Dict[str, Any] = {
"functions": openai_functions,
}
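    # When only a single function is given and enforcement is on, force the model
    # to call that function by name via the `function_call` argument.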
if len(openai_functions) == 1 and enforce_single_function_usage:
llm_kwargs["function_call"] = {"name": openai_functions[0]["name"]}
llm_chain = LLMChain( # type: ignore[misc]
llm=llm,
prompt=prompt,
output_parser=output_parser,
llm_kwargs=llm_kwargs,
output_key=output_key,
**kwargs,
)
return llm_chain
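

# A minimal migration sketch for the deprecation above (not part of the public API):
# assuming the `RecordDog` model, `llm`, and `prompt` from the docstring example, the
# suggested replacement can be used along these lines:
#
#     from langchain.chains.structured_output.base import create_openai_fn_runnable
#
#     runnable = create_openai_fn_runnable([RecordDog], llm, prompt)
#     runnable.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
#     # -> RecordDog(name="Harry", color="brown", fav_food="chicken")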
@deprecated(
since="0.1.1", removal="0.3.0", alternative="ChatOpenAI.with_structured_output"
)
def create_structured_output_chain(
output_schema: Union[Dict[str, Any], Type[BaseModel]],
llm: BaseLanguageModel,
prompt: BasePromptTemplate,
*,
output_key: str = "function",
output_parser: Optional[BaseLLMOutputParser] = None,
**kwargs: Any,
) -> LLMChain: # type: ignore[valid-type]
"""[遗留] 创建一个LLMChain,使用OpenAI函数来获取结构化输出。
参数:
output_schema:可以是字典或pydantic.BaseModel类。如果传入字典,则假定已经是有效的JsonSchema。
为了获得最佳结果,pydantic.BaseModels应该有描述模式代表什么以及参数描述的文档字符串。
llm:要使用的语言模型,假定支持OpenAI函数调用API。
prompt:传递给模型的BasePromptTemplate。
output_key:在LLMChain.__call__中返回输出时要使用的键。
output_parser:用于解析模型输出的BaseLLMOutputParser。默认情况下将从函数类型中推断出来。如果传入pydantic.BaseModels,则OutputParser将尝试使用这些来解析输出。否则,模型输出将简单地解析为JSON。
返回:
一个LLMChain,将给定的函数传递给模型。
示例:
.. code-block:: python
from typing import Optional
from langchain.chains.openai_functions import create_structured_output_chain
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
class Dog(BaseModel):
\"\"\"关于狗的身份信息。\"\"\"
name: str = Field(..., description="狗的名字")
color: str = Field(..., description="狗的颜色")
fav_food: Optional[str] = Field(None, description="狗喜欢的食物")
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
prompt = ChatPromptTemplate.from_messages(
[
("system", "您是一个世界级的算法,用于提取结构化格式中的信息。"),
("human", "使用给定的格式从以下输入中提取信息:{input}"),
("human", "提示:确保以正确的格式回答"),
]
)
chain = create_structured_output_chain(Dog, llm, prompt)
chain.run("Harry was a chubby brown beagle who loved chicken")
# -> Dog(name="Harry", color="brown", fav_food="chicken")
""" # noqa: E501
if isinstance(output_schema, dict):
function: Any = {
"name": "output_formatter",
"description": (
"Output formatter. Should always be used to format your response to the"
" user."
),
"parameters": output_schema,
}
else:
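        # Wrap the user's schema as the single `output` field of a helper model so
        # the model's function-call arguments can be validated against it.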
class _OutputFormatter(BaseModel):
"""输出格式化程序。应始终用于格式化对用户的响应。""" # noqa: E501
output: output_schema # type: ignore
function = _OutputFormatter
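        # By default, parse the function call with the wrapper model and surface only
        # its `output` attribute, i.e. an instance of the user's schema.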
output_parser = output_parser or PydanticAttrOutputFunctionsParser(
pydantic_schema=_OutputFormatter, attr_name="output"
)
return create_openai_fn_chain(
[function],
llm,
prompt,
output_key=output_key,
output_parser=output_parser,
**kwargs,
)
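

# A minimal migration sketch for the deprecation above (assuming the `Dog` model from
# the docstring example and the separate `langchain_openai` package):
#
#     from langchain_openai import ChatOpenAI
#
#     structured_llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0).with_structured_output(Dog)
#     structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
#     # -> Dog(name="Harry", color="brown", fav_food="chicken")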