Source code for langchain_core.prompts.structured

from typing import (
    Any,
    Callable,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
    Sequence,
    Set,
    Type,
    Union,
)

from langchain_core._api.beta_decorator import beta
from langchain_core.language_models.base import BaseLanguageModel
from langchain_core.prompts.chat import (
    BaseChatPromptTemplate,
    BaseMessagePromptTemplate,
    ChatPromptTemplate,
    MessageLikeRepresentation,
    MessagesPlaceholder,
    _convert_to_message,
)
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.runnables.base import (
    Other,
    Runnable,
    RunnableSequence,
    RunnableSerializable,
)


@beta()
class StructuredPrompt(ChatPromptTemplate):
    """Structured prompt template for a language model."""

    schema_: Union[Dict, Type[BaseModel]]
    """Schema for the structured prompt."""

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object.

        For example, if the class is `langchain.llms.openai.OpenAI`, then the
        namespace is ["langchain", "llms", "openai"].
        """
        return cls.__module__.split(".")
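
    # Note: since this class is defined in langchain_core/prompts/structured.py,
    # ``cls.__module__`` is "langchain_core.prompts.structured", so
    # StructuredPrompt.get_lc_namespace() returns
    # ["langchain_core", "prompts", "structured"].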

    @classmethod
    def from_messages_and_schema(
        cls,
        messages: Sequence[MessageLikeRepresentation],
        schema: Union[Dict, Type[BaseModel]],
    ) -> ChatPromptTemplate:
        """Create a structured prompt template from a variety of message formats.

        Examples:

            Instantiation from a list of message templates:

            .. code-block:: python

                class OutputSchema(BaseModel):
                    name: str
                    value: int

                template = StructuredPrompt.from_messages_and_schema(
                    [
                        ("human", "Hello, how are you?"),
                        ("ai", "I'm doing well, thanks!"),
                        ("human", "That is good to hear."),
                    ],
                    OutputSchema,
                )

        Args:
            messages: sequence of message representations.
                A message can be represented using the following formats:
                (1) BaseMessagePromptTemplate, (2) BaseMessage,
                (3) 2-tuple of (message type, template); e.g.,
                ("human", "{user_input}"),
                (4) 2-tuple of (message class, template),
                (5) a string which is shorthand for ("human", template); e.g.,
                "{user_input}".
            schema: a dictionary representation of a function call, or a
                Pydantic model.

        Returns:
            a structured prompt template.
        """
        _messages = [_convert_to_message(message) for message in messages]

        # Automatically infer input variables from messages
        input_vars: Set[str] = set()
        partial_vars: Dict[str, Any] = {}
        for _message in _messages:
            if isinstance(_message, MessagesPlaceholder) and _message.optional:
                partial_vars[_message.variable_name] = []
            elif isinstance(
                _message, (BaseChatPromptTemplate, BaseMessagePromptTemplate)
            ):
                input_vars.update(_message.input_variables)

        return cls(
            input_variables=sorted(input_vars),
            messages=_messages,
            partial_variables=partial_vars,
            schema_=schema,
        )

    def __or__(
        self,
        other: Union[
            Runnable[Any, Other],
            Callable[[Any], Other],
            Callable[[Iterator[Any]], Iterator[Other]],
            Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
        ],
    ) -> RunnableSerializable[Dict, Other]:
        if isinstance(other, BaseLanguageModel) or hasattr(
            other, "with_structured_output"
        ):
            return RunnableSequence(self, other.with_structured_output(self.schema_))
        else:
            raise NotImplementedError(
                "Structured prompts need to be piped to a language model."
            )
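
    # A minimal sketch of what ``__or__`` enables. ``ChatOpenAI`` (from the
    # separate ``langchain_openai`` package) is an assumed, illustrative model;
    # any chat model exposing ``with_structured_output`` would work:
    #
    #     from langchain_openai import ChatOpenAI
    #
    #     chain = structured_prompt | ChatOpenAI()
    #     # ...which is equivalent to:
    #     # RunnableSequence(
    #     #     structured_prompt,
    #     #     ChatOpenAI().with_structured_output(structured_prompt.schema_),
    #     # )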

    def pipe(
        self,
        *others: Union[Runnable[Any, Other], Callable[[Any], Other]],
        name: Optional[str] = None,
    ) -> RunnableSerializable[Dict, Other]:
        # Guard against an empty ``others`` before indexing, and require the
        # first step to be (or behave like) a language model.
        if others and (
            isinstance(others[0], BaseLanguageModel)
            or hasattr(others[0], "with_structured_output")
        ):
            return RunnableSequence(
                self,
                others[0].with_structured_output(self.schema_),
                *others[1:],
                name=name,
            )
        else:
            raise NotImplementedError(
                "Structured prompts need to be piped to a language model."
            )
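

# End-to-end usage sketch (not part of the module). ``ChatOpenAI`` from the
# ``langchain_openai`` package is an assumed integration; the schema, messages,
# and input values are illustrative:
#
#     from langchain_core.pydantic_v1 import BaseModel
#     from langchain_openai import ChatOpenAI
#
#     class OutputSchema(BaseModel):
#         name: str
#         value: int
#
#     prompt = StructuredPrompt.from_messages_and_schema(
#         [("human", "Describe {thing}.")], OutputSchema
#     )
#     chain = prompt | ChatOpenAI()
#     result = chain.invoke({"thing": "a llama"})  # -> OutputSchema instance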