Source code for langchain.chains.conversation.base

"""链条继续对话并调用LLM。"""
from typing import Dict, List

from langchain_core.memory import BaseMemory
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Extra, Field, root_validator

from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory


class ConversationChain(LLMChain):
    """Chain to have a conversation and load context from memory.

    Example:
        .. code-block:: python

            from langchain.chains import ConversationChain
            from langchain_community.llms import OpenAI

            conversation = ConversationChain(llm=OpenAI())
    """

    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    """Default memory store."""
    prompt: BasePromptTemplate = PROMPT
    """Default conversation prompt to use."""

    input_key: str = "input"  #: :meta private:
    output_key: str = "response"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False
    @property
    def input_keys(self) -> List[str]:
        """Use this since some prompt variables come from history."""
        return [self.input_key]

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        memory_keys = values["memory"].memory_variables
        input_key = values["input_key"]
        if input_key in memory_keys:
            raise ValueError(
                f"The input key {input_key} was also found in the memory keys "
                f"({memory_keys}) - please provide keys that don't overlap."
            )
        prompt_variables = values["prompt"].input_variables
        expected_keys = memory_keys + [input_key]
        if set(expected_keys) != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but got {memory_keys} as inputs from "
                f"memory, and {input_key} as the normal input key."
            )
        return values
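
For reference, a minimal usage sketch of the constraint enforced by ``validate_prompt_input_variables``: a custom prompt must expose exactly the memory's variables plus the input key. The template text below is an illustrative assumption, not part of this module; the variable names ``history`` and ``input`` match ``ConversationBufferMemory``'s default memory key and the chain's default ``input_key``.

    .. code-block:: python

        # Sketch: ConversationChain with a custom prompt. The template wording is
        # an assumption for illustration; only the variable names matter to the
        # validator.
        from langchain.chains import ConversationChain
        from langchain.memory import ConversationBufferMemory
        from langchain_core.prompts import PromptTemplate
        from langchain_community.llms import OpenAI

        template = (
            "The following is a conversation between a human and an AI.\n"
            "{history}\n"
            "Human: {input}\n"
            "AI:"
        )
        prompt = PromptTemplate(
            input_variables=["history", "input"], template=template
        )

        conversation = ConversationChain(
            llm=OpenAI(),
            prompt=prompt,
            memory=ConversationBufferMemory(),  # memory_variables == ["history"]
        )
        # validate_prompt_input_variables passes because
        # set(["history", "input"]) == set(prompt.input_variables).
        # Dropping "history" from the prompt (or naming the memory key "input")
        # would raise a ValueError at construction time.
        response = conversation.predict(input="Hello!")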