# Source code for langchain.chains.history_aware_retriever

from __future__ import annotations

from langchain_core.language_models import LanguageModelLike
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike
from langchain_core.runnables import RunnableBranch


def create_history_aware_retriever(
    llm: LanguageModelLike,
    retriever: RetrieverLike,
    prompt: BasePromptTemplate,
) -> RetrieverOutputLike:
    """Create a chain that takes conversation history and returns documents.

    If there is no `chat_history`, then the `input` is just passed directly
    to the retriever. If there is `chat_history`, then the prompt and LLM
    will be used to generate a search query. That search query is then
    passed to the retriever.

    Args:
        llm: Language model to use for generating a search term given chat
            history.
        retriever: RetrieverLike object that takes a string as input and
            outputs a list of Documents.
        prompt: The prompt used to generate the search query for the
            retriever. Must contain an `input` variable.

    Returns:
        An LCEL Runnable. The Runnable input must take in `input`, and if
        there is chat history it should take it in as `chat_history`. The
        Runnable output is a list of Documents.

    Raises:
        ValueError: If `input` is not one of the prompt's input variables.

    Example:
        .. code-block:: python

            # pip install -U langchain langchain-community

            from langchain_community.chat_models import ChatOpenAI
            from langchain.chains import create_history_aware_retriever
            from langchain import hub

            rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
            llm = ChatOpenAI()
            retriever = ...
            chat_retriever_chain = create_history_aware_retriever(
                llm, retriever, rephrase_prompt
            )

            chat_retriever_chain.invoke({"input": "...", "chat_history": []})

    """
    # Fail fast if the prompt cannot receive the user's question.
    if "input" not in prompt.input_variables:
        raise ValueError(
            "Expected `input` to be a prompt variable, "
            f"but got {prompt.input_variables}"
        )

    retrieve_documents: RetrieverOutputLike = RunnableBranch(
        (
            # Both empty string and empty list evaluate to False
            lambda x: not x.get("chat_history", False),
            # If no chat history, then we just pass input to retriever
            (lambda x: x["input"]) | retriever,
        ),
        # If chat history, then we pass inputs to LLM chain, then to retriever
        prompt | llm | StrOutputParser() | retriever,
    ).with_config(run_name="chat_retriever_chain")

    return retrieve_documents