Source code for langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator

from typing import Any, List, Optional

from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain_core.callbacks.manager import Callbacks
from langchain_core.prompts import PromptTemplate


class ResponseGenerationChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        execution_template = (
            "The AI assistant has parsed the user input into several tasks "
            "and executed them. The results are as follows:\n"
            "{task_execution}"
            "\nPlease summarize the results and generate a response."
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["task_execution"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
class ResponseGenerator:
    """Generate a response based on the input."""

    def __init__(self, llm_chain: LLMChain, stop: Optional[List] = None):
        self.llm_chain = llm_chain
        self.stop = stop

    def generate(
        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
    ) -> str:
        """Given the input, decide what to do."""
        llm_response = self.llm_chain.run(
            **inputs, stop=self.stop, callbacks=callbacks
        )
        return llm_response
def load_response_generator(llm: BaseLanguageModel) -> ResponseGenerator:
    """Load the ResponseGenerator."""
    llm_chain = ResponseGenerationChain.from_llm(llm)
    return ResponseGenerator(
        llm_chain=llm_chain,
    )
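
A minimal usage sketch, assuming an OpenAI chat model is available via langchain_openai; any BaseLanguageModel implementation works, and the example task_execution string is illustrative, not part of this module.

# Usage sketch (illustrative): ChatOpenAI and the task_execution text below are
# assumptions; pass any BaseLanguageModel to load_response_generator.
from langchain_openai import ChatOpenAI

from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
    load_response_generator,
)

llm = ChatOpenAI(temperature=0)
response_generator = load_response_generator(llm)
# The chain's prompt expects a "task_execution" variable summarizing task results.
summary = response_generator.generate(
    {"task_execution": "Task 1 (image-classification) -> 'golden retriever' (score 0.98)"}
)
print(summary)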