# Source code for langchain.chains.natbot.base
"""实现一个由LLM驱动的浏览器。"""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.natbot.prompt import PROMPT
class NatBotChain(Chain):
    """Implement an LLM driven browser.

    **Security note**: This chain contains code that controls a web browser.

    The web browser can be used to navigate to:

    - Any URL (including any internal network URLs)
    - And local files

    Exercise care if exposing this chain to end users. Control who is able to
    access and use this chain, and isolate the network access of the server
    that hosts this chain.

    See https://python.langchain.com/docs/security for more information.

    Example:
        .. code-block:: python

            from langchain.chains import NatBotChain
            natbot = NatBotChain.from_default("Buy me a new hat.")
    """

    llm_chain: LLMChain
    objective: str
    """Objective that NatBot is tasked with completing."""
    llm: Optional[BaseLanguageModel] = None
    """[Deprecated] LLM wrapper to use."""
    input_url_key: str = "url"  #: :meta private:
    input_browser_content_key: str = "browser_content"  #: :meta private:
    previous_command: str = ""  #: :meta private:
    output_key: str = "command"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator(pre=True)
    def raise_deprecation(cls, values: Dict) -> Dict:
        """Warn when ``llm`` is passed directly and build ``llm_chain`` from it.

        Kept for backward compatibility with the deprecated ``llm`` argument.
        """
        if "llm" in values:
            warnings.warn(
                "Directly instantiating an NatBotChain with an llm is deprecated. "
                "Please instantiate with llm_chain argument or using the from_llm "
                "class method."
            )
            # Only synthesize llm_chain when the caller did not supply one.
            if "llm_chain" not in values and values["llm"] is not None:
                values["llm_chain"] = LLMChain(llm=values["llm"], prompt=PROMPT)
        return values

    @classmethod
    def from_default(cls, objective: str, **kwargs: Any) -> NatBotChain:
        """Load with default LLMChain.

        Raises:
            NotImplementedError: Always; use :meth:`from_llm` instead.
        """
        # Bug fix: the original concatenated these literals without separators,
        # producing one garbled run-on message.
        raise NotImplementedError(
            "This method is no longer implemented. Please use from_llm. "
            "For example:\n"
            "llm = OpenAI(temperature=0.5, best_of=10, n=3, max_tokens=50)\n"
            "NatBotChain.from_llm(llm, objective)"
        )

    @classmethod
    def from_llm(
        cls, llm: BaseLanguageModel, objective: str, **kwargs: Any
    ) -> NatBotChain:
        """Load from LLM.

        Args:
            llm: Language model used to pick the next browser command.
            objective: Objective that NatBot is tasked with completing.

        Returns:
            A configured :class:`NatBotChain`.
        """
        llm_chain = LLMChain(llm=llm, prompt=PROMPT)
        return cls(llm_chain=llm_chain, objective=objective, **kwargs)

    @property
    def input_keys(self) -> List[str]:
        """Expect url and browser content.

        :meta private:
        """
        return [self.input_url_key, self.input_browser_content_key]

    @property
    def output_keys(self) -> List[str]:
        """Return command.

        :meta private:
        """
        return [self.output_key]

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        """Predict the next browser command from the current page state."""
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        url = inputs[self.input_url_key]
        browser_content = inputs[self.input_browser_content_key]
        # Inputs are truncated (url to 100 chars, page content to 4500 chars),
        # presumably to keep the prompt within the model's context window.
        llm_cmd = self.llm_chain.predict(
            objective=self.objective,
            url=url[:100],
            previous_command=self.previous_command,
            browser_content=browser_content[:4500],
            callbacks=_run_manager.get_child(),
        )
        llm_cmd = llm_cmd.strip()
        # Remember the command so the next call's prompt can reference it.
        # NOTE(review): this makes the chain instance stateful across calls.
        self.previous_command = llm_cmd
        return {self.output_key: llm_cmd}

    def execute(self, url: str, browser_content: str) -> str:
        """Figure out next browser command to run.

        Args:
            url: URL of the site currently on.
            browser_content: Content of the page as currently displayed by the
                browser.

        Returns:
            Next browser command to run.

        Example:
            .. code-block:: python

                browser_content = "...."
                llm_command = natbot.run("www.google.com", browser_content)
        """
        _inputs = {
            self.input_url_key: url,
            self.input_browser_content_key: browser_content,
        }
        return self(_inputs)[self.output_key]

    @property
    def _chain_type(self) -> str:
        return "nat_bot_chain"