# Source code for langchain_community.llms.opaqueprompts
import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.llms import LLM
from langchain_core.messages import AIMessage
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_core.utils import get_from_dict_or_env
# Module-level logger, namespaced to this module per the standard logging convention.
logger = logging.getLogger(__name__)
class OpaquePrompts(LLM):
    """LLM that sanitizes prompts using OpaquePrompts.

    Wraps another LLM and sanitizes the prompt before passing it on,
    then desanitizes the response afterwards.

    To use, you should have the ``opaqueprompts`` python package installed,
    and the environment variable ``OPAQUEPROMPTS_API_KEY`` set with your API
    key, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.llms import OpaquePrompts
            from langchain_community.chat_models import ChatOpenAI

            op_llm = OpaquePrompts(base_llm=ChatOpenAI())
    """

    base_llm: BaseLanguageModel
    """The base LLM to use."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the OpaquePrompts API key and the Python package exist.

        Raises:
            ImportError: If the ``opaqueprompts`` package is not installed.
            ValueError: If the package imported improperly or no API key is
                found in ``values`` or the environment.
        """
        try:
            import opaqueprompts as op
        except ImportError:
            raise ImportError(
                "Could not import the `opaqueprompts` Python package, "
                "please install it with `pip install opaqueprompts`."
            )
        # A namespace-package shadow or broken install can import but leave
        # __package__ unset; reject that explicitly rather than failing later.
        if op.__package__ is None:
            raise ValueError(
                "Could not properly import `opaqueprompts`, "
                "opaqueprompts.__package__ is None."
            )

        api_key = get_from_dict_or_env(
            values, "opaqueprompts_api_key", "OPAQUEPROMPTS_API_KEY", default=""
        )
        if not api_key:
            raise ValueError(
                "Could not find OPAQUEPROMPTS_API_KEY in the environment. "
                "Please set it to your OpaquePrompts API key. "
                "You can get it by creating an account on the OpaquePrompts website: "
                "https://opaqueprompts.opaque.co/ ."
            )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the base LLM with sanitization before and desanitization after.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional stop sequences, forwarded to the base LLM via bind().
            run_manager: Optional callback manager for this run.

        Returns:
            The string generated by the model, with sensitive information
            restored.

        Example:
            .. code-block:: python

                response = op_llm.invoke("Tell me a joke.")
        """
        import opaqueprompts as op

        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
        # sanitize the prompt by replacing the sensitive information with a placeholder
        sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
        sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]

        # TODO: Add in callbacks once child runs for LLMs are supported by LangSmith.
        # call the LLM with the sanitized prompt and get the response
        llm_response = self.base_llm.bind(stop=stop).invoke(
            sanitized_prompt_value_str,
        )
        # Chat models return an AIMessage; unwrap to the plain text content.
        if isinstance(llm_response, AIMessage):
            llm_response = llm_response.content

        # desanitize the response by restoring the original sensitive information
        desanitize_response: op.DesanitizeResponse = op.desanitize(
            llm_response,
            secure_context=sanitize_response.secure_context,
        )
        return desanitize_response.desanitized_text

    @property
    def _llm_type(self) -> str:
        """Return type of LLM.

        This is an override of the base class method.
        """
        return "opaqueprompts"