Source code for langchain_community.llms.predictionguard

import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_core.utils import get_from_dict_or_env

from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class PredictionGuard(LLM):
    """Prediction Guard large language models.

    To use, you should have the ``predictionguard`` python package installed,
    and the environment variable ``PREDICTIONGUARD_TOKEN`` set with your
    access token, or pass it as a named parameter to the constructor. To use
    Prediction Guard's API along with OpenAI models, also set the environment
    variable ``OPENAI_API_KEY`` with your OpenAI API key.

    Example:
        .. code-block:: python

            pgllm = PredictionGuard(
                model="MPT-7B-Instruct",
                token="my-access-token",
                output={"type": "boolean"},
            )
    """

    client: Any  #: :meta private:
    model: Optional[str] = "MPT-7B-Instruct"
    """Model name to use."""

    output: Optional[Dict[str, Any]] = None
    """The output type or structure for controlling the LLM output."""

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 0.75
    """A non-negative float that tunes the degree of randomness in generation."""

    token: Optional[str] = None
    """Your Prediction Guard access token."""

    stop: Optional[List[str]] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the access token and python package exist in environment."""
        token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
        try:
            import predictionguard as pg

            values["client"] = pg.Client(token=token)
        except ImportError:
            raise ImportError(
                "Could not import predictionguard python package. "
                "Please install it with `pip install predictionguard`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Prediction Guard API."""
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "predictionguard"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Prediction Guard's model API.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = pgllm.invoke("Tell me a joke.")
        """
        import predictionguard as pg

        params = self._default_params
        if self.stop is not None and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop is not None:
            params["stop_sequences"] = self.stop
        else:
            params["stop_sequences"] = stop

        # Run the model
        response = pg.Completion.create(
            model=self.model,
            prompt=prompt,
            output=self.output,
            temperature=params["temperature"],
            max_tokens=params["max_tokens"],
            **kwargs,
        )
        text = response["choices"][0]["text"]

        # If stop tokens are provided, Prediction Guard's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])

        return text
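
A minimal usage sketch (not part of the module): it assumes the ``predictionguard`` package is installed and a valid access token is set in ``PREDICTIONGUARD_TOKEN``; the model name and prompts are illustrative only.

from langchain_community.llms.predictionguard import PredictionGuard

# Plain text completion with the class defaults (max_tokens=256, temperature=0.75).
pgllm = PredictionGuard(model="MPT-7B-Instruct")
joke = pgllm.invoke("Tell me a joke.")

# Structured output control: the ``output`` dict is forwarded verbatim to
# pg.Completion.create, here constraining the completion to a boolean
# (mirroring the example in the class docstring above).
pgllm_bool = PredictionGuard(
    model="MPT-7B-Instruct",
    output={"type": "boolean"},
)
answer = pgllm_bool.invoke("Is the sky blue?")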