Source code for langchain_community.llms.aleph_alpha

from typing import Any, Dict, List, Optional, Sequence

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env

from langchain_community.llms.utils import enforce_stop_tokens


class AlephAlpha(LLM):
    """Aleph Alpha large language models.

    To use, you should have the ``aleph_alpha_client`` python package installed,
    and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key,
    or pass it as a named parameter to the constructor.

    Parameters are explained in more detail here:
    https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10

    Example:
        .. code-block:: python

            from langchain_community.llms import AlephAlpha

            aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""

    maximum_tokens: int = 64
    """The maximum number of tokens to be generated."""

    temperature: float = 0.0
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: int = 0
    """Number of most likely tokens to consider at each step."""

    top_p: float = 0.0
    """Total probability mass of tokens to consider at each step."""

    presence_penalty: float = 0.0
    """Penalizes repeated tokens."""

    frequency_penalty: float = 0.0
    """Penalizes repeated tokens according to frequency."""

    repetition_penalties_include_prompt: Optional[bool] = False
    """Flag deciding whether presence penalty or frequency penalty
    are updated from the prompt."""

    use_multiplicative_presence_penalty: Optional[bool] = False
    """Flag deciding whether presence penalty is applied
    multiplicatively (True) or additively (False)."""

    penalty_bias: Optional[str] = None
    """Penalty bias for the completion."""

    penalty_exceptions: Optional[List[str]] = None
    """List of strings that may be generated without penalty,
    regardless of other penalty settings."""

    penalty_exceptions_include_stop_sequences: Optional[bool] = None
    """Should stop_sequences be included in penalty_exceptions."""

    best_of: Optional[int] = None
    """Returns the one with the "best of" results
    (highest log probability per token)."""

    n: int = 1
    """How many completions to generate for each prompt."""

    logit_bias: Optional[Dict[int, float]] = None
    """The logit bias allows influencing the likelihood of generating tokens."""

    log_probs: Optional[int] = None
    """Number of top log probabilities to be returned for each generated token."""

    tokens: Optional[bool] = False
    """Return the tokens of the completion."""

    disable_optimizations: Optional[bool] = False

    minimum_tokens: Optional[int] = 0
    """Generate at least this number of tokens."""

    echo: bool = False
    """Echo the prompt in the completion."""

    use_multiplicative_frequency_penalty: bool = False

    sequence_penalty: float = 0.0

    sequence_penalty_min_length: int = 2

    use_multiplicative_sequence_penalty: bool = False

    completion_bias_inclusion: Optional[Sequence[str]] = None

    completion_bias_inclusion_first_token_only: bool = False

    completion_bias_exclusion: Optional[Sequence[str]] = None

    completion_bias_exclusion_first_token_only: bool = False
    """Only consider the first token for the completion bias exclusion."""

    contextual_control_threshold: Optional[float] = None
    """If set to None, attention control parameters only apply to those tokens
    that have explicitly been set in the request.
    If set to a non-None value, control parameters are also applied to
    similar tokens."""

    control_log_additive: Optional[bool] = True
    """True: apply control by adding the log(control_factor) to attention scores.
    False: (attention_scores - -attention_scores.min(-1)) * control_factor"""

    repetition_penalties_include_completion: bool = True
    """Flag deciding whether presence penalty or frequency penalty
    are updated from the completion."""

    raw_completion: bool = False
    """Force the raw completion of the model to be returned."""

    stop_sequences: Optional[List[str]] = None
    """Stop sequences to use."""

    # Client params
    aleph_alpha_api_key: Optional[str] = None
    """API key for the Aleph Alpha API."""

    host: str = "https://api.aleph-alpha.com"
    """The hostname of the API host.
    The default one is "https://api.aleph-alpha.com"."""

    hosting: Optional[str] = None
    """Determines in which datacenters the request may be processed.
    You can either set the parameter to "aleph-alpha" or omit it (defaulting to None).
    Not setting this value, or setting it to None, gives us maximal flexibility in
    processing your request in our own datacenters and on servers hosted with other
    providers. Choose this option for maximal availability.
    Setting it to "aleph-alpha" allows us to only process the request in our own
    datacenters. Choose this option for maximal data privacy."""

    request_timeout_seconds: int = 305
    """Client-side timeout, set on the `requests` library calls made to the API.
    The server will close all requests after 300 seconds and return an internal
    server error."""

    total_retries: int = 8
    """The number of retries made in case a request fails with a retryable status
    code. If the last retry fails, a corresponding exception is raised. Note that
    exponential backoff is applied between retries, starting at 0.5 s after the
    first retry and doubling with each retry. So with the default of 8 retries, a
    total wait time of 63.5 s is added between the retries."""

    nice: bool = False
    """Setting this to True will signal to the API that you intend to be nice to
    other users by de-prioritizing your request below concurrent ones."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the API key and python package exist in the environment."""
        values["aleph_alpha_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY")
        )
        try:
            from aleph_alpha_client import Client

            values["client"] = Client(
                token=values["aleph_alpha_api_key"].get_secret_value(),
                host=values["host"],
                hosting=values["hosting"],
                request_timeout_seconds=values["request_timeout_seconds"],
                total_retries=values["total_retries"],
                nice=values["nice"],
            )
        except ImportError:
            raise ImportError(
                "Could not import aleph_alpha_client python package. "
                "Please install it with `pip install aleph_alpha_client`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Aleph Alpha API."""
        return {
            "maximum_tokens": self.maximum_tokens,
            "temperature": self.temperature,
            "top_k": self.top_k,
            "top_p": self.top_p,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "n": self.n,
            "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt,  # noqa: E501
            "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty,  # noqa: E501
            "penalty_bias": self.penalty_bias,
            "penalty_exceptions": self.penalty_exceptions,
            "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences,  # noqa: E501
            "best_of": self.best_of,
            "logit_bias": self.logit_bias,
            "log_probs": self.log_probs,
            "tokens": self.tokens,
            "disable_optimizations": self.disable_optimizations,
            "minimum_tokens": self.minimum_tokens,
            "echo": self.echo,
            "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty,  # noqa: E501
            "sequence_penalty": self.sequence_penalty,
            "sequence_penalty_min_length": self.sequence_penalty_min_length,
            "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty,  # noqa: E501
            "completion_bias_inclusion": self.completion_bias_inclusion,
            "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only,  # noqa: E501
            "completion_bias_exclusion": self.completion_bias_exclusion,
            "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only,  # noqa: E501
            "contextual_control_threshold": self.contextual_control_threshold,
            "control_log_additive": self.control_log_additive,
            "repetition_penalties_include_completion": self.repetition_penalties_include_completion,  # noqa: E501
            "raw_completion": self.raw_completion,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "aleph_alpha"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Aleph Alpha's completion endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = aleph_alpha("Tell me a joke.")
        """
        from aleph_alpha_client import CompletionRequest, Prompt

        params = self._default_params
        if self.stop_sequences is not None and stop is not None:
            raise ValueError(
                "stop sequences found in both the input and default params."
            )
        elif self.stop_sequences is not None:
            params["stop_sequences"] = self.stop_sequences
        else:
            params["stop_sequences"] = stop
        params = {**params, **kwargs}
        request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
        response = self.client.complete(model=self.model, request=request)
        text = response.completions[0].completion
        # If stop tokens are provided, Aleph Alpha's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop_sequences is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])
        return text


if __name__ == "__main__":
    aa = AlephAlpha()  # type: ignore[call-arg]
    print(aa.invoke("How are you?"))  # noqa: T201
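
A minimal usage sketch (not part of the module above), assuming the ``aleph_alpha_client`` package is installed and ``ALEPH_ALPHA_API_KEY`` is set in the environment; the parameter values and prompt are illustrative only.

    from langchain_community.llms import AlephAlpha

    llm = AlephAlpha(
        model="luminous-base",  # the default model
        maximum_tokens=32,      # cap the length of the completion
        temperature=0.0,        # low-randomness sampling
        stop_sequences=["\n"],  # _call strips the completion at the first newline
    )
    # invoke() is inherited from the Runnable interface of the base LLM class.
    print(llm.invoke("Q: What is the capital of France?\nA:"))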