Source code for langchain_community.llms.ctransformers

from functools import partial
from typing import Any, Dict, List, Optional, Sequence

from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.pydantic_v1 import root_validator


class CTransformers(LLM):
    """C Transformers LLM models.

    To use, you should have the ``ctransformers`` python package installed.
    See https://github.com/marella/ctransformers

    Example:
        .. code-block:: python

            from langchain_community.llms import CTransformers

            llm = CTransformers(model="/path/to/ggml-gpt-2.bin", model_type="gpt2")
    """

    client: Any  #: :meta private:

    model: str
    """The path to a model file or directory, or the name of a Hugging Face
    Hub model repo."""

    model_type: Optional[str] = None
    """The model type."""

    model_file: Optional[str] = None
    """The name of the model file in the repo or directory."""

    config: Optional[Dict[str, Any]] = None
    """The config parameters.
    See https://github.com/marella/ctransformers#config"""

    lib: Optional[str] = None
    """The path to a shared library, or one of `avx2`, `avx`, `basic`."""

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {
            "model": self.model,
            "model_type": self.model_type,
            "model_file": self.model_file,
            "config": self.config,
        }

    @property
    def _llm_type(self) -> str:
        """Return the type of llm."""
        return "ctransformers"

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the ``ctransformers`` package is installed."""
        try:
            from ctransformers import AutoModelForCausalLM
        except ImportError:
            raise ImportError(
                "Could not import `ctransformers` package. "
                "Please install it with `pip install ctransformers`"
            )

        config = values["config"] or {}
        values["client"] = AutoModelForCausalLM.from_pretrained(
            values["model"],
            model_type=values["model_type"],
            model_file=values["model_file"],
            lib=values["lib"],
            **config,
        )
        return values

    def _call(
        self,
        prompt: str,
        stop: Optional[Sequence[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Generate text from a prompt.

        Args:
            prompt: The prompt to generate text from.
            stop: A list of sequences to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                response = llm.invoke("Tell me a joke.")
        """
        text = []
        _run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
        # Stream chunks from the underlying ctransformers model, reporting
        # each one to the callback manager before joining them into the
        # final string.
        for chunk in self.client(prompt, stop=stop, stream=True):
            text.append(chunk)
            _run_manager.on_llm_new_token(chunk, verbose=self.verbose)
        return "".join(text)

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Asynchronous call out to the CTransformers generate method.

        Very helpful when streaming (like with websockets!).

        Args:
            prompt: The prompt to pass into the model.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = llm.invoke("Once upon a time, ")
        """
        text_callback = None
        if run_manager:
            text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
        text = ""
        # Tokens come from a synchronous generator; each one is forwarded
        # through the async callback before being appended to the result.
        for token in self.client(prompt, stop=stop, stream=True):
            if text_callback:
                await text_callback(token)
            text += token
        return text
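
# --- Usage sketches (illustrative, not part of the module) ---
# A minimal sketch of both call paths, assuming ``ctransformers`` is
# installed. The "marella/gpt-2-ggml" repo name is an assumption borrowed
# from the ctransformers README; swap in any local GGML file or Hub repo.
# Supported config keys are documented at
# https://github.com/marella/ctransformers#config.

if __name__ == "__main__":
    from langchain_core.callbacks import StreamingStdOutCallbackHandler

    # Blocking call: _call consumes the streamed chunks internally and
    # returns the joined text.
    llm = CTransformers(
        model="marella/gpt-2-ggml",
        config={"max_new_tokens": 64, "temperature": 0.8},
    )
    print(llm.invoke("AI is going to"))

    # Token-by-token streaming: _call reports each chunk to the run manager
    # via on_llm_new_token, so attaching a streaming callback handler prints
    # tokens as they are generated.
    streaming_llm = CTransformers(
        model="marella/gpt-2-ggml",
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    streaming_llm.invoke("Once upon a time, ")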