Source code for langchain_community.embeddings.huggingface

from typing import Any, Dict, List, Optional

import requests
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, SecretStr

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_BGE_MODEL = "BAAI/bge-large-en"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_EN = (
    "Represent this question for searching relevant passages: "
)
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:"


class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """HuggingFace sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': False}
            hf = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the Sentence Transformer model, such as `device`,
    `prompts`, `default_prompt_name`, `revision`, `trust_remote_code`, or `token`.
    See also the Sentence Transformer documentation:
    https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer"""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the Sentence
    Transformer model, such as `prompt_name`, `prompt`, `batch_size`, `precision`,
    `normalize_embeddings`, and more.
    See also the Sentence Transformer documentation:
    https://sbert.net/docs/package_reference/SentenceTransformer.html#sentence_transformers.SentenceTransformer.encode"""
    multi_process: bool = False
    """Run encode() on multiple GPUs."""
    show_progress: bool = False
    """Whether to show a progress bar."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        import sentence_transformers

        texts = list(map(lambda x: x.replace("\n", " "), texts))
        if self.multi_process:
            pool = self.client.start_multi_process_pool()
            embeddings = self.client.encode_multi_process(texts, pool)
            sentence_transformers.SentenceTransformer.stop_multi_process_pool(pool)
        else:
            embeddings = self.client.encode(
                texts, show_progress_bar=self.show_progress, **self.encode_kwargs
            )

        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]

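# Illustrative usage sketch (not part of the library source): constructing
# HuggingFaceEmbeddings and embedding documents and a query. Assumes the
# `sentence-transformers` package is installed and the default model can be
# downloaded; the function name `_example_hf_embeddings` is ours.
def _example_hf_embeddings() -> None:
    from langchain_community.embeddings import HuggingFaceEmbeddings

    hf = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": False},
    )
    doc_vectors = hf.embed_documents(["First document.", "Second document."])
    query_vector = hf.embed_query("What do the documents say?")
    # doc_vectors is a List[List[float]]; query_vector is a single List[float].
    print(len(doc_vectors), len(query_vector))
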
class HuggingFaceInstructEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding queries."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(
                self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
            )
        except ImportError as e:
            raise ImportError("Dependencies for InstructorEmbedding not found.") from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
        return embedding.tolist()

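# Illustrative usage sketch (not part of the library source): the instruct model
# pairs each text with an instruction, so documents and queries receive different
# prefixes. Assumes `InstructorEmbedding` and `sentence-transformers` are
# installed; the function name is ours.
def _example_instruct_embeddings() -> None:
    from langchain_community.embeddings import HuggingFaceInstructEmbeddings

    hf = HuggingFaceInstructEmbeddings(
        model_name="hkunlp/instructor-large",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )
    # Documents are encoded as [embed_instruction, text] pairs,
    # queries as [query_instruction, text].
    doc_vectors = hf.embed_documents(["LangChain integrates many embedding models."])
    query_vector = hf.embed_query("Which embedding models does LangChain support?")
    print(len(doc_vectors[0]), len(query_vector))
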
class HuggingFaceBgeEmbeddings(BaseModel, Embeddings):
    """HuggingFace sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.
    To use Nomic, make sure the version of ``sentence_transformers`` is >= 2.3.0.

    Bge Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceBgeEmbeddings

            model_name = "BAAI/bge-large-en"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )

    Nomic Example:
        .. code-block:: python

            from langchain_community.embeddings import HuggingFaceBgeEmbeddings

            model_name = "nomic-ai/nomic-embed-text-v1"
            model_kwargs = {
                'device': 'cpu',
                'trust_remote_code': True
            }
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs,
                query_instruction="search_query:",
                embed_instruction="search_document:"
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_BGE_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can also be set by the SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding queries."""
    embed_instruction: str = ""
    """Instruction to use for embedding documents."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers
        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = sentence_transformers.SentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
        if "-zh" in self.model_name:
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = [self.embed_instruction + t.replace("\n", " ") for t in texts]
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.client.encode(
            self.query_instruction + text, **self.encode_kwargs
        )
        return embedding.tolist()

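# Illustrative usage sketch (not part of the library source): with BGE models the
# query instruction is prepended to the text as a plain string prefix, and a
# "-zh" model name switches the default query instruction to the Chinese prompt.
# Assumes `sentence-transformers` is installed; the function name is ours.
def _example_bge_embeddings() -> None:
    from langchain_community.embeddings import HuggingFaceBgeEmbeddings

    hf = HuggingFaceBgeEmbeddings(
        model_name="BAAI/bge-large-en",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": True},
    )
    # embed_query() encodes query_instruction + text; embed_documents() uses
    # embed_instruction (empty by default for BGE) as the prefix.
    doc_vectors = hf.embed_documents(["BGE embeddings work well for retrieval."])
    query_vector = hf.embed_query("Which embeddings are good for retrieval?")
    print(len(doc_vectors[0]), len(query_vector))
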
class HuggingFaceInferenceAPIEmbeddings(BaseModel, Embeddings):
    """Embed texts using the HuggingFace API.

    Requires a HuggingFace Inference API key and a model name.
    """

    api_key: SecretStr
    """Your API key for the HuggingFace Inference API."""
    model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
    """The name of the model to use for text embeddings."""
    api_url: Optional[str] = None
    """Custom inference endpoint url. None for using the default public url."""
    additional_headers: Dict[str, str] = {}
    """Pass additional headers to the requests library if needed."""

    @property
    def _api_url(self) -> str:
        return self.api_url or self._default_api_url

    @property
    def _default_api_url(self) -> str:
        return (
            "https://api-inference.huggingface.co"
            "/pipeline"
            "/feature-extraction"
            f"/{self.model_name}"
        )

    @property
    def _headers(self) -> dict:
        return {
            "Authorization": f"Bearer {self.api_key.get_secret_value()}",
            **self.additional_headers,
        }

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Get the embeddings for a list of texts.

        Args:
            texts (Documents): A list of texts to get embeddings for.

        Returns:
            Embedded texts as List[List[float]], where each inner List[float]
            corresponds to a single input text.

        Example:
            .. code-block:: python

                from langchain_community.embeddings import (
                    HuggingFaceInferenceAPIEmbeddings,
                )

                hf_embeddings = HuggingFaceInferenceAPIEmbeddings(
                    api_key="your_api_key",
                    model_name="sentence-transformers/all-MiniLM-l6-v2"
                )
                texts = ["Hello, world!", "How are you?"]
                hf_embeddings.embed_documents(texts)
        """  # noqa: E501
        response = requests.post(
            self._api_url,
            headers=self._headers,
            json={
                "inputs": texts,
                "options": {"wait_for_model": True, "use_cache": True},
            },
        )
        return response.json()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]

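# Illustrative usage sketch (not part of the library source): calling the hosted
# feature-extraction pipeline instead of loading a model locally. Assumes a valid
# HuggingFace Inference API token is stored in the HUGGINGFACEHUB_API_TOKEN
# environment variable; the function name and variable names are ours.
def _example_inference_api_embeddings() -> None:
    import os

    from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings

    hf = HuggingFaceInferenceAPIEmbeddings(
        api_key=os.environ["HUGGINGFACEHUB_API_TOKEN"],
        model_name="sentence-transformers/all-MiniLM-L6-v2",
    )
    # embed_documents() POSTs the texts to the feature-extraction endpoint and
    # returns the parsed JSON response as nested lists of floats.
    doc_vectors = hf.embed_documents(["Hello, world!", "How are you?"])
    query_vector = hf.embed_query("greeting")
    print(len(doc_vectors), len(query_vector))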