Source code for langchain_community.vectorstores.annoy

from __future__ import annotations

import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

import numpy as np
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import guard_import
from langchain_core.vectorstores import VectorStore

from langchain_community.docstore.base import Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.utils import maximal_marginal_relevance

INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"


def dependable_annoy_import() -> Any:
    """Import annoy if available, otherwise raise error."""
    return guard_import("annoy")
class Annoy(VectorStore):
    """`Annoy` vector store.

    To use, you should have the ``annoy`` python package installed.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import Annoy

            db = Annoy(embedding_function, index, metric, docstore, index_to_docstore_id)
    """
    def __init__(
        self,
        embedding_function: Callable,
        index: Any,
        metric: str,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.metric = metric
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
    @property
    def embeddings(self) -> Optional[Embeddings]:
        # TODO: Accept embedding object directly
        return None
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        raise NotImplementedError(
            "Annoy does not allow adding new data once the index is built."
        )
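``add_texts`` is unsupported because an Annoy index is static once ``build()`` has been called. A minimal sketch of the usual workaround, rebuilding the store over the combined corpus (``old_texts``, ``new_texts``, and ``embeddings`` are hypothetical placeholders):

    from langchain_community.vectorstores import Annoy

    # Annoy indexes cannot be appended to, so "adding" texts means
    # re-embedding and rebuilding the index over the full corpus.
    db = Annoy.from_texts(old_texts + new_texts, embeddings)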
    def process_index_results(
        self, idxs: List[int], dists: List[float]
    ) -> List[Tuple[Document, float]]:
        """Turn Annoy results into a list of documents and scores.

        Args:
            idxs: List of indices of the documents in the index.
            dists: List of distances of the documents in the index.
        Returns:
            List of Documents and scores.
        """
        docs = []
        for idx, dist in zip(idxs, dists):
            _id = self.index_to_docstore_id[idx]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, dist))
        return docs
    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: Inspect up to ``search_k`` nodes; defaults to
                ``n_trees * n`` if not provided.
        Returns:
            List of Documents most similar to the query and score for each.
        """
        idxs, dists = self.index.get_nns_by_vector(
            embedding, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)
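A usage sketch for callers that already hold a query vector (assumes ``db`` is an existing ``Annoy`` store and ``embeddings`` is an ``Embeddings`` instance):

    # Embed the query once, then search the index directly by vector.
    query_vector = embeddings.embed_query("my query")
    docs_and_scores = db.similarity_search_with_score_by_vector(query_vector, k=4)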
    def similarity_search_with_score_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the document at the given index.

        Args:
            docstore_index: Index of the document in the docstore.
            k: Number of Documents to return. Defaults to 4.
            search_k: Inspect up to ``search_k`` nodes; defaults to
                ``n_trees * n`` if not provided.
        Returns:
            List of Documents most similar to the query and score for each.
        """
        idxs, dists = self.index.get_nns_by_item(
            docstore_index, k, search_k=search_k, include_distances=True
        )
        return self.process_index_results(idxs, dists)
    def similarity_search_with_score(
        self, query: str, k: int = 4, search_k: int = -1
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: Inspect up to ``search_k`` nodes; defaults to
                ``n_trees * n`` if not provided.
        Returns:
            List of Documents most similar to the query and score for each.
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
        return docs
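A quick usage sketch (assumes ``db`` was built with ``Annoy.from_texts``):

    # Each result is a (Document, distance) pair; for the default "angular"
    # metric, smaller distances mean more similar documents.
    for doc, dist in db.similarity_search_with_score("my query", k=2):
        print(dist, doc.page_content)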
    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to the embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            search_k: Inspect up to ``search_k`` nodes; defaults to
                ``n_trees * n`` if not provided.
        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]
    def similarity_search_by_index(
        self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to docstore_index.

        Args:
            docstore_index: Index of the document in the docstore.
            k: Number of Documents to return. Defaults to 4.
            search_k: Inspect up to ``search_k`` nodes; defaults to
                ``n_trees * n`` if not provided.
        Returns:
            List of Documents most similar to the document at the given index.
        """
        docs_and_scores = self.similarity_search_with_score_by_index(
            docstore_index, k, search_k
        )
        return [doc for doc, _ in docs_and_scores]
    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to the query AND
        diversity among the selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch and pass to the MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree of
                diversity among the results, with 0 corresponding to maximum
                diversity and 1 to minimum diversity. Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        idxs = self.index.get_nns_by_vector(
            embedding, fetch_k, search_k=-1, include_distances=False
        )
        embeddings = [self.index.get_item_vector(i) for i in idxs]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        # ignore the -1's if not enough docs are returned/indexed
        selected_indices = [idxs[i] for i in mmr_selected if i != -1]
        docs = []
        for i in selected_indices:
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs
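A usage sketch for MMR search by vector (``db`` and ``embeddings`` are assumed as above):

    query_vector = embeddings.embed_query("my query")
    # fetch_k candidates come from the index; k of them are then selected,
    # trading relevance (lambda_mult -> 1) against diversity (lambda_mult -> 0).
    docs = db.max_marginal_relevance_search_by_vector(
        query_vector, k=4, fetch_k=20, lambda_mult=0.5
    )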
    @classmethod
    def __from(
        cls,
        texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        if metric not in INDEX_METRICS:
            raise ValueError(
                f"Unsupported distance metric: {metric}. "
                f"Expected one of {list(INDEX_METRICS)}"
            )
        annoy = guard_import("annoy")
        if not embeddings:
            raise ValueError("embeddings must be provided to build AnnoyIndex")
        f = len(embeddings[0])
        index = annoy.AnnoyIndex(f, metric=metric)
        for i, emb in enumerate(embeddings):
            index.add_item(i, emb)
        index.build(trees, n_jobs=n_jobs)

        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(embedding.embed_query, index, metric, docstore, index_to_id)
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        """Construct Annoy wrapper from raw documents.

        Args:
            texts: List of documents to index.
            embedding: Embedding function to use.
            metadatas: List of metadata dictionaries to associate with documents.
            metric: Metric to use for indexing. Defaults to "angular".
            trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore.
            3. Initializes the Annoy database.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Annoy
                from langchain_community.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                index = Annoy.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
        )
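A slightly fuller sketch showing the optional arguments (``texts`` is a hypothetical list of strings):

    from langchain_community.vectorstores import Annoy
    from langchain_community.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    metadatas = [{"source": f"doc-{i}"} for i in range(len(texts))]
    # "euclidean" must be one of INDEX_METRICS; more trees raise recall
    # at the cost of a bigger index and a slower build.
    db = Annoy.from_texts(
        texts, embeddings, metadatas=metadatas, metric="euclidean", trees=100
    )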
    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        metric: str = DEFAULT_METRIC,
        trees: int = 100,
        n_jobs: int = -1,
        **kwargs: Any,
    ) -> Annoy:
        """Construct Annoy wrapper from embeddings.

        Args:
            text_embeddings: List of tuples of (text, embedding).
            embedding: Embedding function to use.
            metadatas: List of metadata dictionaries to associate with documents.
            metric: Metric to use for indexing. Defaults to "angular".
            trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.

        This is a user friendly interface that:
            1. Creates an in memory docstore with the provided embeddings.
            2. Initializes the Annoy database.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Annoy
                from langchain_community.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
                db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
        )
    def save_local(self, folder_path: str, prefault: bool = False) -> None:
        """Save Annoy index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            prefault: Whether to pre-load the index into memory.
        """
        path = Path(folder_path)
        os.makedirs(path, exist_ok=True)
        # save index, index config, docstore and index_to_docstore_id
        config_object = ConfigParser()
        config_object["ANNOY"] = {
            "f": self.index.f,
            "metric": self.metric,
        }
        self.index.save(str(path / "index.annoy"), prefault=prefault)
        with open(path / "index.pkl", "wb") as file:
            pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
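A usage sketch for persisting a store (``db`` is an existing ``Annoy`` instance; the folder name is arbitrary):

    # Writes index.annoy (the Annoy index) and index.pkl (docstore,
    # id mapping, and index config) into the folder.
    db.save_local("my_annoy_index_and_docstore")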
    @classmethod
    def load_local(
        cls,
        folder_path: str,
        embeddings: Embeddings,
        *,
        allow_dangerous_deserialization: bool = False,
    ) -> Annoy:
        """Load Annoy index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries.
            allow_dangerous_deserialization: whether to allow deserialization
                of the data, which involves loading a pickle file.
                Pickle files can be modified by malicious actors to deliver
                a payload that results in execution of arbitrary code on
                your machine.
        """
        if not allow_dangerous_deserialization:
            raise ValueError(
                "The de-serialization relies on loading a pickle file. "
                "Pickle files can be modified to deliver a malicious payload that "
                "results in execution of arbitrary code on your machine. "
                "You will need to set `allow_dangerous_deserialization` to `True` to "
                "enable deserialization. If you do this, make sure that you "
                "trust the source of the data. For example, if you are loading a "
                "file that you created, and know that no one else has modified the "
                "file, then this is safe to do. Do not set this to `True` if you are "
                "loading a file from an untrusted source (e.g., some random site on "
                "the internet)."
            )
        path = Path(folder_path)
        # load the index separately since it is not picklable
        annoy = guard_import("annoy")
        # load docstore and index_to_docstore_id
        with open(path / "index.pkl", "rb") as file:
            docstore, index_to_docstore_id, config_object = pickle.load(file)
        f = int(config_object["ANNOY"]["f"])
        metric = config_object["ANNOY"]["metric"]
        index = annoy.AnnoyIndex(f, metric=metric)
        index.load(str(path / "index.annoy"))
        return cls(
            embeddings.embed_query, index, metric, docstore, index_to_docstore_id
        )
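And the matching load, which must explicitly opt in to pickle deserialization (``embeddings`` should be the same ``Embeddings`` implementation used at build time):

    db = Annoy.load_local(
        "my_annoy_index_and_docstore",
        embeddings,
        # Only set this for files whose provenance you trust.
        allow_dangerous_deserialization=True,
    )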