# Source code for langchain_community.embeddings.llamafile
import logging
from typing import List, Optional
import requests
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel
# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)
class LlamafileEmbeddings(BaseModel, Embeddings):
    """Llamafile lets you distribute and run large language models with a
    single file.

    To get started, see: https://github.com/Mozilla-Ocho/llamafile

    To use this class, you will need to first:

    1. Download a llamafile.
    2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
    3. Start the llamafile in server mode with embeddings enabled:

        `./path/to/model.llamafile --server --nobrowser --embedding`

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamafileEmbeddings
            embedder = LlamafileEmbeddings()
            doc_embeddings = embedder.embed_documents(
                [
                    "Alpha is the first letter of the Greek alphabet",
                    "Beta is the second letter of the Greek alphabet",
                ]
            )
            query_embedding = embedder.embed_query(
                "What is the second letter of the Greek alphabet"
            )
    """

    # Base URL that the llamafile server is listening on.
    base_url: str = "http://localhost:8080"

    # Timeout (in seconds) for server requests; None means wait indefinitely.
    request_timeout: Optional[int] = None

    def _embed(self, text: str) -> List[float]:
        """POST ``text`` to the server's ``/embedding`` endpoint and return
        the embedding vector.

        Args:
            text: The text to embed.

        Returns:
            The embedding vector for ``text``.

        Raises:
            requests.exceptions.ConnectionError: If no server is reachable
                at ``self.base_url``.
            requests.exceptions.HTTPError: If the server returns a non-2xx
                status code.
            KeyError: If the response JSON is missing the 'embedding' key.
            ValueError: If the returned vector sums to exactly 0 (symptom of
                a server started without ``--embedding``; see comment below).
        """
        try:
            response = requests.post(
                url=f"{self.base_url}/embedding",
                headers={
                    "Content-Type": "application/json",
                },
                json={
                    "content": text,
                },
                timeout=self.request_timeout,
            )
        except requests.exceptions.ConnectionError:
            # Re-raise with an actionable message pointing at the configured URL.
            raise requests.exceptions.ConnectionError(
                f"Could not connect to Llamafile server. Please make sure "
                f"that a server is running at {self.base_url}."
            )

        # Raise exception if we got a bad (non-200) response status code
        response.raise_for_status()

        contents = response.json()
        if "embedding" not in contents:
            raise KeyError(
                "Unexpected output from /embedding endpoint, output dict "
                "missing 'embedding' key."
            )

        embedding = contents["embedding"]
        # Sanity check the embedding vector:
        # Prior to llamafile v0.6.2, if the server was not started with the
        # `--embedding` option, the embedding endpoint would always return a
        # 0-vector. See issue:
        # https://github.com/Mozilla-Ocho/llamafile/issues/243
        # So here we raise an exception if the vector sums to exactly 0.
        if sum(embedding) == 0.0:
            raise ValueError(
                "Embedding sums to 0, did you start the llamafile server with "
                "the `--embedding` option enabled?"
            )

        return embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed documents using a llamafile server running at `self.base_url`.
        The llamafile server should be started in a separate process before
        invoking this method.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # One request per document; order of results matches input order.
        return [self._embed(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using a llamafile server running at `self.base_url`.
        The llamafile server should be started in a separate process before
        invoking this method.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embed(text)