Kdbai

KDBAIVectorStore #

Bases: BasePydanticVectorStore

KDBAI vector store.

In this vector store, we store the text, its embedding, and its metadata in a KDBAI vector store table. This implementation allows the use of an already existing table.

Parameters:

Name   Type   Description                                      Default
table  Table  The KDB.AI table to use as storage.              None
batch  int    Batch size for inserting data. Defaults to 100.  100

Returns:

Name              Type              Description
KDBAIVectorStore  KDBAIVectorStore  Vector store that supports add and query.
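
A minimal construction sketch. The endpoint, table name, and table-retrieval call below are assumptions; the exact handle-retrieval API depends on your kdbai_client version, and the table must already exist with the expected schema:

import kdbai_client as kdbai
from llama_index.vector_stores.kdbai import KDBAIVectorStore

# Hypothetical endpoint; adjust to your KDB.AI deployment.
session = kdbai.Session(endpoint="http://localhost:8082")

# Assumes a table named "documents" already exists with the columns the
# store expects (document_id, text, embedding, ...); retrieval may differ
# by client version.
table = session.table("documents")

# For hybrid search, pass hybrid_search=True and optionally a sparse_encoder
# callable; otherwise a default sparse encoder is used.
vector_store = KDBAIVectorStore(table=table, batch_size=200)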

Source code in llama_index/vector_stores/kdbai/base.py
class KDBAIVectorStore(BasePydanticVectorStore):
    """KDBAI向量存储。

在这个向量存储中,我们将文本、其嵌入和元数据存储在KDBAI向量存储表中。这个实现允许使用已经存在的表。

Args:
    table kdbai.Table: 用作存储的KDB.AI表。
    batch (int, optional): 插入数据的批量大小。默认为100。

Returns:
    KDBAIVectorStore: 支持添加和查询的向量存储。"""

    stores_text: bool = True
    flat_metadata: bool = True

    hybrid_search: bool = False
    batch_size: int

    _table: Any = PrivateAttr()
    _sparse_encoder: Optional[Callable] = PrivateAttr()

    def __init__(
        self,
        table: Any = None,
        hybrid_search: bool = False,
        sparse_encoder: Optional[Callable] = None,
        batch_size: int = DEFAULT_BATCH_SIZE,
        **kwargs: Any,
    ) -> None:
        """初始化参数。"""
        try:
            import kdbai_client as kdbai

            logger.info("KDBAI client version: " + kdbai.__version__)

        except ImportError:
            raise ValueError(
                "Could not import kdbai_client package. "
                "Please add it to the dependencies."
            )

        if table is None:
            raise ValueError("Must provide an existing KDB.AI table.")
        else:
            self._table = table

        if hybrid_search:
            if sparse_encoder is None:
                self._sparse_encoder = default_sparse_encoder
            else:
                self._sparse_encoder = sparse_encoder

        super().__init__(batch_size=batch_size, hybrid_search=hybrid_search)

    @property
    def client(self) -> Any:
        """返回KDB.AI客户端。"""
        return self._table

    @classmethod
    def class_name(cls) -> str:
        return "KDBAIVectorStore"

    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """将节点添加到KDBAI向量存储中。

Args:
    节点(List[BaseNode]):要添加的节点列表。

Returns:
    List[str]:已添加的文档ID列表。
"""
        df = pd.DataFrame()
        docs = []
        schema = self._table.schema()["columns"]
        if self.hybrid_search:
            schema = [item for item in schema if item["name"] != "sparseVectors"]

        try:
            for node in nodes:
                doc = {
                    "document_id": node.node_id.encode("utf-8"),
                    "text": node.text.encode("utf-8"),
                    "embedding": node.embedding,
                }

                if self.hybrid_search:
                    doc["sparseVectors"] = self._sparse_encoder([node.get_content()])

                # handle extra columns
                if len(schema) > len(DEFAULT_COLUMN_NAMES):
                    for column in schema[len(DEFAULT_COLUMN_NAMES) :]:
                        try:
                            doc[column["name"]] = convert_metadata_col(
                                column, node.metadata[column["name"]]
                            )
                        except Exception as e:
                            logger.error(
                                f"Error writing column {column['name']} as type {column['pytype']}: {e}."
                            )

                docs.append(doc)

            df = pd.DataFrame(docs)
            for i in range((len(df) - 1) // self.batch_size + 1):
                batch = df.iloc[i * self.batch_size : (i + 1) * self.batch_size]
                try:
                    self._table.insert(batch, warn=False)
                    logger.info(f"inserted batch {i}")
                except Exception as e:
                    logger.exception(
                        f"Failed to insert batch {i} of documents into the datastore: {e}"
                    )

            return [x.decode("utf-8") for x in df["document_id"].tolist()]

        except Exception as e:
            logger.error(f"Error preparing data for KDB.AI: {e}.")

    def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
        if query.filters is None:
            filter = []
        else:
            filter = query.filters

        if self.hybrid_search:
            alpha = query.alpha if query.alpha is not None else 0.5
            sparse_vectors = self._sparse_encoder([query.query_str])
            results = self._table.hybrid_search(
                dense_vectors=[query.query_embedding],
                sparse_vectors=sparse_vectors,
                n=query.similarity_top_k,
                filter=filter,
                alpha=alpha,
            )[0]
        else:
            results = self._table.search(
                vectors=[query.query_embedding], n=query.similarity_top_k, filter=filter
            )[0]

        top_k_nodes = []
        top_k_ids = []
        top_k_scores = []

        for result in results.to_dict(orient="records"):
            metadata = {x: result[x] for x in result if x not in DEFAULT_COLUMN_NAMES}
            node = TextNode(
                text=result["text"], id_=result["document_id"], metadata=metadata
            )
            top_k_ids.append(result["document_id"])
            top_k_nodes.append(node)
            top_k_scores.append(result["__nn_distance"])

        return VectorStoreQueryResult(
            nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
        )

    def delete(self, **delete_kwargs: Any) -> None:
        raise Exception("Not implemented.")
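
Since query takes a VectorStoreQuery, a minimal dense-search sketch looks like this. The query embedding below is a placeholder and must match the table's vector dimension; for hybrid search the store also reads query.query_str and query.alpha:

from llama_index.core.vector_stores.types import VectorStoreQuery

# Placeholder embedding; use your embedding model's output in practice.
query = VectorStoreQuery(query_embedding=[0.1] * 1536, similarity_top_k=5)
result = vector_store.query(query)

# Scores come from the engine's __nn_distance column.
for node, score in zip(result.nodes, result.similarities):
    print(node.node_id, score, node.text[:50])

Note that delete is not implemented and raises on any call.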

client property #

client: Any

Return the KDB.AI client.
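
A small sketch (assumes the vector_store from the construction example above). The property exposes the underlying table handle, so table-level operations remain available:

# Inspect the backing table through the store.
table = vector_store.client
print(table.schema())  # the column definitions add() validates against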

add #

add(nodes: List[BaseNode], **add_kwargs: Any) -> List[str]

Add nodes to the KDBAI vector store.

Returns:

Type       Description
List[str]  List of IDs of the added documents.
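
A hedged sketch of adding pre-embedded nodes (assumes the vector_store from the construction example; the texts, embedding values, and the 1536 dimension are illustrative and must match the table's "embedding" column):

from llama_index.core.schema import TextNode

# Illustrative nodes; in practice embeddings come from your embedding model.
nodes = [
    TextNode(text="KDB.AI is a vector database.", embedding=[0.1] * 1536),
    TextNode(text="LlamaIndex integrates with KDB.AI.", embedding=[0.2] * 1536),
]

ids = vector_store.add(nodes)  # returns the document_id of each inserted node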

Source code in llama_index/vector_stores/kdbai/base.py
    def add(
        self,
        nodes: List[BaseNode],
        **add_kwargs: Any,
    ) -> List[str]:
        """将节点添加到KDBAI向量存储中。

Args:
    节点(List[BaseNode]):要添加的节点列表。

Returns:
    List[str]:已添加的文档ID列表。
"""
        df = pd.DataFrame()
        docs = []
        schema = self._table.schema()["columns"]
        if self.hybrid_search:
            schema = [item for item in schema if item["name"] != "sparseVectors"]

        try:
            for node in nodes:
                doc = {
                    "document_id": node.node_id.encode("utf-8"),
                    "text": node.text.encode("utf-8"),
                    "embedding": node.embedding,
                }

                if self.hybrid_search:
                    doc["sparseVectors"] = self._sparse_encoder([node.get_content()])

                # handle extra columns
                if len(schema) > len(DEFAULT_COLUMN_NAMES):
                    for column in schema[len(DEFAULT_COLUMN_NAMES) :]:
                        try:
                            doc[column["name"]] = convert_metadata_col(
                                column, node.metadata[column["name"]]
                            )
                        except Exception as e:
                            logger.error(
                                f"Error writing column {column['name']} as type {column['pytype']}: {e}."
                            )

                docs.append(doc)

            df = pd.DataFrame(docs)
            for i in range((len(df) - 1) // self.batch_size + 1):
                batch = df.iloc[i * self.batch_size : (i + 1) * self.batch_size]
                try:
                    self._table.insert(batch, warn=False)
                    logger.info(f"inserted batch {i}")
                except Exception as e:
                    logger.exception(
                        f"Failed to insert batch {i} of documents into the datastore: {e}"
                    )

            return [x.decode("utf-8") for x in df["document_id"].tolist()]

        except Exception as e:
            logger.error(f"Error preparing data for KDB.AI: {e}.")