时间加权重新排序¶
In [ ]:
Copied!
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
/home/loganm/miniconda3/envs/llama-index/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm
将文档解析为节点,添加到文档存储库¶
在这个例子中,有 PG 的文章的 3 个不同版本。它们在大部分内容上都是相同的,除了一个特定的部分,详细说明了他们为 Viaweb 筹集的资金金额。
V1: 50k,V2: 30k,V3: 10k
V1: -1 天,V2: -2 天,V3: -3 天
这个想法是鼓励索引获取最新的信息(即 V3)。
In [ ]:
Copied!
# Load the three versioned copies of the essay.
from llama_index.core import StorageContext

now = datetime.now()
key = "__last_accessed__"

doc1 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]

# Define global settings: split documents into 512-token chunks.
from llama_index.core import Settings

Settings.text_splitter = SentenceSplitter(chunk_size=512)

# Parse each document version into nodes using the splitter from Settings.
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])

# Stamp the chunk that differs between versions (index 14) with a
# "last accessed" timestamp: newest version (v3) is 1 hour old, oldest
# (v1) is 3 hours old. The timestamp key is excluded from the text the
# LLM sees, so it cannot simply read the dates.
nodes1[14].metadata[key] = (now - timedelta(hours=3)).timestamp()
nodes1[14].excluded_llm_metadata_keys = [key]
nodes2[14].metadata[key] = (now - timedelta(hours=2)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
nodes3[14].metadata[key] = (now - timedelta(hours=1)).timestamp()
# Bug fix: the original set the exclusion on nodes2 a second time and
# never on nodes3, so the LLM could see v3's raw timestamp.
nodes3[14].excluded_llm_metadata_keys = [key]

# Add the three competing chunks to the docstore.
docstore = SimpleDocumentStore()
nodes = [nodes1[14], nodes2[14], nodes3[14]]
docstore.add_documents(nodes)

storage_context = StorageContext.from_defaults(docstore=docstore)
# Load the three versioned copies of the essay.
from llama_index.core import StorageContext

now = datetime.now()
key = "__last_accessed__"

doc1 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
    input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]

# Define global settings: split documents into 512-token chunks.
from llama_index.core import Settings

Settings.text_splitter = SentenceSplitter(chunk_size=512)

# Parse each document version into nodes using the splitter from Settings.
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])

# Stamp the chunk that differs between versions (index 14) with a
# "last accessed" timestamp: newest version (v3) is 1 hour old, oldest
# (v1) is 3 hours old. The timestamp key is excluded from the text the
# LLM sees, so it cannot simply read the dates.
nodes1[14].metadata[key] = (now - timedelta(hours=3)).timestamp()
nodes1[14].excluded_llm_metadata_keys = [key]
nodes2[14].metadata[key] = (now - timedelta(hours=2)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
nodes3[14].metadata[key] = (now - timedelta(hours=1)).timestamp()
# Bug fix: the original set the exclusion on nodes2 a second time and
# never on nodes3, so the LLM could see v3's raw timestamp.
nodes3[14].excluded_llm_metadata_keys = [key]

# Add the three competing chunks to the docstore.
docstore = SimpleDocumentStore()
nodes = [nodes1[14], nodes2[14], nodes3[14]]
docstore.add_documents(nodes)

storage_context = StorageContext.from_defaults(docstore=docstore)
构建索引¶
In [ ]:
Copied!
# Build the vector index over the three competing node versions,
# backed by the docstore via the storage context.
index = VectorStoreIndex(nodes, storage_context=storage_context)
# Build the vector index (same call, rendered copy of the cell above).
index = VectorStoreIndex(nodes, storage_context=storage_context)
定义Recency后处理器¶
In [ ]:
Copied!
# Recency re-ranker: decay older nodes' scores (time_decay=0.5), keep
# only the single freshest result (top_k=1), and do not bump the
# last-accessed timestamp when a node is retrieved.
node_postprocessor = TimeWeightedPostprocessor(
    top_k=1,
    time_decay=0.5,
    time_access_refresh=False,
)
node_postprocessor = TimeWeightedPostprocessor(
    top_k=1,
    time_decay=0.5,
    time_access_refresh=False,
)
查询索引¶
In [ ]:
Copied!
# NOTE(review): this cell references `query_str`, which is only defined
# further down this page (in the "low-level usage" section) — running the
# page top-to-bottom raises NameError here. It appears to be a misplaced
# duplicate of the later retrieval cell; confirm intended cell order.
# response_mode="no_text" retrieves source nodes without synthesizing an answer.
query_engine = index.as_query_engine(
similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(
query_str,
)
resp_nodes = [n for n in init_response.source_nodes]
query_engine = index.as_query_engine(
similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(
query_str,
)
resp_nodes = [n for n in init_response.source_nodes]
In [ ]:
Copied!
# Render the answer in the notebook.
# NOTE(review): `response` is not defined by the cells above this point on
# the page; judging by the "$50,000" output, it presumably came from a
# query without the time-weighted postprocessor — confirm cell order.
display_response(response)
display_response(response)
Final Response:
$50,000
In [ ]:
Copied!
# Query using the time-weighted node postprocessor: retrieve the top 3
# similar chunks, then keep only the most recent one before synthesis.
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor]
)
response = query_engine.query(
"作者从Idelle的丈夫(朱利安)那里为Viaweb筹集了多少种子资金?",
)
# Query using the time-weighted node postprocessor (rendered copy of the
# cell above).
query_engine = index.as_query_engine(
similarity_top_k=3, node_postprocessors=[node_postprocessor]
)
response = query_engine.query(
"作者从Idelle的丈夫(朱利安)那里为Viaweb筹集了多少种子资金?",
)
In [ ]:
Copied!
# Render the time-weighted answer — it should report v3's figure ($10k).
display_response(response)
display_response(response)
Final Response:
The author raised $10,000 in seed funding from Idelle's husband (Julian) for Viaweb.
查询索引(底层用法)¶
在这个例子中,我们首先从查询调用中获取完整的节点集,然后将其发送到节点后处理器,最后通过摘要索引合成响应。
In [ ]:
Copied!
from llama_index.core import SummaryIndex
from llama_index.core import SummaryIndex
In [ ]:
Copied!
# The question posed to both the raw index and the time-weighted pipeline.
query_str = "How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?"
query_str = "How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?"
In [ ]:
Copied!
# Retrieval-only pass: response_mode="no_text" fetches the top-3 source
# nodes without synthesizing an answer, so we can post-process them ourselves.
query_engine = index.as_query_engine(
    similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(query_str)
resp_nodes = list(init_response.source_nodes)
# Rendered copy of the cell above.
query_engine = index.as_query_engine(
    similarity_top_k=3, response_mode="no_text"
)
init_response = query_engine.query(query_str)
resp_nodes = list(init_response.source_nodes)
In [ ]:
Copied!
# Get the post-processed nodes — should be the single most recent node.
new_resp_nodes = node_postprocessor.postprocess_nodes(resp_nodes)
summary_index = SummaryIndex([n.node for n in new_resp_nodes])
query_engine = summary_index.as_query_engine()
response = query_engine.query(query_str)
# Get the post-processed nodes (rendered copy of the cell above).
new_resp_nodes = node_postprocessor.postprocess_nodes(resp_nodes)
summary_index = SummaryIndex([n.node for n in new_resp_nodes])
query_engine = summary_index.as_query_engine()
response = query_engine.query(query_str)
In [ ]:
Copied!
# Render the low-level-pipeline answer — again v3's figure ($10k).
display_response(response)
display_response(response)
Final Response:
The author raised $10,000 in seed funding from Idelle's husband (Julian) for Viaweb.