@deprecated.deprecated(version="0.10.53",reason=("KGTableRetriever is deprecated, it is recommended to use ""PropertyGraphIndex and associated retrievers instead."),)classKGTableRetriever(BaseRetriever):""" KG Table Retriever. Arguments are shared among subclasses. Args: query_keyword_extract_template (Optional[QueryKGExtractPrompt]): A Query KG Extraction Prompt (see :ref:`Prompt-Templates`). refine_template (Optional[BasePromptTemplate]): A Refinement Prompt (see :ref:`Prompt-Templates`). text_qa_template (Optional[BasePromptTemplate]): A Question Answering Prompt (see :ref:`Prompt-Templates`). max_keywords_per_query (int): Maximum number of keywords to extract from query. num_chunks_per_query (int): Maximum number of text chunks to query. include_text (bool): Use the document text source from each relevant triplet during queries. retriever_mode (KGRetrieverMode): Specifies whether to use keywords, embeddings, or both to find relevant triplets. Should be one of "keyword", "embedding", or "hybrid". similarity_top_k (int): The number of top embeddings to use (if embeddings are used). graph_store_query_depth (int): The depth of the graph store query. use_global_node_triplets (bool): Whether to get more keywords(entities) from text chunks matched by keywords. This helps introduce more global knowledge. While it's more expensive, thus to be turned off by default. max_knowledge_sequence (int): The maximum number of knowledge sequence to include in the response. By default, it's 30. """def__init__(self,index:KnowledgeGraphIndex,llm:Optional[LLM]=None,embed_model:Optional[BaseEmbedding]=None,query_keyword_extract_template:Optional[BasePromptTemplate]=None,max_keywords_per_query:int=10,num_chunks_per_query:int=10,include_text:bool=True,retriever_mode:Optional[KGRetrieverMode]=KGRetrieverMode.KEYWORD,similarity_top_k:int=2,graph_store_query_depth:int=2,use_global_node_triplets:bool=False,max_knowledge_sequence:int=REL_TEXT_LIMIT,callback_manager:Optional[CallbackManager]=None,object_map:Optional[dict]=None,verbose:bool=False,**kwargs:Any,)->None:"""Initialize params."""assertisinstance(index,KnowledgeGraphIndex)self._index=indexself._index_struct=self._index.index_structself._docstore=self._index.docstoreself.max_keywords_per_query=max_keywords_per_queryself.num_chunks_per_query=num_chunks_per_queryself.query_keyword_extract_template=query_keyword_extract_templateorDQKETself.similarity_top_k=similarity_top_kself._include_text=include_textself._retriever_mode=(KGRetrieverMode(retriever_mode)ifretriever_modeelseKGRetrieverMode.KEYWORD)self._llm=llmorSettings.llmself._embed_model=embed_modelorSettings.embed_modelself._graph_store=index.graph_storeself.graph_store_query_depth=graph_store_query_depthself.use_global_node_triplets=use_global_node_tripletsself.max_knowledge_sequence=max_knowledge_sequenceself._verbose=kwargs.get("verbose",False)refresh_schema=kwargs.get("refresh_schema",False)try:self._graph_schema=self._graph_store.get_schema(refresh=refresh_schema)exceptNotImplementedError:self._graph_schema=""exceptExceptionase:logger.warning(f"Failed to get graph schema: {e}")self._graph_schema=""super().__init__(callback_manager=callback_managerorSettings.callback_manager,object_map=object_map,verbose=verbose,)def_get_keywords(self,query_str:str)->List[str]:"""Extract 
keywords."""response=self._llm.predict(self.query_keyword_extract_template,max_keywords=self.max_keywords_per_query,question=query_str,)keywords=extract_keywords_given_response(response,start_token="KEYWORDS:",lowercase=False)returnlist(keywords)def_extract_rel_text_keywords(self,rel_texts:List[str])->List[str]:"""Find the keywords for given rel text triplets."""keywords=[]forrel_textinrel_texts:splited_texts=rel_text.split(",")iflen(splited_texts)<=0:continuekeyword=splited_texts[0]ifkeyword:keywords.append(keyword.strip("(\"'"))# Return the Object as welliflen(splited_texts)<=2:continuekeyword=splited_texts[2]ifkeyword:keywords.append(keyword.strip(" ()\"'"))returnkeywordsdef_retrieve(self,query_bundle:QueryBundle,)->List[NodeWithScore]:"""Get nodes for response."""node_visited=set()keywords=self._get_keywords(query_bundle.query_str)ifself._verbose:print_text(f"Extracted keywords: {keywords}\n",color="green")rel_texts=[]cur_rel_map={}chunk_indices_count:Dict[str,int]=defaultdict(int)ifself._retriever_mode!=KGRetrieverMode.EMBEDDING:forkeywordinkeywords:subjs={keyword}node_ids=self._index_struct.search_node_by_keyword(keyword)fornode_idinnode_ids[:GLOBAL_EXPLORE_NODE_LIMIT]:ifnode_idinnode_visited:continueifself._include_text:chunk_indices_count[node_id]+=1node_visited.add(node_id)ifself.use_global_node_triplets:# Get nodes from keyword search, and add them to the subjs# set. This helps introduce more global knowledge into the# query. While it's more expensive, thus to be turned off# by default, it can be useful for some applications.# TODO: we should a keyword-node_id map in IndexStruct, so that# node-keywords extraction with LLM will be called only once# during indexing.extended_subjs=self._get_keywords(self._docstore.get_node(node_id).get_content(metadata_mode=MetadataMode.LLM))subjs.update(extended_subjs)rel_map=self._graph_store.get_rel_map(list(subjs),self.graph_store_query_depth)logger.debug(f"rel_map: {rel_map}")ifnotrel_map:continuerel_texts.extend([str(rel_obj)forrel_objsinrel_map.values()forrel_objinrel_objs])cur_rel_map.update(rel_map)if(self._retriever_mode!=KGRetrieverMode.KEYWORDandlen(self._index_struct.embedding_dict)>0):query_embedding=self._embed_model.get_text_embedding(query_bundle.query_str)all_rel_texts=list(self._index_struct.embedding_dict.keys())rel_text_embeddings=[self._index_struct.embedding_dict[_id]for_idinall_rel_texts]similarities,top_rel_texts=get_top_k_embeddings(query_embedding,rel_text_embeddings,similarity_top_k=self.similarity_top_k,embedding_ids=all_rel_texts,)logger.debug(f"Found the following rel_texts+query similarites: {similarities!s}")logger.debug(f"Found the following top_k rel_texts: {rel_texts!s}")rel_texts.extend(top_rel_texts)eliflen(self._index_struct.embedding_dict)==0:logger.warning("Index was not constructed with embeddings, skipping embedding usage...")# remove any duplicates from keyword + embedding queriesifself._retriever_mode==KGRetrieverMode.HYBRID:rel_texts=list(set(rel_texts))# remove shorter rel_texts that are substrings of longer rel_textsrel_texts.sort(key=len,reverse=True)foriinrange(len(rel_texts)):forjinrange(i+1,len(rel_texts)):ifrel_texts[j]inrel_texts[i]:rel_texts[j]=""rel_texts=[rel_textforrel_textinrel_textsifrel_text!=""]# truncate rel_textsrel_texts=rel_texts[:self.max_knowledge_sequence]# When include_text = True just get the actual content of all the nodes# (Nodes with actual keyword match, Nodes which are found from the depth search and Nodes founnd from top_k 
        if self._include_text:
            keywords = self._extract_rel_text_keywords(
                rel_texts
            )  # rel_texts will have all the Triplets retrieved with respect to the Query
            nested_node_ids = [
                self._index_struct.search_node_by_keyword(keyword)
                for keyword in keywords
            ]
            node_ids = [_id for ids in nested_node_ids for _id in ids]
            for node_id in node_ids:
                chunk_indices_count[node_id] += 1

        sorted_chunk_indices = sorted(
            chunk_indices_count.keys(),
            key=lambda x: chunk_indices_count[x],
            reverse=True,
        )
        sorted_chunk_indices = sorted_chunk_indices[: self.num_chunks_per_query]
        sorted_nodes = self._docstore.get_nodes(sorted_chunk_indices)

        # TMP/TODO: also filter rel_texts as nodes until we figure out better
        # abstraction
        # TODO(suo): figure out what this does
        # rel_text_nodes = [Node(text=rel_text) for rel_text in rel_texts]
        # for node_processor in self._node_postprocessors:
        #     rel_text_nodes = node_processor.postprocess_nodes(rel_text_nodes)
        # rel_texts = [node.get_content() for node in rel_text_nodes]

        sorted_nodes_with_scores = []
        for chunk_idx, node in zip(sorted_chunk_indices, sorted_nodes):
            # nodes are found with keyword mapping, give high conf to avoid cutoff
            sorted_nodes_with_scores.append(
                NodeWithScore(node=node, score=DEFAULT_NODE_SCORE)
            )
            logger.info(
                f"> Querying with idx: {chunk_idx}: "
                f"{truncate_text(node.get_content(), 80)}"
            )

        # if no relationship is found, return the nodes found by keywords
        if not rel_texts:
            logger.info("> No relationships found, returning nodes found by keywords.")
            if len(sorted_nodes_with_scores) == 0:
                logger.info("> No nodes found by keywords, returning empty response.")
                return [
                    NodeWithScore(
                        node=TextNode(text="No relationships found."), score=1.0
                    )
                ]

            # otherwise, sorted_nodes_with_scores is not empty, so return the
            # nodes found by keywords
            return sorted_nodes_with_scores

        # add relationships as Node
        # TODO: make initial text customizable
        rel_initial_text = (
            f"The following are knowledge sequences in max depth"
            f" {self.graph_store_query_depth} "
            f"in the form of directed graph like:\n"
            f"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
            f" object_next_hop ...`"
        )
        rel_info = [rel_initial_text, *rel_texts]
        rel_node_info = {
            "kg_rel_texts": rel_texts,
            "kg_rel_map": cur_rel_map,
        }
        if self._graph_schema != "":
            rel_node_info["kg_schema"] = {"schema": self._graph_schema}
        rel_info_text = "\n".join(
            [
                str(item)
                for sublist in rel_info
                for item in (sublist if isinstance(sublist, list) else [sublist])
            ]
        )
        if self._verbose:
            print_text(f"KG context:\n{rel_info_text}\n", color="blue")
        rel_text_node = TextNode(
            text=rel_info_text,
            metadata=rel_node_info,
            excluded_embed_metadata_keys=["kg_rel_map", "kg_rel_texts"],
            excluded_llm_metadata_keys=["kg_rel_map", "kg_rel_texts"],
        )
        # this node is constructed from rel_texts, give high confidence to
        # avoid cutoff
        sorted_nodes_with_scores.append(
            NodeWithScore(node=rel_text_node, score=DEFAULT_NODE_SCORE)
        )

        return sorted_nodes_with_scores

    def _get_metadata_for_response(
        self, nodes: List[BaseNode]
    ) -> Optional[Dict[str, Any]]:
        """Get metadata for response."""
        for node in nodes:
            if node.metadata is None or "kg_rel_map" not in node.metadata:
                continue
            return node.metadata
        raise ValueError("kg_rel_map must be found in at least one Node.")
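
# Example usage (illustrative sketch only, not part of this module): building a
# KGTableRetriever over an existing KnowledgeGraphIndex. The `documents`
# variable and configured Settings.llm / Settings.embed_model are assumed to
# exist in the caller's environment.
#
#     from llama_index.core import KnowledgeGraphIndex
#
#     index = KnowledgeGraphIndex.from_documents(documents)
#     retriever = KGTableRetriever(
#         index=index,
#         retriever_mode=KGRetrieverMode.HYBRID,
#         similarity_top_k=5,
#         include_text=True,
#     )
#     nodes = retriever.retrieve("Who founded the company?")
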
@deprecated.deprecated(
    version="0.10.53",
    reason=(
        "KnowledgeGraphRAGRetriever is deprecated, it is recommended to use "
        "PropertyGraphIndex and associated retrievers instead."
    ),
)
class KnowledgeGraphRAGRetriever(BaseRetriever):
    """
    Knowledge Graph RAG retriever.

    Retriever that performs SubGraph RAG towards a knowledge graph.

    Args:
        storage_context (Optional[StorageContext]): A storage context to use.
        entity_extract_fn (Optional[Callable]): A function to extract entities.
        entity_extract_template (Optional[BasePromptTemplate]): A Query Key
            Entity Extraction Prompt (see :ref:`Prompt-Templates`).
        entity_extract_policy (Optional[str]): The entity extraction policy to
            use.
            default: "union"
            possible values: "union", "intersection"
        synonym_expand_fn (Optional[Callable]): A function to expand synonyms.
        synonym_expand_template (Optional[QueryKeywordExpandPrompt]): A Query
            Key Entity Expansion Prompt (see :ref:`Prompt-Templates`).
        synonym_expand_policy (Optional[str]): The synonym expansion policy to
            use.
            default: "union"
            possible values: "union", "intersection"
        max_entities (int): The maximum number of entities to extract.
            default: 5
        max_synonyms (int): The maximum number of synonyms to expand per
            entity.
            default: 5
        retriever_mode (Optional[str]): The retriever mode to use.
            default: "keyword"
            possible values: "keyword", "embedding", "keyword_embedding"
        with_nl2graphquery (bool): Whether to combine NL2GraphQuery in context.
            default: False
        graph_traversal_depth (int): The depth of graph traversal.
            default: 2
        max_knowledge_sequence (int): The maximum number of knowledge sequences
            to include in the response. By default, it's 30.
        verbose (bool): Whether to print out debug info.

    """

    def __init__(
        self,
        storage_context: Optional[StorageContext] = None,
        llm: Optional[LLM] = None,
        entity_extract_fn: Optional[Callable] = None,
        entity_extract_template: Optional[BasePromptTemplate] = None,
        entity_extract_policy: Optional[str] = "union",
        synonym_expand_fn: Optional[Callable] = None,
        synonym_expand_template: Optional[BasePromptTemplate] = None,
        synonym_expand_policy: Optional[str] = "union",
        max_entities: int = 5,
        max_synonyms: int = 5,
        retriever_mode: Optional[str] = "keyword",
        with_nl2graphquery: bool = False,
        graph_traversal_depth: int = 2,
        max_knowledge_sequence: int = REL_TEXT_LIMIT,
        verbose: bool = False,
        callback_manager: Optional[CallbackManager] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the retriever."""
        # Ensure that we have a graph store
        assert storage_context is not None, "Must provide a storage context."
        assert storage_context.graph_store is not None, (
            "Must provide a graph store in the storage context."
        )
        self._storage_context = storage_context
        self._graph_store = storage_context.graph_store

        self._llm = llm or Settings.llm

        self._entity_extract_fn = entity_extract_fn
        self._entity_extract_template = (
            entity_extract_template or DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE
        )
        self._entity_extract_policy = entity_extract_policy

        self._synonym_expand_fn = synonym_expand_fn
        self._synonym_expand_template = (
            synonym_expand_template or DEFAULT_SYNONYM_EXPAND_PROMPT
        )
        self._synonym_expand_policy = synonym_expand_policy

        self._max_entities = max_entities
        self._max_synonyms = max_synonyms
        self._retriever_mode = retriever_mode
        self._with_nl2graphquery = with_nl2graphquery
        if self._with_nl2graphquery:
            from llama_index.core.query_engine.knowledge_graph_query_engine import (
                KnowledgeGraphQueryEngine,
            )

            graph_query_synthesis_prompt = kwargs.get("graph_query_synthesis_prompt")
            if graph_query_synthesis_prompt is not None:
                del kwargs["graph_query_synthesis_prompt"]

            graph_response_answer_prompt = kwargs.get("graph_response_answer_prompt")
            if graph_response_answer_prompt is not None:
                del kwargs["graph_response_answer_prompt"]

            refresh_schema = kwargs.get("refresh_schema", False)
            response_synthesizer = kwargs.get("response_synthesizer")
            self._kg_query_engine = KnowledgeGraphQueryEngine(
                llm=self._llm,
                storage_context=self._storage_context,
                graph_query_synthesis_prompt=graph_query_synthesis_prompt,
                graph_response_answer_prompt=graph_response_answer_prompt,
                refresh_schema=refresh_schema,
                verbose=verbose,
                response_synthesizer=response_synthesizer,
                **kwargs,
            )

        self._graph_traversal_depth = graph_traversal_depth
        self._max_knowledge_sequence = max_knowledge_sequence
        self._verbose = verbose
        refresh_schema = kwargs.get("refresh_schema", False)
        try:
            self._graph_schema = self._graph_store.get_schema(refresh=refresh_schema)
        except NotImplementedError:
            self._graph_schema = ""
        except Exception as e:
            logger.warning(f"Failed to get graph schema: {e}")
            self._graph_schema = ""

        super().__init__(
            callback_manager=callback_manager or Settings.callback_manager
        )

    def _process_entities(
        self,
        query_str: str,
        handle_fn: Optional[Callable],
        handle_llm_prompt_template: Optional[BasePromptTemplate],
        cross_handle_policy: Optional[str] = "union",
        max_items: Optional[int] = 5,
        result_start_token: str = "KEYWORDS:",
    ) -> List[str]:
        """Get entities from query string."""
        assert cross_handle_policy in [
            "union",
            "intersection",
        ], "Invalid entity extraction policy."
        if cross_handle_policy == "intersection":
            assert all(
                [
                    handle_fn is not None,
                    handle_llm_prompt_template is not None,
                ]
            ), "Must provide entity extract function and template."
        assert any(
            [
                handle_fn is not None,
                handle_llm_prompt_template is not None,
            ]
        ), "Must provide either entity extract function or template."
        entities_fn: List[str] = []
        entities_llm: Set[str] = set()

        if handle_fn is not None:
            entities_fn = handle_fn(query_str)
        if handle_llm_prompt_template is not None:
            response = self._llm.predict(
                handle_llm_prompt_template,
                max_keywords=max_items,
                question=query_str,
            )
            entities_llm = extract_keywords_given_response(
                response, start_token=result_start_token, lowercase=False
            )
        if cross_handle_policy == "union":
            entities = list(set(entities_fn) | entities_llm)
        elif cross_handle_policy == "intersection":
            entities = list(set(entities_fn).intersection(set(entities_llm)))
        if self._verbose:
            print_text(f"Entities processed: {entities}\n", color="green")

        return entities
extraction policy."ifcross_handle_policy=="intersection":assertall([handle_fnisnotNone,handle_llm_prompt_templateisnotNone,]),"Must provide entity extract function and template."assertany([handle_fnisnotNone,handle_llm_prompt_templateisnotNone,]),"Must provide either entity extract function or template."enitities_fn:List[str]=[]enitities_llm:Set[str]=set()ifhandle_fnisnotNone:enitities_fn=handle_fn(query_str)ifhandle_llm_prompt_templateisnotNone:response=awaitself._llm.apredict(handle_llm_prompt_template,max_keywords=max_items,question=query_str,)enitities_llm=extract_keywords_given_response(response,start_token=result_start_token,lowercase=False)ifcross_handle_policy=="union":entities=list(set(enitities_fn)|enitities_llm)elifcross_handle_policy=="intersection":entities=list(set(enitities_fn).intersection(set(enitities_llm)))ifself._verbose:print_text(f"Entities processed: {entities}\n",color="green")returnentitiesdef_get_entities(self,query_str:str)->List[str]:"""Get entities from query string."""entities=self._process_entities(query_str,self._entity_extract_fn,self._entity_extract_template,self._entity_extract_policy,self._max_entities,"KEYWORDS:",)expanded_entities=self._expand_synonyms(entities)returnlist(set(entities)|set(expanded_entities))asyncdef_aget_entities(self,query_str:str)->List[str]:"""Get entities from query string."""entities=awaitself._aprocess_entities(query_str,self._entity_extract_fn,self._entity_extract_template,self._entity_extract_policy,self._max_entities,"KEYWORDS:",)expanded_entities=awaitself._aexpand_synonyms(entities)returnlist(set(entities)|set(expanded_entities))def_expand_synonyms(self,keywords:List[str])->List[str]:"""Expand synonyms or similar expressions for keywords."""returnself._process_entities(str(keywords),self._synonym_expand_fn,self._synonym_expand_template,self._synonym_expand_policy,self._max_synonyms,"SYNONYMS:",)asyncdef_aexpand_synonyms(self,keywords:List[str])->List[str]:"""Expand synonyms or similar expressions for keywords."""returnawaitself._aprocess_entities(str(keywords),self._synonym_expand_fn,self._synonym_expand_template,self._synonym_expand_policy,self._max_synonyms,"SYNONYMS:",)def_get_knowledge_sequence(self,entities:List[str])->Tuple[List[str],Optional[Dict[Any,Any]]]:"""Get knowledge sequence from entities."""# Get SubGraph from Graph Store as Knowledge Sequencerel_map:Optional[Dict]=self._graph_store.get_rel_map(entities,self._graph_traversal_depth,limit=self._max_knowledge_sequence)logger.debug(f"rel_map: {rel_map}")# Build Knowledge Sequenceknowledge_sequence=[]ifrel_map:knowledge_sequence.extend([str(rel_obj)forrel_objsinrel_map.values()forrel_objinrel_objs])else:logger.info("> No knowledge sequence extracted from entities.")return[],Nonereturnknowledge_sequence,rel_mapasyncdef_aget_knowledge_sequence(self,entities:List[str])->Tuple[List[str],Optional[Dict[Any,Any]]]:"""Get knowledge sequence from entities."""# Get SubGraph from Graph Store as Knowledge Sequence# TBD: async in graph storerel_map:Optional[Dict]=self._graph_store.get_rel_map(entities,self._graph_traversal_depth,limit=self._max_knowledge_sequence)logger.debug(f"rel_map from GraphStore:\n{rel_map}")# Build Knowledge Sequenceknowledge_sequence=[]ifrel_map:knowledge_sequence.extend([str(rel_obj)forrel_objsinrel_map.values()forrel_objinrel_objs])else:logger.info("> No knowledge sequence extracted from 
entities.")return[],Nonereturnknowledge_sequence,rel_mapdef_build_nodes(self,knowledge_sequence:List[str],rel_map:Optional[Dict[Any,Any]]=None)->List[NodeWithScore]:"""Build nodes from knowledge sequence."""iflen(knowledge_sequence)==0:logger.info("> No knowledge sequence extracted from entities.")return[]_new_line_char="\n"context_string=(f"The following are knowledge sequence in max depth"f" {self._graph_traversal_depth} "f"in the form of directed graph like:\n"f"`subject -[predicate]->, object, <-[predicate_next_hop]-,"f" object_next_hop ...`"f" extracted based on key entities as subject:\n"f"{_new_line_char.join(knowledge_sequence)}")ifself._verbose:print_text(f"Graph RAG context:\n{context_string}\n",color="blue")rel_node_info={"kg_rel_map":rel_map,"kg_rel_text":knowledge_sequence,}metadata_keys=["kg_rel_map","kg_rel_text"]ifself._graph_schema!="":rel_node_info["kg_schema"]={"schema":self._graph_schema}metadata_keys.append("kg_schema")node=NodeWithScore(node=TextNode(text=context_string,score=1.0,metadata=rel_node_info,excluded_embed_metadata_keys=metadata_keys,excluded_llm_metadata_keys=metadata_keys,))return[node]def_retrieve_keyword(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Retrieve in keyword mode."""ifself._retriever_modenotin["keyword","keyword_embedding"]:return[]# Get entitiesentities=self._get_entities(query_bundle.query_str)# Before we enable embedding/semantic search, we need to make sure# we don't miss any entities that's synoynm of the entities we extracted# in string matching based retrieval in following steps, thus we expand# synonyms here.iflen(entities)==0:logger.info("> No entities extracted from query string.")return[]# Get SubGraph from Graph Store as Knowledge Sequenceknowledge_sequence,rel_map=self._get_knowledge_sequence(entities)returnself._build_nodes(knowledge_sequence,rel_map)asyncdef_aretrieve_keyword(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Retrieve in keyword mode."""ifself._retriever_modenotin["keyword","keyword_embedding"]:return[]# Get entitiesentities=awaitself._aget_entities(query_bundle.query_str)# Before we enable embedding/semantic search, we need to make sure# we don't miss any entities that's synoynm of the entities we extracted# in string matching based retrieval in following steps, thus we expand# synonyms here.iflen(entities)==0:logger.info("> No entities extracted from query string.")return[]# Get SubGraph from Graph Store as Knowledge Sequenceknowledge_sequence,rel_map=awaitself._aget_knowledge_sequence(entities)returnself._build_nodes(knowledge_sequence,rel_map)def_retrieve_embedding(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Retrieve in embedding mode."""ifself._retriever_modenotin["embedding","keyword_embedding"]:return[]# TBD: will implement this later with vector store.raiseNotImplementedErrorasyncdef_aretrieve_embedding(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Retrieve in embedding mode."""ifself._retriever_modenotin["embedding","keyword_embedding"]:return[]# TBD: will implement this later with vector store.raiseNotImplementedErrordef_retrieve(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Build nodes for response."""nodes:List[NodeWithScore]=[]ifself._with_nl2graphquery:try:nodes_nl2graphquery=self._kg_query_engine._retrieve(query_bundle)nodes.extend(nodes_nl2graphquery)exceptExceptionase:logger.warning(f"Error in retrieving from nl2graphquery: 
{e}")nodes.extend(self._retrieve_keyword(query_bundle))nodes.extend(self._retrieve_embedding(query_bundle))returnnodesasyncdef_aretrieve(self,query_bundle:QueryBundle)->List[NodeWithScore]:"""Build nodes for response."""nodes:List[NodeWithScore]=[]ifself._with_nl2graphquery:try:nodes_nl2graphquery=awaitself._kg_query_engine._aretrieve(query_bundle)nodes.extend(nodes_nl2graphquery)exceptExceptionase:logger.warning(f"Error in retrieving from nl2graphquery: {e}")nodes.extend(awaitself._aretrieve_keyword(query_bundle))nodes.extend(awaitself._aretrieve_embedding(query_bundle))returnnodes