Compact and refine

Init file.

CompactAndRefine #

Bases: Refine

Refine responses across compact text chunks.
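A minimal usage sketch: `COMPACT` mode builds a `CompactAndRefine` synthesizer, which repacks retrieved chunks to fill each prompt before running the usual refine loop. This assumes an LLM is already configured (e.g. `Settings.llm`); the query and chunks below are illustrative only.

from llama_index.core import get_response_synthesizer
from llama_index.core.response_synthesizers import ResponseMode

# ResponseMode.COMPACT yields a CompactAndRefine synthesizer.
synthesizer = get_response_synthesizer(response_mode=ResponseMode.COMPACT)

# Chunks are repacked to fill the prompt before any refine calls happen,
# so fewer LLM calls are made than with plain Refine mode.
response = synthesizer.get_response(
    query_str="What does the repack step do?",
    text_chunks=[
        "Repacking merges retrieved chunks so each call uses the full context window.",
        "Fewer, larger prompts mean fewer refine iterations.",
    ],
)
print(response)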

Source code in llama_index/core/response_synthesizers/compact_and_refine.py
class CompactAndRefine(Refine):
    """在紧凑的文本块中优化响应。"""

    @dispatcher.span
    async def aget_response(
        self,
        query_str: str,
        text_chunks: Sequence[str],
        prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
        **response_kwargs: Any,
    ) -> RESPONSE_TEXT_TYPE:
        compact_texts = self._make_compact_text_chunks(query_str, text_chunks)
        return await super().aget_response(
            query_str=query_str,
            text_chunks=compact_texts,
            prev_response=prev_response,
            **response_kwargs,
        )

    @dispatcher.span
    def get_response(
        self,
        query_str: str,
        text_chunks: Sequence[str],
        prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
        **response_kwargs: Any,
    ) -> RESPONSE_TEXT_TYPE:
        """获取紧凑的响应。"""
        # use prompt helper to fix compact text_chunks under the prompt limitation
        # TODO: This is a temporary fix - reason it's temporary is that
        # the refine template does not account for size of previous answer.
        new_texts = self._make_compact_text_chunks(query_str, text_chunks)
        return super().get_response(
            query_str=query_str,
            text_chunks=new_texts,
            prev_response=prev_response,
            **response_kwargs,
        )

    def _make_compact_text_chunks(
        self, query_str: str, text_chunks: Sequence[str]
    ) -> List[str]:
        text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
        refine_template = self._refine_template.partial_format(query_str=query_str)

        max_prompt = get_biggest_prompt([text_qa_template, refine_template])
        return self._prompt_helper.repack(max_prompt, text_chunks)
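`_make_compact_text_chunks` partially formats both templates with the query, takes whichever prompt leaves the least room for text (`get_biggest_prompt`), and has the prompt helper repack the chunks into the remaining space. An illustrative, self-contained sketch of the repacking idea follows; it is not the library's internals, and the one-word-per-token proxy is an assumption for brevity.

def repack_sketch(available_tokens: int, text_chunks: list[str]) -> list[str]:
    """Join chunks, then re-split so each new chunk fills the free space."""
    joined = "\n\n".join(chunk.strip() for chunk in text_chunks if chunk.strip())
    words = joined.split()  # crude proxy: one word ~ one token
    return [
        " ".join(words[i : i + available_tokens])
        for i in range(0, len(words), available_tokens)
    ]

# Two short chunks collapse into a single packed chunk when they fit together.
print(repack_sketch(50, ["First retrieved passage.", "Second retrieved passage."]))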

get_response #

get_response(
    query_str: str,
    text_chunks: Sequence[str],
    prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
    **response_kwargs: Any
) -> RESPONSE_TEXT_TYPE

Get compact response.

Source code in llama_index/core/response_synthesizers/compact_and_refine.py
@dispatcher.span
def get_response(
    self,
    query_str: str,
    text_chunks: Sequence[str],
    prev_response: Optional[RESPONSE_TEXT_TYPE] = None,
    **response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
    """获取紧凑的响应。"""
    # use prompt helper to fix compact text_chunks under the prompt limitation
    # TODO: This is a temporary fix - reason it's temporary is that
    # the refine template does not account for size of previous answer.
    new_texts = self._make_compact_text_chunks(query_str, text_chunks)
    return super().get_response(
        query_str=query_str,
        text_chunks=new_texts,
        prev_response=prev_response,
        **response_kwargs,
    )
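When `prev_response` is passed, the packed chunks are routed through the refine template instead of the fresh QA template, so an earlier answer can be updated against newly retrieved text. A hedged sketch of calling the synthesizer directly; constructing `CompactAndRefine()` with no arguments assumes `Settings.llm` is already configured.

from llama_index.core.response_synthesizers import CompactAndRefine

synthesizer = CompactAndRefine()

draft = synthesizer.get_response(
    query_str="Summarize the findings.",
    text_chunks=["Chunk seen in the first retrieval pass."],
)

# A later pass refines the existing draft against new chunks;
# prev_response sends the packed text through the refine template.
final = synthesizer.get_response(
    query_str="Summarize the findings.",
    text_chunks=["Chunk retrieved in a follow-up pass."],
    prev_response=draft,
)
print(final)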