Skip to content

Google

GoogleTextSynthesizer #

Bases: BaseSynthesizer

谷歌的属性问答服务。

给定用户的查询和一组段落,谷歌的服务器将返回一个与提供的段落列表相关联的响应。它不会基于参数化记忆来生成响应。

Source code in llama_index/response_synthesizers/google/base.py
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
class GoogleTextSynthesizer(BaseSynthesizer):
    """Google's Attributed Question Answering (AQA) service.

    Given a user's query and a list of passages, Google's server will return
    a response that is grounded in the provided list of passages. It will not
    base the response on parametric memory.
    """

    # Generative service client built once in __init__.
    _client: Any
    # Sampling temperature forwarded to each generate_answer call (0.0 to 1.0).
    _temperature: float
    # See google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle.
    _answer_style: Any
    # See google.ai.generativelanguage.SafetySetting.
    _safety_setting: List[Any]

    def __init__(
        self,
        *,
        temperature: float,
        answer_style: Any,
        safety_setting: List[Any],
        **kwargs: Any,
    ):
        """Create a new Google AQA.

        Prefer the factory ``from_defaults`` to ensure type safety.
        See ``from_defaults`` for more documentation.

        Raises:
            ImportError: If the Google extension packages are not installed.
        """
        try:
            import llama_index.vector_stores.google.genai_extension as genaix
        except ImportError:
            raise ImportError(_import_err_msg)

        # AQA answers from the provided passages only, so a mock LLM is used
        # to satisfy the BaseSynthesizer interface.
        super().__init__(
            llm=MockLLM(),
            output_cls=SynthesizedResponse,
            **kwargs,
        )

        self._client = genaix.build_generative_service()
        self._temperature = temperature
        self._answer_style = answer_style
        self._safety_setting = safety_setting

    # Type safe factory that is only available if Google is installed.
    @classmethod
    def from_defaults(
        cls,
        temperature: float = 0.7,
        answer_style: int = 1,
        safety_setting: Optional[List["genai.SafetySetting"]] = None,
    ) -> "GoogleTextSynthesizer":
        """Create a new Google AQA.

        Example:
          responder = GoogleTextSynthesizer.from_defaults(
              temperature=0.7,
              answer_style=AnswerStyle.ABSTRACTIVE,
              safety_setting=[
                  SafetySetting(
                      category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
                      threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
                  ),
              ]
          )

        Args:
          temperature: 0.0 to 1.0.
          answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`.
            The default is ABSTRACTIVE (1).
          safety_setting: See `google.ai.generativelanguage.SafetySetting`.
            Defaults to no settings.

        Returns:
          An instance of GoogleTextSynthesizer.
        """
        return cls(
            temperature=temperature,
            answer_style=answer_style,
            # Resolve the None sentinel here rather than using a mutable
            # default argument, which would be shared across calls.
            safety_setting=safety_setting if safety_setting is not None else [],
        )

    def get_response(
        self,
        query_str: str,
        text_chunks: Sequence[str],
        **response_kwargs: Any,
    ) -> SynthesizedResponse:
        """Generate a grounded response on provided passages.

        Args:
            query_str: The user's question.
            text_chunks: A list of passages that should be used to answer the
                question.

        Returns:
            A `SynthesizedResponse` object.

        Raises:
            ImportError: If the Google extension packages are not installed.
        """
        try:
            import llama_index.vector_stores.google.genai_extension as genaix

            import google.ai.generativelanguage as genai
        except ImportError:
            raise ImportError(_import_err_msg)

        client = cast(genai.GenerativeServiceClient, self._client)
        response = genaix.generate_answer(
            prompt=query_str,
            passages=list(text_chunks),
            answer_style=self._answer_style,
            safety_settings=self._safety_setting,
            temperature=self._temperature,
            client=client,
        )

        return SynthesizedResponse(
            answer=response.answer,
            attributed_passages=[
                passage.text for passage in response.attributed_passages
            ],
            answerable_probability=response.answerable_probability,
        )

    async def aget_response(
        self,
        query_str: str,
        text_chunks: Sequence[str],
        **response_kwargs: Any,
    ) -> RESPONSE_TEXT_TYPE:
        """Async version of `get_response`; currently delegates to the sync call."""
        # TODO: Implement a true async version.
        return self.get_response(query_str, text_chunks, **response_kwargs)

    def synthesize(
        self,
        query: QueryTextType,
        nodes: List[NodeWithScore],
        additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
        **response_kwargs: Any,
    ) -> Response:
        """Return a grounded response based on provided passages.

        Returns:
            The response's `source_nodes` will begin with a list of attributed
            passages. These are the passages that were used to construct the
            grounded response. These passages always have no score, the only
            way to mark them as attributed passages. Then, the list will
            follow with the originally provided passages, which will have a
            score from the retrieval.

            The response's `metadata` may also have an entry with key
            `answerable_probability`, which is the model's estimate of the
            probability that its answer is correct and grounded in the input
            passages.
        """
        if len(nodes) == 0:
            return Response("Empty Response")

        if isinstance(query, str):
            query = QueryBundle(query_str=query)

        with self._callback_manager.event(
            CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
        ) as event:
            internal_response = self.get_response(
                query_str=query.query_str,
                text_chunks=[
                    n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
                ],
                **response_kwargs,
            )

            additional_source_nodes = list(additional_source_nodes or [])

            external_response = self._prepare_external_response(
                internal_response, nodes + additional_source_nodes
            )

            event.on_end(payload={EventPayload.RESPONSE: external_response})

        return external_response

    async def asynthesize(
        self,
        query: QueryTextType,
        nodes: List[NodeWithScore],
        additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
        **response_kwargs: Any,
    ) -> Response:
        """Async version of `synthesize`; currently delegates to the sync call."""
        # TODO: Implement a true async version.
        return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)

    def _prepare_external_response(
        self,
        response: SynthesizedResponse,
        source_nodes: List[NodeWithScore],
    ) -> Response:
        # Attributed passages are prepended without scores; scoreless nodes
        # are the marker that distinguishes them from retrieved nodes.
        return Response(
            response=response.answer,
            source_nodes=[
                NodeWithScore(node=TextNode(text=passage))
                for passage in response.attributed_passages
            ]
            + source_nodes,
            metadata={
                "answerable_probability": response.answerable_probability,
            },
        )

    def _get_prompts(self) -> PromptDictType:
        # Not used.
        return {}

    def _update_prompts(self, prompts_dict: PromptDictType) -> None:
        # Not used.
        ...

from_defaults classmethod #

from_defaults(
    temperature: float = 0.7,
    answer_style: int = 1,
    safety_setting: List[SafetySetting] = [],
) -> GoogleTextSynthesizer

创建一个新的Google AQA。

示例: responder = GoogleTextSynthesizer.from_defaults( temperature=0.7, answer_style=AnswerStyle.ABSTRACTIVE, safety_setting=[ SafetySetting( category=HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, ), ] )

Parameters:

Name Type Description Default
temperature float

0.0 到 1.0。

0.7
answer_style int

参见 google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle。 默认值为 ABSTRACTIVE (1)。

1
safety_setting List[SafetySetting]

参见 google.ai.generativelanguage.SafetySetting

[]

Returns:

Type Description
GoogleTextSynthesizer

一个 GoogleTextSynthesizer 的实例。

Source code in llama_index/response_synthesizers/google/base.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
    @classmethod
    def from_defaults(
        cls,
        temperature: float = 0.7,
        answer_style: int = 1,
        safety_setting: Optional[List["genai.SafetySetting"]] = None,
    ) -> "GoogleTextSynthesizer":
        """Create a new Google AQA.

        Example:
          responder = GoogleTextSynthesizer.from_defaults(
              temperature=0.7,
              answer_style=AnswerStyle.ABSTRACTIVE,
              safety_setting=[
                  SafetySetting(
                      category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
                      threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
                  ),
              ]
          )

        Args:
          temperature: 0.0 to 1.0.
          answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`.
            The default is ABSTRACTIVE (1).
          safety_setting: See `google.ai.generativelanguage.SafetySetting`.
            Defaults to no settings.

        Returns:
          An instance of GoogleTextSynthesizer.
        """
        return cls(
            temperature=temperature,
            answer_style=answer_style,
            # Resolve the None sentinel here rather than using a mutable
            # default argument, which would be shared across calls.
            safety_setting=safety_setting if safety_setting is not None else [],
        )

get_response #

get_response(
    query_str: str,
    text_chunks: Sequence[str],
    **response_kwargs: Any
) -> SynthesizedResponse

生成对提供段落的有根据的回复。

Parameters:

Name Type Description Default
query_str str

用户的问题。

required
text_chunks Sequence[str]

应该用来回答问题的段落列表。

required

Returns:

Type Description
SynthesizedResponse

一个SynthesizedResponse对象。

Source code in llama_index/response_synthesizers/google/base.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
    def get_response(
        self,
        query_str: str,
        text_chunks: Sequence[str],
        **response_kwargs: Any,
    ) -> SynthesizedResponse:
        """Generate a grounded response on the provided passages.

        Args:
            query_str: The user's question.
            text_chunks: Passages that should be used to answer the question.

        Returns:
            A `SynthesizedResponse` object.

        Raises:
            ImportError: If the Google extension packages are not installed.
        """
        try:
            import llama_index.vector_stores.google.genai_extension as genaix

            import google.ai.generativelanguage as genai
        except ImportError:
            raise ImportError(_import_err_msg)

        service = cast(genai.GenerativeServiceClient, self._client)
        result = genaix.generate_answer(
            prompt=query_str,
            passages=list(text_chunks),
            answer_style=self._answer_style,
            safety_settings=self._safety_setting,
            temperature=self._temperature,
            client=service,
        )

        # Collect the texts of the passages the answer was attributed to.
        attributed_texts = [p.text for p in result.attributed_passages]
        return SynthesizedResponse(
            answer=result.answer,
            attributed_passages=attributed_texts,
            answerable_probability=result.answerable_probability,
        )

synthesize #

synthesize(
    query: QueryTextType,
    nodes: List[NodeWithScore],
    additional_source_nodes: Optional[
        Sequence[NodeWithScore]
    ] = None,
    **response_kwargs: Any
) -> Response

返回基于提供的段落的基础响应。

返回: 响应的 source_nodes 将以属性段落列表开头。这些段落是用于构建基础响应的段落。这些段落始终没有分数,这是将它们标记为属性段落的唯一方式。然后,列表将跟随最初提供的段落,这些段落将具有来自检索的分数。

响应的 `metadata` 也可能具有键为 `answerable_probability` 的条目,这是模型对其答案正确性并基于输入段落的概率的估计。
Source code in llama_index/response_synthesizers/google/base.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
    def synthesize(
        self,
        query: QueryTextType,
        nodes: List[NodeWithScore],
        additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
        **response_kwargs: Any,
    ) -> Response:
        """Return a grounded response based on the provided passages.

        Returns:
            The response's `source_nodes` begins with the list of attributed
            passages — the passages used to construct the grounded response.
            These passages never carry a score, which is the only way they are
            marked as attributed. The originally provided passages follow,
            carrying their retrieval scores.

            The response's `metadata` may also contain an entry keyed
            `answerable_probability`: the model's estimate of the probability
            that its answer is correct and grounded in the input passages.
        """
        # Guard clause: nothing to ground an answer on.
        if not nodes:
            return Response("Empty Response")

        if isinstance(query, str):
            query = QueryBundle(query_str=query)

        with self._callback_manager.event(
            CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
        ) as event:
            chunks = [
                n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
            ]
            aqa_response = self.get_response(
                query_str=query.query_str,
                text_chunks=chunks,
                **response_kwargs,
            )

            extra_nodes = list(additional_source_nodes or [])
            result = self._prepare_external_response(
                aqa_response, nodes + extra_nodes
            )

            event.on_end(payload={EventPayload.RESPONSE: result})

        return result