Answer relevancy

Evaluation module.

AnswerRelevancyEvaluator #

Bases: BaseEvaluator

Answer relevancy evaluator.

Evaluates the relevancy of a response to a query. This evaluator considers the query string and the response string.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `service_context` | `Optional[ServiceContext]` | The service context to use for evaluation. | required |
| `raise_error` | `Optional[bool]` | Whether to raise an error if the response is invalid. Defaults to False. | required |
| `eval_template` | `Optional[Union[str, BasePromptTemplate]]` | The template to use for evaluation. | required |
| `refine_template` | `Optional[Union[str, BasePromptTemplate]]` | The template to use for refinement. | required |
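
A minimal usage sketch for constructing the evaluator with an explicit LLM (the OpenAI model name below is only illustrative; any LLM supported by llama_index works):

```python
from llama_index.core.evaluation import AnswerRelevancyEvaluator
from llama_index.llms.openai import OpenAI  # assumes llama-index-llms-openai is installed

# Construct the evaluator with an explicit LLM instead of relying on Settings.
evaluator = AnswerRelevancyEvaluator(
    llm=OpenAI(model="gpt-4o-mini"),  # illustrative model name
    raise_error=False,                # return an invalid result instead of raising
)
```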
Source code in llama_index/core/evaluation/answer_relevancy.py
class AnswerRelevancyEvaluator(BaseEvaluator):
    """答案相关性评估器。

    评估响应对查询的相关性。
    该评估器考虑查询字符串和响应字符串。

    Args:
        service_context(Optional[ServiceContext]):
            用于评估的服务上下文。
        raise_error(Optional[bool]):
            如果响应无效是否引发错误。
            默认为False。
        eval_template(Optional[Union[str, BasePromptTemplate]]):
            用于评估的模板。
        refine_template(Optional[Union[str, BasePromptTemplate]]):
            用于改进的模板。"""

    def __init__(
        self,
        llm: Optional[LLM] = None,
        raise_error: bool = False,
        eval_template: str | BasePromptTemplate | None = None,
        score_threshold: float = _DEFAULT_SCORE_THRESHOLD,
        parser_function: Callable[
            [str], Tuple[Optional[float], Optional[str]]
        ] = _default_parser_function,
        # deprecated
        service_context: Optional[ServiceContext] = None,
    ) -> None:
        """初始化参数。"""
        self._llm = llm or llm_from_settings_or_context(Settings, service_context)
        self._raise_error = raise_error

        self._eval_template: BasePromptTemplate
        if isinstance(eval_template, str):
            self._eval_template = PromptTemplate(eval_template)
        else:
            self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE

        self.parser_function = parser_function
        self.score_threshold = score_threshold

    def _get_prompts(self) -> PromptDictType:
        """获取提示。"""
        return {
            "eval_template": self._eval_template,
            "refine_template": self._refine_template,
        }

    def _update_prompts(self, prompts: PromptDictType) -> None:
        """更新提示。"""
        if "eval_template" in prompts:
            self._eval_template = prompts["eval_template"]
        if "refine_template" in prompts:
            self._refine_template = prompts["refine_template"]

    async def aevaluate(
        self,
        query: str | None = None,
        response: str | None = None,
        contexts: Sequence[str] | None = None,
        sleep_time_in_seconds: int = 0,
        **kwargs: Any,
    ) -> EvaluationResult:
        """评估响应是否与查询相关。"""
        del kwargs  # Unused
        del contexts  # Unused

        if query is None or response is None:
            raise ValueError("query and response must be provided")

        await asyncio.sleep(sleep_time_in_seconds)

        eval_response = await self._llm.apredict(
            prompt=self._eval_template,
            query=query,
            response=response,
        )

        score, reasoning = self.parser_function(eval_response)

        invalid_result, invalid_reason = False, None
        if score is None and reasoning is None:
            if self._raise_error:
                raise ValueError("The response is invalid")
            invalid_result = True
            invalid_reason = "Unable to parse the output string."

        if score:
            score /= self.score_threshold

        return EvaluationResult(
            query=query,
            response=response,
            score=score,
            feedback=eval_response,
            invalid_result=invalid_result,
            invalid_reason=invalid_reason,
        )
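
As the source above shows, the parsed score is divided by score_threshold before being returned, and parser_function controls how the raw LLM output is turned into a (score, reasoning) pair. A sketch of a hypothetical lenient parser (the function name, regex heuristic, and threshold value below are assumptions, not part of the library):

```python
import re
from typing import Optional, Tuple

from llama_index.core.evaluation import AnswerRelevancyEvaluator


def lenient_parser(output: str) -> Tuple[Optional[float], Optional[str]]:
    """Hypothetical parser: use the last number in the output as the raw score
    and the full text as the reasoning."""
    numbers = re.findall(r"\d+(?:\.\d+)?", output)
    if not numbers:
        return None, None  # (None, None) marks the result as invalid
    return float(numbers[-1]), output.strip()


# The raw score returned by the parser is later divided by score_threshold.
evaluator = AnswerRelevancyEvaluator(
    parser_function=lenient_parser,
    score_threshold=2.0,  # illustrative value for a rubric whose maximum score is 2
)
```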

aevaluate async #

aevaluate(
    query: str | None = None,
    response: str | None = None,
    contexts: Sequence[str] | None = None,
    sleep_time_in_seconds: int = 0,
    **kwargs: Any
) -> EvaluationResult

Evaluate whether the response is relevant to the query.

Source code in llama_index/core/evaluation/answer_relevancy.py
async def aevaluate(
    self,
    query: str | None = None,
    response: str | None = None,
    contexts: Sequence[str] | None = None,
    sleep_time_in_seconds: int = 0,
    **kwargs: Any,
) -> EvaluationResult:
    """评估响应是否与查询相关。"""
    del kwargs  # Unused
    del contexts  # Unused

    if query is None or response is None:
        raise ValueError("query and response must be provided")

    await asyncio.sleep(sleep_time_in_seconds)

    eval_response = await self._llm.apredict(
        prompt=self._eval_template,
        query=query,
        response=response,
    )

    score, reasoning = self.parser_function(eval_response)

    invalid_result, invalid_reason = False, None
    if score is None and reasoning is None:
        if self._raise_error:
            raise ValueError("The response is invalid")
        invalid_result = True
        invalid_reason = "Unable to parse the output string."

    if score:
        score /= self.score_threshold

    return EvaluationResult(
        query=query,
        response=response,
        score=score,
        feedback=eval_response,
        invalid_result=invalid_result,
        invalid_reason=invalid_reason,
    )
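
A usage sketch for this async entry point (it assumes a default LLM is configured via Settings, e.g. through an OPENAI_API_KEY in the environment; the query and response strings are illustrative):

```python
import asyncio

from llama_index.core.evaluation import AnswerRelevancyEvaluator


async def main() -> None:
    # With no explicit llm, the evaluator falls back to the LLM in Settings.
    evaluator = AnswerRelevancyEvaluator()
    result = await evaluator.aevaluate(
        query="What is the capital of France?",
        response="Paris is the capital of France.",
    )
    if result.invalid_result:
        print("Could not parse evaluator output:", result.invalid_reason)
    else:
        print("Relevancy score:", result.score)  # raw score divided by score_threshold
        print("Raw feedback:", result.feedback)


asyncio.run(main())
```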