Guidance

GuidancePydanticProgram #

Bases: BaseLLMFunctionProgram['GuidanceLLM']

A guidance-based function that returns a pydantic model.

Note: this interface is not yet stable.

Source code in llama_index/program/guidance/base.py
class GuidancePydanticProgram(BaseLLMFunctionProgram["GuidanceLLM"]):
    """一个基于指导的函数,返回一个pydantic模型。

注意:此接口尚不稳定。"""

    def __init__(
        self,
        output_cls: Type[BaseModel],
        prompt_template_str: str,
        guidance_llm: Optional["GuidanceLLM"] = None,
        verbose: bool = False,
    ):
        # Default to an OpenAI gpt-3.5-turbo model when no guidance LLM is given.
        if not guidance_llm:
            llm = OpenAI("gpt-3.5-turbo")
        else:
            llm = guidance_llm

        full_str = prompt_template_str + "\n"
        self._full_str = full_str
        self._guidance_program = partial(self.program, llm=llm, silent=not verbose)
        self._output_cls = output_cls
        self._verbose = verbose

    def program(
        self,
        llm: "GuidanceLLM",
        silent: bool,
        tools_str: str,
        query_str: str,
        **kwargs: dict,
    ) -> "GuidanceLLM":
        """一个用于使用新的指导版本执行程序的包装器。"""
        given_query = self._full_str.replace("{{tools_str}}", tools_str).replace(
            "{{query_str}}", query_str
        )
        with user():
            llm = llm + given_query

        with assistant():
            llm = llm + gen(stop=".")

        return llm  # noqa: RET504

    @classmethod
    def from_defaults(
        cls,
        output_cls: Type[BaseModel],
        prompt_template_str: Optional[str] = None,
        prompt: Optional[PromptTemplate] = None,
        llm: Optional["GuidanceLLM"] = None,
        **kwargs: Any,
    ) -> "BaseLLMFunctionProgram":
        """从默认值。"""
        if prompt is None and prompt_template_str is None:
            raise ValueError("Must provide either prompt or prompt_template_str.")
        if prompt is not None and prompt_template_str is not None:
            raise ValueError("Must provide either prompt or prompt_template_str.")
        if prompt is not None:
            prompt_template_str = prompt.template
        prompt_template_str = cast(str, prompt_template_str)
        return cls(
            output_cls,
            prompt_template_str,
            guidance_llm=llm,
            **kwargs,
        )

    @property
    def output_cls(self) -> Type[BaseModel]:
        return self._output_cls

    def __call__(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> BaseModel:
        executed_program = self._guidance_program(**kwargs)
        response = str(executed_program)

        return parse_pydantic_from_guidance_program(
            response=response, cls=self._output_cls
        )
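
Below is a minimal usage sketch (not from the library's docs; the Tool/ToolChoice models and the prompt are hypothetical). Note that __call__ forwards its keyword arguments to program, which accepts exactly tools_str and query_str, so the template should use the {{tools_str}} and {{query_str}} placeholders; whether parsing succeeds depends on the model emitting output that matches output_cls:

from typing import List

from guidance.models import OpenAI as GuidanceOpenAI
from pydantic import BaseModel

from llama_index.program.guidance import GuidancePydanticProgram


class Tool(BaseModel):
    name: str
    reason: str


class ToolChoice(BaseModel):
    tools: List[Tool]


# The placeholders must be {{tools_str}} and {{query_str}}: program()
# substitutes exactly these two names into the template.
program = GuidancePydanticProgram(
    output_cls=ToolChoice,
    prompt_template_str=(
        "Given the following tools:\n{{tools_str}}\n"
        "pick the most relevant tool (as JSON) for the query: {{query_str}}"
    ),
    guidance_llm=GuidanceOpenAI("gpt-3.5-turbo"),
    verbose=True,
)

output = program(
    tools_str="search: web search\ncalculator: basic arithmetic",
    query_str="What is 123 * 456?",
)
# output is a ToolChoice instance parsed from the model's response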

program #

program(
    llm: Model,
    silent: bool,
    tools_str: str,
    query_str: str,
    **kwargs: dict
) -> Model

A wrapper for executing the program with the new guidance version.

Source code in llama_index/program/guidance/base.py
def program(
    self,
    llm: "GuidanceLLM",
    silent: bool,
    tools_str: str,
    query_str: str,
    **kwargs: dict,
) -> "GuidanceLLM":
    """一个用于使用新的指导版本执行程序的包装器。"""
    given_query = self._full_str.replace("{{tools_str}}", tools_str).replace(
        "{{query_str}}", query_str
    )
    with user():
        llm = llm + given_query

    with assistant():
        llm = llm + gen(stop=".")

    return llm  # noqa: RET504
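
For illustration, a sketch of the underlying guidance chat pattern this method composes (using the same user/assistant context managers and gen that this module imports from guidance; the prompt is hypothetical):

from guidance import assistant, gen, user
from guidance.models import OpenAI as GuidanceOpenAI

lm = GuidanceOpenAI("gpt-3.5-turbo")

# Append a user turn, then generate an assistant turn that stops at
# the first period -- the same two-step composition program() performs.
with user():
    lm = lm + "Name one planet in our solar system"
with assistant():
    lm = lm + gen(stop=".")

print(str(lm))  # str() renders the whole transcript, prompt included

Rendering the executed model with str() yields the full transcript, which is what __call__ passes to parse_pydantic_from_guidance_program.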

from_defaults classmethod #

from_defaults(
    output_cls: Type[BaseModel],
    prompt_template_str: Optional[str] = None,
    prompt: Optional[PromptTemplate] = None,
    llm: Optional[Model] = None,
    **kwargs: Any
) -> BaseLLMFunctionProgram

Create a program from defaults.

Source code in llama_index/program/guidance/base.py
@classmethod
def from_defaults(
    cls,
    output_cls: Type[BaseModel],
    prompt_template_str: Optional[str] = None,
    prompt: Optional[PromptTemplate] = None,
    llm: Optional["GuidanceLLM"] = None,
    **kwargs: Any,
) -> "BaseLLMFunctionProgram":
    """从默认值。"""
    if prompt is None and prompt_template_str is None:
        raise ValueError("Must provide either prompt or prompt_template_str.")
    if prompt is not None and prompt_template_str is not None:
        raise ValueError("Must provide either prompt or prompt_template_str.")
    if prompt is not None:
        prompt_template_str = prompt.template
    prompt_template_str = cast(str, prompt_template_str)
    return cls(
        output_cls,
        prompt_template_str,
        guidance_llm=llm,
        **kwargs,
    )
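
A sketch of constructing the program from an existing PromptTemplate rather than a raw template string, reusing the hypothetical ToolChoice and GuidanceOpenAI from the sketch above (the PromptTemplate import path may vary by llama_index version):

from llama_index.core.prompts import PromptTemplate

prompt = PromptTemplate(
    "Given the tools:\n{{tools_str}}\nanswer the query: {{query_str}}"
)

# Exactly one of prompt / prompt_template_str may be passed; here the
# template string is pulled from prompt.template before __init__ runs.
program = GuidancePydanticProgram.from_defaults(
    output_cls=ToolChoice,
    prompt=prompt,
    llm=GuidanceOpenAI("gpt-3.5-turbo"),
)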