
PaLM #

Bases: CustomLLM

PaLM LLM.

Examples

pip install llama-index-llms-palm

import google.generativeai as palm

# API key for PaLM
palm_api_key = "YOUR_API_KEY_HERE"

# List all models that support text generation
models = [
    m
    for m in palm.list_models()
    if "generateText" in m.supported_generation_methods
]
model = models[0].name
print(model)

# Start using the PaLM LLM abstraction
from llama_index.llms.palm import PaLM

# Create an instance of the PaLM class using the API key
llm = PaLM(model_name=model, api_key=palm_api_key)

# Use the complete method to generate text from a prompt
response = llm.complete("Your prompt text here.")
print(str(response))
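
The constructor also falls back to the PALM_API_KEY environment variable when api_key is not passed (see __init__ below). A minimal sketch, assuming the text-bison model name returned by palm.list_models() above:

import os

from llama_index.llms.palm import PaLM

# api_key is omitted; __init__ reads PALM_API_KEY from the environment
os.environ["PALM_API_KEY"] = "YOUR_API_KEY_HERE"

# Assumed model name; substitute whatever palm.list_models() returns
llm = PaLM(model_name="models/text-bison-001")

response = llm.complete("Your prompt text here.")
print(str(response))
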
Source code in llama_index/llms/palm/base.py
class PaLM(CustomLLM):
    """PaLM LLM。

    示例:
        `pip install llama-index-llms-palm`

        ```python
        import google.generativeai as palm

        # PaLM的API密钥
        palm_api_key = "YOUR_API_KEY_HERE"

        # 列出所有支持文本生成的模型
        models = [
            m
            for m in palm.list_models()
            if "generateText" in m.supported_generation_methods
        ]
        model = models[0].name
        print(model)

        # 开始使用我们的PaLM LLM抽象
        from llama_index.llms.palm import PaLM

        # 使用API密钥创建PaLM类的实例
        llm = PaLM(model_name=model, api_key=palm_api_key)

        # 使用complete方法基于提示生成文本
        response = llm.complete("Your prompt text here.")
        print(str(response))
        ```"""

    model_name: str = Field(
        default=DEFAULT_PALM_MODEL, description="The PaLM model to use."
    )
    num_output: int = Field(
        default=DEFAULT_NUM_OUTPUTS,
        description="The number of tokens to generate.",
        gt=0,
    )
    generate_kwargs: dict = Field(
        default_factory=dict, description="Kwargs for generation."
    )

    _model: Any = PrivateAttr()

    def __init__(
        self,
        api_key: Optional[str] = None,
        model_name: Optional[str] = DEFAULT_PALM_MODEL,
        num_output: Optional[int] = None,
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
        **generate_kwargs: Any,
    ) -> None:
        """初始化参数。"""
        api_key = api_key or os.environ.get("PALM_API_KEY")
        palm.configure(api_key=api_key)

        models = palm.list_models()
        models_dict = {m.name: m for m in models}
        if model_name not in models_dict:
            raise ValueError(
                f"Model name {model_name} not found in {models_dict.keys()}"
            )

        self._model = models_dict[model_name]

        # get num_output
        num_output = num_output or self._model.output_token_limit

        generate_kwargs = generate_kwargs or {}
        super().__init__(
            model_name=model_name,
            num_output=num_output,
            generate_kwargs=generate_kwargs,
            callback_manager=callback_manager,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )

    @classmethod
    def class_name(cls) -> str:
        return "PaLM_llm"

    @property
    def metadata(self) -> LLMMetadata:
        """获取LLM元数据。"""
        # TODO: google palm actually separates input and output token limits
        total_tokens = self._model.input_token_limit + self.num_output
        return LLMMetadata(
            context_window=total_tokens,
            num_output=self.num_output,
            model_name=self.model_name,
        )

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """预测对查询的答案。

Args:
    prompt (str): 用于预测的提示。

Returns:
    Tuple[str, str]: 预测的答案和格式化后的提示的元组。
"""
        completion = palm.generate_text(
            model=self.model_name,
            prompt=prompt,
            **kwargs,
        )
        return CompletionResponse(text=completion.result, raw=completion.candidates[0])

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """流式传输查询的答案。

注意:这是一个测试功能。将尝试构建或使用更好的抽象来处理响应。

Args:
    prompt(str):用于预测的提示。

Returns:
    str:预测的答案。
"""
        raise NotImplementedError(
            "PaLM does not support streaming completion in LlamaIndex currently."
        )

metadata property #

metadata: LLMMetadata

Get LLM metadata.
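
Per the property source above, the reported context_window is the model's input token limit plus num_output. A minimal sketch of reading it, assuming llm was constructed as in the example at the top:

meta = llm.metadata
print(meta.model_name)      # e.g. the models/... name selected earlier
print(meta.num_output)      # tokens reserved for the completion
print(meta.context_window)  # input_token_limit + num_output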

complete #

complete(
    prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse

Predict the answer to a query.

Parameters:

Name    Type  Description                         Default
prompt  str   The prompt to use for prediction.   required

Returns:

Type                 Description
CompletionResponse   The predicted answer.
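
Since complete forwards **kwargs to palm.generate_text, per-call generation settings can be supplied directly. A minimal sketch, assuming the underlying PaLM text API accepts temperature and max_output_tokens:

# Hypothetical generation settings; the parameter names assume the
# google.generativeai text API accepts them.
response = llm.complete(
    "Write a haiku about the ocean.",
    temperature=0.2,
    max_output_tokens=128,
)
print(response.text)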

Source code in llama_index/llms/palm/base.py
    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """预测对查询的答案。

Args:
    prompt (str): 用于预测的提示。

Returns:
    Tuple[str, str]: 预测的答案和格式化后的提示的元组。
"""
        completion = palm.generate_text(
            model=self.model_name,
            prompt=prompt,
            **kwargs,
        )
        return CompletionResponse(text=completion.result, raw=completion.candidates[0])

stream_complete #

stream_complete(
    prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen

Stream the answer to a query.

NOTE: this is a beta feature. Will try to build or use better abstractions about response handling.

Returns:

Type                    Description
CompletionResponseGen   The predicted answer.
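
Streaming is not implemented for this integration, so calls to stream_complete should be guarded or avoided. A minimal sketch of falling back to the blocking call:

try:
    # This raises NotImplementedError for the PaLM integration.
    for chunk in llm.stream_complete("Your prompt text here."):
        print(chunk.delta, end="")
except NotImplementedError:
    # Fall back to the non-streaming complete call instead.
    print(str(llm.complete("Your prompt text here.")))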

Source code in llama_index/llms/palm/base.py
    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """流式传输查询的答案。

注意:这是一个测试功能。将尝试构建或使用更好的抽象来处理响应。

Args:
    prompt(str):用于预测的提示。

Returns:
    str:预测的答案。
"""
        raise NotImplementedError(
            "PaLM does not support streaming completion in LlamaIndex currently."
        )