
Gradient

GradientBaseModelLLM #

Bases: _BaseGradientLLM

Gradient base model LLM.

Examples

pip install llama-index-llms-gradient

import os
from llama_index.llms.gradient import GradientBaseModelLLM

# Set the Gradient access token and workspace ID
os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}"
os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}"

# Create an instance of GradientBaseModelLLM
llm = GradientBaseModelLLM(
    base_model_slug="llama2-7b-chat",
    max_tokens=400,
)
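
The resulting llm exposes the standard LlamaIndex completion interface, via the same complete call shown in the GradientModelAdapterLLM example below:

result = llm.complete("Can you tell me about large language models?")
print(result)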
Source code in llama_index/llms/gradient/base.py
class GradientBaseModelLLM(_BaseGradientLLM):
    """基于梯度的基础模型LLM。

    示例:
        `pip install llama-index-llms-gradient`

        ```python
        import os
        from llama_index.llms.gradient import GradientBaseModelLLM

        # Set the Gradient access token and workspace ID
        os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}"
        os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}"

        # Create an instance of GradientBaseModelLLM
        llm = GradientBaseModelLLM(
            base_model_slug="llama2-7b-chat",
            max_tokens=400,
        )
        ```"""

    base_model_slug: str = Field(
        description="The slug of the base model to use.",
    )

    def __init__(
        self,
        *,
        access_token: Optional[str] = None,
        base_model_slug: str,
        host: Optional[str] = None,
        max_tokens: Optional[int] = None,
        workspace_id: Optional[str] = None,
        callback_manager: Optional[CallbackManager] = None,
        is_chat_model: bool = False,
    ) -> None:
        super().__init__(
            access_token=access_token,
            base_model_slug=base_model_slug,
            host=host,
            max_tokens=max_tokens,
            workspace_id=workspace_id,
            callback_manager=callback_manager,
            is_chat_model=is_chat_model,
        )

        self._model = self._gradient.get_base_model(
            base_model_slug=base_model_slug,
        )

GradientModelAdapterLLM #

Bases: _BaseGradientLLM

Gradient model adapter LLM.

Examples

pip install llama-index-llms-gradient

import os

os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}"
os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}"

from llama_index.llms.gradient import GradientModelAdapterLLM

llm = GradientModelAdapterLLM(
    model_adapter_id="{YOUR_MODEL_ADAPTER_ID}",
    max_tokens=400,
)

result = llm.complete("Can you tell me about large language models?")
print(result)
Source code in llama_index/llms/gradient/base.py
class GradientModelAdapterLLM(_BaseGradientLLM):
    """模型适配器 LLM 的梯度。

    示例:
        `pip install llama-index-llms-gradient`

        ```python
        import os

        os.environ["GRADIENT_ACCESS_TOKEN"] = "{GRADIENT_ACCESS_TOKEN}"
        os.environ["GRADIENT_WORKSPACE_ID"] = "{GRADIENT_WORKSPACE_ID}"

        from llama_index.llms.gradient import GradientModelAdapterLLM

        llm = GradientModelAdapterLLM(
            model_adapter_id="{YOUR_MODEL_ADAPTER_ID}",
            max_tokens=400,
        )

        result = llm.complete("Can you tell me about large language models?")
        print(result)
        ```"""

    model_adapter_id: str = Field(
        description="The id of the model adapter to use.",
    )

    def __init__(
        self,
        *,
        access_token: Optional[str] = None,
        host: Optional[str] = None,
        max_tokens: Optional[int] = None,
        model_adapter_id: str,
        workspace_id: Optional[str] = None,
        callback_manager: Optional[CallbackManager] = None,
        is_chat_model: bool = False,
    ) -> None:
        super().__init__(
            access_token=access_token,
            host=host,
            max_tokens=max_tokens,
            model_adapter_id=model_adapter_id,
            workspace_id=workspace_id,
            callback_manager=callback_manager,
            is_chat_model=is_chat_model,
        )
        self._model = self._gradient.get_model_adapter(
            model_adapter_id=model_adapter_id
        )
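
Both classes inherit the full LlamaIndex LLM interface from _BaseGradientLLM, so chat-style calls are available alongside complete. A minimal sketch, assuming the standard ChatMessage type from llama_index.core.llms; the message contents are illustrative:

from llama_index.core.llms import ChatMessage

messages = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="Can you tell me about large language models?"),
]

# Chat requests are served through the same Gradient-backed model
response = llm.chat(messages)
print(response)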