
Portkey #

Bases: CustomLLM

Portkey LLM.

Examples

pip install llama-index-llms-portkey

# Import the necessary libraries and modules
from llama_index.llms.portkey import Portkey
from llama_index.core.llms import ChatMessage
import portkey as pk

# Set the Portkey API key
import os
os.environ["PORTKEY_API_KEY"] = "YOUR_PORTKEY_API_KEY"

# Set up virtual keys for OpenAI
openai_virtual_key_a = "YOUR_OPENAI_VIRTUAL_KEY_A"
openai_virtual_key_b = "YOUR_OPENAI_VIRTUAL_KEY_B"

# Create a Portkey instance with the desired configuration
portkey_client = Portkey(api_key=os.environ.get("PORTKEY_API_KEY"), mode="single")

# Configure LLM options for OpenAI with semantic caching
openai_llm = pk.LLMOptions(
    provider="openai",
    model="gpt-3.5-turbo",
    virtual_key=openai_virtual_key_a,
    cache_status="semantic",
)

# Add the LLM options to the Portkey client
portkey_client.add_llms(openai_llm)

# Define chat messages for testing
current_messages = [
    ChatMessage(role="system", content="You are a helpful assistant"),
    ChatMessage(role="user", content="What are the ingredients of a pizza?"),
]

# Test Portkey's semantic cache
response = portkey_client.chat(current_messages)
print(str(response))
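
The second virtual key above goes unused in "single" mode. As a minimal sketch of a multi-LLM setup (the "fallback" mode string and the priority semantics of the list order are assumptions based on the add_llms documentation below, not confirmed API behavior), the same client shape might look like this:

```python
# A hedged sketch, not confirmed API usage: assumes a "fallback" mode
# and that add_llms tries a list of LLMOptions in order.
portkey_fallback = Portkey(
    api_key=os.environ.get("PORTKEY_API_KEY"), mode="fallback"
)
llm_a = pk.LLMOptions(
    provider="openai",
    model="gpt-3.5-turbo",
    virtual_key=openai_virtual_key_a,
)
llm_b = pk.LLMOptions(
    provider="openai",
    model="gpt-3.5-turbo",
    virtual_key=openai_virtual_key_b,
)
portkey_fallback.add_llms([llm_a, llm_b])

response = portkey_fallback.chat(current_messages)
print(str(response))
```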
Source code in llama_index/llms/portkey/base.py
class Portkey(CustomLLM):
    """Portkey LLM。

    Examples:
        `pip install llama-index-llms-portkey`

        ```python
        # Import the necessary libraries and modules
        from llama_index.llms.portkey import Portkey
        from llama_index.core.llms import ChatMessage
        import portkey as pk

        # Set the Portkey API key
        import os
        os.environ["PORTKEY_API_KEY"] = "YOUR_PORTKEY_API_KEY"

        # Set up virtual keys for OpenAI
        openai_virtual_key_a = "YOUR_OPENAI_VIRTUAL_KEY_A"
        openai_virtual_key_b = "YOUR_OPENAI_VIRTUAL_KEY_B"

        # Create a Portkey instance with the desired configuration
        portkey_client = Portkey(api_key=os.environ.get("PORTKEY_API_KEY"), mode="single")

        # Configure LLM options for OpenAI with semantic caching
        openai_llm = pk.LLMOptions(
            provider="openai",
            model="gpt-3.5-turbo",
            virtual_key=openai_virtual_key_a,
            cache_status="semantic",
        )

        # Add the LLM options to the Portkey client
        portkey_client.add_llms(openai_llm)

        # Define chat messages for testing
        current_messages = [
            ChatMessage(role="system", content="You are a helpful assistant"),
            ChatMessage(role="user", content="What are the ingredients of a pizza?"),
        ]

        # Test Portkey's semantic cache
        response = portkey_client.chat(current_messages)
        print(str(response))
        ```"""

    mode: Optional[Union["Modes", "ModesLiteral"]] = Field(
        description="The mode for using the Portkey integration"
    )

    model: Optional[str] = Field(default=DEFAULT_PORTKEY_MODEL)
    llm: "LLMOptions" = Field(description="LLM parameter", default_factory=dict)

    llms: List["LLMOptions"] = Field(description="LLM parameters", default_factory=list)

    _client: Any = PrivateAttr()

    def __init__(
        self,
        *,
        mode: Union["Modes", "ModesLiteral"],
        api_key: Optional[str] = None,
        base_url: Optional[str] = None,
        system_prompt: Optional[str] = None,
        messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
        completion_to_prompt: Optional[Callable[[str], str]] = None,
        pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
        output_parser: Optional[BaseOutputParser] = None,
    ) -> None:
        """初始化一个Portkey实例。

Args:
    mode(可选[Modes]):用于使用Portkey集成的模式(默认值:Modes.SINGLE)。
    api_key(可选[str]):用于认证Portkey的API密钥。
    base_url(可选[str]):用于自托管rubeus(portkey的开源版本)或任何其他自托管服务器的基本URL。
"""
        try:
            import portkey
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc

        super().__init__(
            base_url=base_url,
            api_key=api_key,
            system_prompt=system_prompt,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            pydantic_program_mode=pydantic_program_mode,
            output_parser=output_parser,
        )
        if api_key is not None:
            portkey.api_key = api_key

        if base_url is not None:
            portkey.base_url = base_url

        portkey.mode = mode

        self._client = portkey
        self.model = None
        self.mode = mode

    @property
    def metadata(self) -> LLMMetadata:
        """LLM元数据。"""
        return generate_llm_metadata(self.llms[0])

    def add_llms(
        self, llm_params: Union["LLMOptions", List["LLMOptions"]]
    ) -> "Portkey":
        """将指定的LLM参数添加到LLM列表中。这可以用于回退或根据模式进行负载平衡。

Args:
    llm_params (Union[LLMOptions, List[LLMOptions]]): 单个LLM参数集或LLM参数集列表。每个集应该是LLMOptions的一个实例,具有以下指定属性。
        > provider: Optional[ProviderTypes]
        > model: str
        > temperature: float
        > max_tokens: Optional[int]
        > max_retries: int
        > trace_id: Optional[str]
        > cache_status: Optional[CacheType]
        > cache: Optional[bool]
        > metadata: Dict[str, Any]
        > weight: Optional[float]
        > **kwargs : 其他由portkey-ai中的LLMOptions支持的附加参数

    注意: 用户也可以选择传递其他参数。

Returns:
    self
"""
        try:
            from portkey import LLMOptions
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc
        if isinstance(llm_params, LLMOptions):
            llm_params = [llm_params]
        self.llms.extend(llm_params)
        if self.model is None:
            self.model = self.llms[0].model
        return self

    @llm_completion_callback()
    def complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
        """LLM的完成端点。"""
        if self._is_chat_model:
            complete_fn = chat_to_completion_decorator(self._chat)
        else:
            complete_fn = self._complete
        return complete_fn(prompt, **kwargs)

    @llm_chat_callback()
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        if self._is_chat_model:
            chat_fn = self._chat
        else:
            chat_fn = completion_to_chat_decorator(self._complete)
        return chat_fn(messages, **kwargs)

    @llm_completion_callback()
    def stream_complete(
        self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponseGen:
        """LLM的完成端点。"""
        if self._is_chat_model:
            complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
        else:
            complete_fn = self._stream_complete
        return complete_fn(prompt, **kwargs)

    @llm_chat_callback()
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        if self._is_chat_model:
            stream_chat_fn = self._stream_chat
        else:
            stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
        return stream_chat_fn(messages, **kwargs)

    def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        try:
            from portkey import Config, Message
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc
        _messages = cast(
            List[Message],
            [{"role": i.role.value, "content": i.content} for i in messages],
        )
        config = Config(llms=self.llms)
        response = self._client.ChatCompletions.create(
            messages=_messages, config=config
        )
        self.llm = self._get_llm(response)

        message = response.choices[0].message
        return ChatResponse(message=message, raw=response)

    def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        try:
            from portkey import Config
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc

        config = Config(llms=self.llms)
        response = self._client.Completions.create(prompt=prompt, config=config)
        text = response.choices[0].text
        return CompletionResponse(text=text, raw=response)

    def _stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        try:
            from portkey import Config, Message
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc
        _messages = cast(
            List[Message],
            [{"role": i.role.value, "content": i.content} for i in messages],
        )
        config = Config(llms=self.llms)
        response = self._client.ChatCompletions.create(
            messages=_messages, config=config, stream=True, **kwargs
        )

        def gen() -> ChatResponseGen:
            content = ""
            # Starts as None; set when the first function-call delta arrives.
            # (An initial {} would skip the `is None` branch and raise a KeyError
            # on the `arguments` concatenation below.)
            function_call: Optional[dict] = None
            for resp in response:
                if resp.choices is None:
                    continue
                delta = resp.choices[0].delta
                role = delta.get("role", "assistant")
                content_delta = delta.get("content", "") or ""
                content += content_delta

                function_call_delta = delta.get("function_call", None)
                if function_call_delta is not None:
                    if function_call is None:
                        function_call = function_call_delta
                        # ensure we do not add a blank function call
                        if (
                            function_call
                            and function_call.get("function_name", "") is None
                        ):
                            del function_call["function_name"]
                    else:
                        function_call["arguments"] += function_call_delta["arguments"]

                additional_kwargs = {}
                if function_call is not None:
                    additional_kwargs["function_call"] = function_call

                yield ChatResponse(
                    message=ChatMessage(
                        role=role,
                        content=content,
                        additional_kwargs=additional_kwargs,
                    ),
                    delta=content_delta,
                    raw=resp,
                )

        return gen()

    def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        try:
            from portkey import Config
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc

        config = Config(llms=self.llms)
        response = self._client.Completions.create(
            prompt=prompt, config=config, stream=True, **kwargs
        )

        def gen() -> CompletionResponseGen:
            text = ""
            for resp in response:
                delta = resp.choices[0].text or ""
                text += delta
                yield CompletionResponse(
                    delta=delta,
                    text=text,
                    raw=resp,
                )

        return gen()

    @property
    def _is_chat_model(self) -> bool:
        """检查给定的模型是否是基于对话的语言模型。

返回:
    布尔值:如果提供的模型是基于对话的语言模型,则为True,否则为False。
"""
        return is_chat_model(self.model or "")

    def _get_llm(self, response: "PortkeyResponse") -> "LLMOptions":
        return get_llm(response, self.llms)

metadata property #

metadata: LLMMetadata

LLM metadata.
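
Note that, per the source, the metadata is generated from the first entry in self.llms, so the property is only meaningful after add_llms has been called at least once. A short sketch, reusing the client and options from the class-level example:

```python
# Assumes portkey_client and openai_llm from the class-level example.
portkey_client.add_llms(openai_llm)
print(portkey_client.metadata)  # derived from the first configured LLM
```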

add_llms #

add_llms(
    llm_params: Union[LLMOptions, List[LLMOptions]]
) -> Portkey

Adds the specified LLM parameters to the list of LLMs. This may be used for fallbacks or load-balancing as specified in the mode.

Parameters:

Name: llm_params
Type: Union[LLMOptions, List[LLMOptions]]
Default: required

A single set of LLM parameters or a list of them. Each set should be an instance of LLMOptions with the following attributes:

- provider: Optional[ProviderTypes]
- model: str
- temperature: float
- max_tokens: Optional[int]
- max_retries: int
- trace_id: Optional[str]
- cache_status: Optional[CacheType]
- cache: Optional[bool]
- metadata: Dict[str, Any]
- weight: Optional[float]
- **kwargs: additional parameters supported by LLMOptions in portkey-ai

NOTE: User may choose to pass additional parameters as well.

Returns:

Type: Portkey
Description: self
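
Because add_llms returns self, calls can be chained. A hedged usage sketch (the weight-based load-balancing semantics are an assumption drawn from the attribute list above, not confirmed behavior):

```python
# Hypothetical load-balanced setup across the two virtual keys from the
# class-level example; weights are assumed to be relative routing shares.
portkey_client.add_llms([
    pk.LLMOptions(provider="openai", model="gpt-3.5-turbo",
                  virtual_key=openai_virtual_key_a, weight=0.7),
    pk.LLMOptions(provider="openai", model="gpt-3.5-turbo",
                  virtual_key=openai_virtual_key_b, weight=0.3),
])
```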

Source code in llama_index/llms/portkey/base.py
    def add_llms(
        self, llm_params: Union["LLMOptions", List["LLMOptions"]]
    ) -> "Portkey":
        """将指定的LLM参数添加到LLM列表中。这可以用于回退或根据模式进行负载平衡。

Args:
    llm_params (Union[LLMOptions, List[LLMOptions]]): 单个LLM参数集或LLM参数集列表。每个集应该是LLMOptions的一个实例,具有以下指定属性。
        > provider: Optional[ProviderTypes]
        > model: str
        > temperature: float
        > max_tokens: Optional[int]
        > max_retries: int
        > trace_id: Optional[str]
        > cache_status: Optional[CacheType]
        > cache: Optional[bool]
        > metadata: Dict[str, Any]
        > weight: Optional[float]
        > **kwargs : 其他由portkey-ai中的LLMOptions支持的附加参数

    注意: 用户也可以选择传递其他参数。

Returns:
    self
"""
        try:
            from portkey import LLMOptions
        except ImportError as exc:
            raise ImportError(IMPORT_ERROR_MESSAGE) from exc
        if isinstance(llm_params, LLMOptions):
            llm_params = [llm_params]
        self.llms.extend(llm_params)
        if self.model is None:
            self.model = self.llms[0].model
        return self

complete #

complete(
    prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse

Completion endpoint for LLM.
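
As the source below shows, chat models are routed through a chat-to-completion adapter, so the same call works whether the configured model is chat-based or not. A short usage sketch, assuming the client from the class-level example:

```python
# Assumes portkey_client has been configured via add_llms as above.
response = portkey_client.complete("List three classic pizza toppings.")
print(response.text)
```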

Source code in llama_index/llms/portkey/base.py
@llm_completion_callback()
def complete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
    """LLM的完成端点。"""
    if self._is_chat_model:
        complete_fn = chat_to_completion_decorator(self._chat)
    else:
        complete_fn = self._complete
    return complete_fn(prompt, **kwargs)

stream_complete #

stream_complete(
    prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen

Completion endpoint for LLM.
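
The returned generator yields CompletionResponse objects whose delta field carries the incremental text (see the _stream_complete source in the class above). A short usage sketch, assuming the client from the class-level example:

```python
# Assumes portkey_client has been configured via add_llms as above.
for chunk in portkey_client.stream_complete("Describe a pizza in one sentence."):
    print(chunk.delta, end="", flush=True)
print()
```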

Source code in llama_index/llms/portkey/base.py
@llm_completion_callback()
def stream_complete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
    """LLM的完成端点。"""
    if self._is_chat_model:
        complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
    else:
        complete_fn = self._stream_complete
    return complete_fn(prompt, **kwargs)