OpenAI

OpenAIAgent #

Bases: AgentRunner

OpenAI agent.

Subclass of AgentRunner that uses the OpenAIAgentWorker.

For the legacy implementation, see:

from llama_index.agent.legacy.openai.base import OpenAIAgent
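
A minimal usage sketch (assuming the `llama-index-agent-openai` and `llama-index-llms-openai` packages are installed and `OPENAI_API_KEY` is set; the `multiply` tool and the model name are illustrative, not part of this API):

```python
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result."""
    return a * b


# Wrap the plain function as a tool the agent can call.
multiply_tool = FunctionTool.from_defaults(fn=multiply)

agent = OpenAIAgent.from_tools(
    tools=[multiply_tool],
    llm=OpenAI(model="gpt-4o-mini"),  # illustrative model name
    verbose=True,
)
print(agent.chat("What is 121 * 3?"))
```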

Source code in llama_index/agent/openai/base.py
class OpenAIAgent(AgentRunner):
    """OpenAI代理。

使用OpenAIAgentWorker的AgentRunner的子类。

有关旧版实现,请参见:
```python
from llama_index..agent.legacy.openai.base import OpenAIAgent
```"""

    def __init__(
        self,
        tools: List[BaseTool],
        llm: OpenAI,
        memory: BaseMemory,
        prefix_messages: List[ChatMessage],
        verbose: bool = False,
        max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
        default_tool_choice: str = "auto",
        callback_manager: Optional[CallbackManager] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
    ) -> None:
        """初始化参数。"""
        callback_manager = callback_manager or llm.callback_manager
        step_engine = OpenAIAgentWorker.from_tools(
            tools=tools,
            tool_retriever=tool_retriever,
            llm=llm,
            verbose=verbose,
            max_function_calls=max_function_calls,
            callback_manager=callback_manager,
            prefix_messages=prefix_messages,
            tool_call_parser=tool_call_parser,
        )
        super().__init__(
            step_engine,
            memory=memory,
            llm=llm,
            callback_manager=callback_manager,
            default_tool_choice=default_tool_choice,
        )

    @classmethod
    def from_tools(
        cls,
        tools: Optional[List[BaseTool]] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        llm: Optional[LLM] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
        verbose: bool = False,
        max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
        default_tool_choice: str = "auto",
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        prefix_messages: Optional[List[ChatMessage]] = None,
        tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
        **kwargs: Any,
    ) -> "OpenAIAgent":
        """从工具列表中创建一个OpenAIAgent。

类似于其他类中的`from_defaults`,该方法将推断出各种参数的默认值,包括如果它们未指定LLM。
"""
        tools = tools or []

        chat_history = chat_history or []
        llm = llm or Settings.llm
        if not isinstance(llm, OpenAI):
            raise ValueError("llm must be a OpenAI instance")

        if callback_manager is not None:
            llm.callback_manager = callback_manager

        memory = memory or memory_cls.from_defaults(chat_history, llm=llm)

        if not llm.metadata.is_function_calling_model:
            raise ValueError(
                f"Model name {llm.model} does not support function calling API. "
            )

        if system_prompt is not None:
            if prefix_messages is not None:
                raise ValueError(
                    "Cannot specify both system_prompt and prefix_messages"
                )
            prefix_messages = [ChatMessage(content=system_prompt, role="system")]

        prefix_messages = prefix_messages or []

        return cls(
            tools=tools,
            tool_retriever=tool_retriever,
            llm=llm,
            memory=memory,
            prefix_messages=prefix_messages,
            verbose=verbose,
            max_function_calls=max_function_calls,
            callback_manager=callback_manager,
            default_tool_choice=default_tool_choice,
            tool_call_parser=tool_call_parser,
        )

from_tools classmethod #

from_tools(
    tools: Optional[List[BaseTool]] = None,
    tool_retriever: Optional[
        ObjectRetriever[BaseTool]
    ] = None,
    llm: Optional[LLM] = None,
    chat_history: Optional[List[ChatMessage]] = None,
    memory: Optional[BaseMemory] = None,
    memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
    verbose: bool = False,
    max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
    default_tool_choice: str = "auto",
    callback_manager: Optional[CallbackManager] = None,
    system_prompt: Optional[str] = None,
    prefix_messages: Optional[List[ChatMessage]] = None,
    tool_call_parser: Optional[
        Callable[[OpenAIToolCall], Dict]
    ] = None,
    **kwargs: Any
) -> OpenAIAgent

Create an OpenAIAgent from a list of tools.

Similar to `from_defaults` in other classes, this method will infer defaults for a variety of parameters, including the LLM, if they are not specified.
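
For example, a sketch of that defaults inference (the `add` tool and prompt are illustrative; when no `llm` is passed, `Settings.llm` is used and must be an OpenAI function-calling model, otherwise a `ValueError` is raised):

```python
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import FunctionTool


def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


# llm defaults to Settings.llm; system_prompt becomes a single system
# message in prefix_messages (passing both raises a ValueError).
agent = OpenAIAgent.from_tools(
    tools=[FunctionTool.from_defaults(fn=add)],
    system_prompt="You are a terse math assistant.",
    max_function_calls=3,
)
```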

Source code in llama_index/agent/openai/base.py
    @classmethod
    def from_tools(
        cls,
        tools: Optional[List[BaseTool]] = None,
        tool_retriever: Optional[ObjectRetriever[BaseTool]] = None,
        llm: Optional[LLM] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        memory: Optional[BaseMemory] = None,
        memory_cls: Type[BaseMemory] = ChatMemoryBuffer,
        verbose: bool = False,
        max_function_calls: int = DEFAULT_MAX_FUNCTION_CALLS,
        default_tool_choice: str = "auto",
        callback_manager: Optional[CallbackManager] = None,
        system_prompt: Optional[str] = None,
        prefix_messages: Optional[List[ChatMessage]] = None,
        tool_call_parser: Optional[Callable[[OpenAIToolCall], Dict]] = None,
        **kwargs: Any,
    ) -> "OpenAIAgent":
        """从工具列表中创建一个OpenAIAgent。

类似于其他类中的`from_defaults`,该方法将推断出各种参数的默认值,包括如果它们未指定LLM。
"""
        tools = tools or []

        chat_history = chat_history or []
        llm = llm or Settings.llm
        if not isinstance(llm, OpenAI):
            raise ValueError("llm must be a OpenAI instance")

        if callback_manager is not None:
            llm.callback_manager = callback_manager

        memory = memory or memory_cls.from_defaults(chat_history, llm=llm)

        if not llm.metadata.is_function_calling_model:
            raise ValueError(
                f"Model name {llm.model} does not support function calling API. "
            )

        if system_prompt is not None:
            if prefix_messages is not None:
                raise ValueError(
                    "Cannot specify both system_prompt and prefix_messages"
                )
            prefix_messages = [ChatMessage(content=system_prompt, role="system")]

        prefix_messages = prefix_messages or []

        return cls(
            tools=tools,
            tool_retriever=tool_retriever,
            llm=llm,
            memory=memory,
            prefix_messages=prefix_messages,
            verbose=verbose,
            max_function_calls=max_function_calls,
            callback_manager=callback_manager,
            default_tool_choice=default_tool_choice,
            tool_call_parser=tool_call_parser,
        )

OpenAIAssistantAgent #

Bases: BaseAgent

OpenAIAssistant agent.

Wrapper around the OpenAI Assistants API: https://platform.openai.com/docs/assistants/overview
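
A quickstart sketch (the assistant name, instructions, and the built-in `code_interpreter` tool are illustrative; `OPENAI_API_KEY` must be set, and the import path follows the source path shown below):

```python
from llama_index.agent.openai.openai_assistant_agent import OpenAIAssistantAgent

# from_new creates the assistant server-side and opens a fresh thread.
agent = OpenAIAssistantAgent.from_new(
    name="Math Tutor",
    instructions="You are a personal math tutor. Answer math questions.",
    openai_tools=[{"type": "code_interpreter"}],
    verbose=True,
)
print(agent.chat("Solve 3x + 11 = 14."))
```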

Source code in llama_index/agent/openai/openai_assistant_agent.py
class OpenAIAssistantAgent(BaseAgent):
    """OpenAIAssistant代理。

    # 对OpenAI助手API的封装:https://platform.openai.com/docs/assistants/overview"""

    def __init__(
        self,
        client: Any,
        assistant: Any,
        tools: Optional[List[BaseTool]],
        callback_manager: Optional[CallbackManager] = None,
        thread_id: Optional[str] = None,
        instructions_prefix: Optional[str] = None,
        run_retrieve_sleep_time: float = 0.1,
        file_dict: Dict[str, str] = {},
        verbose: bool = False,
    ) -> None:
        """初始化参数。"""
        from openai import OpenAI
        from openai.types.beta.assistant import Assistant

        self._client = cast(OpenAI, client)
        self._assistant = cast(Assistant, assistant)
        self._tools = tools or []
        if thread_id is None:
            thread = self._client.beta.threads.create()
            thread_id = thread.id
        self._thread_id = thread_id
        self._instructions_prefix = instructions_prefix
        self._run_retrieve_sleep_time = run_retrieve_sleep_time
        self._verbose = verbose
        self.file_dict = file_dict

        self.callback_manager = callback_manager or CallbackManager([])

    @classmethod
    def from_new(
        cls,
        name: str,
        instructions: str,
        tools: Optional[List[BaseTool]] = None,
        openai_tools: Optional[List[Dict]] = None,
        thread_id: Optional[str] = None,
        model: str = "gpt-4-1106-preview",
        instructions_prefix: Optional[str] = None,
        run_retrieve_sleep_time: float = 0.1,
        files: Optional[List[str]] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        file_ids: Optional[List[str]] = None,
        api_key: Optional[str] = None,
    ) -> "OpenAIAssistantAgent":
        """来自新助手。

Args:
    name: 助手的名称
    instructions: 助手的指示
    tools: 工具列表
    openai_tools: OpenAI工具列表
    thread_id: 线程ID
    model: 模型
    run_retrieve_sleep_time: 运行检索睡眠时间
    files: 文件
    instructions_prefix: 指示前缀
    callback_manager: 回调管理器
    verbose: 冗长的
    file_ids: 文件ID列表
    api_key: OpenAI API密钥
"""
        from openai import OpenAI

        # this is the set of openai tools
        # not to be confused with the tools we pass in for function calling
        openai_tools = openai_tools or []
        tools = tools or []
        tool_fns = [t.metadata.to_openai_tool() for t in tools]
        all_openai_tools = openai_tools + tool_fns

        # initialize client
        client = OpenAI(api_key=api_key)

        # process files
        files = files or []
        file_ids = file_ids or []

        file_dict = _process_files(client, files)

        # TODO: openai's typing is a bit sus
        all_openai_tools = cast(List[Any], all_openai_tools)
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=cast(List[Any], all_openai_tools),
            model=model,
        )
        return cls(
            client,
            assistant,
            tools,
            callback_manager=callback_manager,
            thread_id=thread_id,
            instructions_prefix=instructions_prefix,
            file_dict=file_dict,
            run_retrieve_sleep_time=run_retrieve_sleep_time,
            verbose=verbose,
        )

    @classmethod
    def from_existing(
        cls,
        assistant_id: str,
        tools: Optional[List[BaseTool]] = None,
        thread_id: Optional[str] = None,
        instructions_prefix: Optional[str] = None,
        run_retrieve_sleep_time: float = 0.1,
        callback_manager: Optional[CallbackManager] = None,
        api_key: Optional[str] = None,
        verbose: bool = False,
    ) -> "OpenAIAssistantAgent":
        """从现有助手ID。

Args:
    assistant_id:助手的ID
    tools:助手可以使用的BaseTools列表
    thread_id:线程ID
    run_retrieve_sleep_time:运行检索的休眠时间
    instructions_prefix:指令前缀
    callback_manager:回调管理器
    api_key:OpenAI API密钥
    verbose:详细信息
"""
        from openai import OpenAI

        # initialize client
        client = OpenAI(api_key=api_key)

        # get assistant
        assistant = client.beta.assistants.retrieve(assistant_id)
        # assistant.tools is incompatible with BaseTools so have to pass from params

        return cls(
            client,
            assistant,
            tools=tools,
            callback_manager=callback_manager,
            thread_id=thread_id,
            instructions_prefix=instructions_prefix,
            run_retrieve_sleep_time=run_retrieve_sleep_time,
            verbose=verbose,
        )

    @property
    def assistant(self) -> Any:
        """获取助手。"""
        return self._assistant

    @property
    def client(self) -> Any:
        """获取客户端。"""
        return self._client

    @property
    def thread_id(self) -> str:
        """获取线程ID。"""
        return self._thread_id

    @property
    def files_dict(self) -> Dict[str, str]:
        """获取文件字典。"""
        return self.file_dict

    @property
    def chat_history(self) -> List[ChatMessage]:
        raw_messages = self._client.beta.threads.messages.list(
            thread_id=self._thread_id, order="asc"
        )
        return from_openai_thread_messages(list(raw_messages))

    def reset(self) -> None:
        """删除并创建一个新的线程。"""
        self._client.beta.threads.delete(self._thread_id)
        thread = self._client.beta.threads.create()
        thread_id = thread.id
        self._thread_id = thread_id

    def get_tools(self, message: str) -> List[BaseTool]:
        """获取工具。"""
        return self._tools

    def upload_files(self, files: List[str]) -> Dict[str, Any]:
        """上传文件。"""
        return _process_files(self._client, files)

    def add_message(self, message: str, file_ids: Optional[List[str]] = None) -> Any:
        """向助手添加消息。"""
        attachments = format_attachments(file_ids=file_ids)
        return self._client.beta.threads.messages.create(
            thread_id=self._thread_id,
            role="user",
            content=message,
            attachments=attachments,
        )

    def _run_function_calling(self, run: Any) -> List[ToolOutput]:
        """运行函数调用。"""
        tool_calls = run.required_action.submit_tool_outputs.tool_calls
        tool_output_dicts = []
        tool_output_objs: List[ToolOutput] = []
        for tool_call in tool_calls:
            fn_obj = tool_call.function
            _, tool_output = call_function(self._tools, fn_obj, verbose=self._verbose)
            tool_output_dicts.append(
                {"tool_call_id": tool_call.id, "output": str(tool_output)}
            )
            tool_output_objs.append(tool_output)

        # submit tool outputs
        # TODO: openai's typing is a bit sus
        self._client.beta.threads.runs.submit_tool_outputs(
            thread_id=self._thread_id,
            run_id=run.id,
            tool_outputs=cast(List[Any], tool_output_dicts),
        )
        return tool_output_objs

    async def _arun_function_calling(self, run: Any) -> List[ToolOutput]:
        """运行函数调用。"""
        tool_calls = run.required_action.submit_tool_outputs.tool_calls
        tool_output_dicts = []
        tool_output_objs: List[ToolOutput] = []
        for tool_call in tool_calls:
            fn_obj = tool_call.function
            _, tool_output = await acall_function(
                self._tools, fn_obj, verbose=self._verbose
            )
            tool_output_dicts.append(
                {"tool_call_id": tool_call.id, "output": str(tool_output)}
            )
            tool_output_objs.append(tool_output)

        # submit tool outputs
        self._client.beta.threads.runs.submit_tool_outputs(
            thread_id=self._thread_id,
            run_id=run.id,
            tool_outputs=cast(List[Any], tool_output_dicts),
        )
        return tool_output_objs

    def run_assistant(
        self, instructions_prefix: Optional[str] = None
    ) -> Tuple[Any, Dict]:
        """运行助手。"""
        instructions_prefix = instructions_prefix or self._instructions_prefix
        run = self._client.beta.threads.runs.create(
            thread_id=self._thread_id,
            assistant_id=self._assistant.id,
            instructions=instructions_prefix,
        )
        from openai.types.beta.threads import Run

        run = cast(Run, run)

        sources = []

        while run.status in ["queued", "in_progress", "requires_action"]:
            run = self._client.beta.threads.runs.retrieve(
                thread_id=self._thread_id, run_id=run.id
            )
            if run.status == "requires_action":
                cur_tool_outputs = self._run_function_calling(run)
                sources.extend(cur_tool_outputs)

            time.sleep(self._run_retrieve_sleep_time)
        if run.status == "failed":
            raise ValueError(
                f"Run failed with status {run.status}.\n" f"Error: {run.last_error}"
            )
        return run, {"sources": sources}

    async def arun_assistant(
        self, instructions_prefix: Optional[str] = None
    ) -> Tuple[Any, Dict]:
        """运行助手。"""
        instructions_prefix = instructions_prefix or self._instructions_prefix
        run = self._client.beta.threads.runs.create(
            thread_id=self._thread_id,
            assistant_id=self._assistant.id,
            instructions=instructions_prefix,
        )
        from openai.types.beta.threads import Run

        run = cast(Run, run)

        sources = []

        while run.status in ["queued", "in_progress", "requires_action"]:
            run = self._client.beta.threads.runs.retrieve(
                thread_id=self._thread_id, run_id=run.id
            )
            if run.status == "requires_action":
                cur_tool_outputs = await self._arun_function_calling(run)
                sources.extend(cur_tool_outputs)

            await asyncio.sleep(self._run_retrieve_sleep_time)
        if run.status == "failed":
            raise ValueError(
                f"Run failed with status {run.status}.\n" f"Error: {run.last_error}"
            )
        return run, {"sources": sources}

    @property
    def latest_message(self) -> ChatMessage:
        """获取最新消息。"""
        raw_messages = self._client.beta.threads.messages.list(
            thread_id=self._thread_id, order="desc"
        )
        messages = from_openai_thread_messages(list(raw_messages))
        return messages[0]

    def _chat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
        mode: ChatResponseMode = ChatResponseMode.WAIT,
    ) -> AGENT_CHAT_RESPONSE_TYPE:
        """主要聊天界面。"""
        # TODO: since chat interface doesn't expose additional kwargs
        # we can't pass in file_ids per message
        _added_message_obj = self.add_message(message)
        _run, metadata = self.run_assistant(
            instructions_prefix=self._instructions_prefix,
        )
        latest_message = self.latest_message
        # get most recent message content
        return AgentChatResponse(
            response=str(latest_message.content),
            sources=metadata["sources"],
        )

    async def _achat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
        mode: ChatResponseMode = ChatResponseMode.WAIT,
    ) -> AGENT_CHAT_RESPONSE_TYPE:
        """异步主聊天界面。"""
        self.add_message(message)
        run, metadata = await self.arun_assistant(
            instructions_prefix=self._instructions_prefix,
        )
        latest_message = self.latest_message
        # get most recent message content
        return AgentChatResponse(
            response=str(latest_message.content),
            sources=metadata["sources"],
        )

    @trace_method("chat")
    def chat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
    ) -> AgentChatResponse:
        with self.callback_manager.event(
            CBEventType.AGENT_STEP,
            payload={EventPayload.MESSAGES: [message]},
        ) as e:
            chat_response = self._chat(
                message, chat_history, function_call, mode=ChatResponseMode.WAIT
            )
            assert isinstance(chat_response, AgentChatResponse)
            e.on_end(payload={EventPayload.RESPONSE: chat_response})
        return chat_response

    @trace_method("chat")
    async def achat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
    ) -> AgentChatResponse:
        with self.callback_manager.event(
            CBEventType.AGENT_STEP,
            payload={EventPayload.MESSAGES: [message]},
        ) as e:
            chat_response = await self._achat(
                message, chat_history, function_call, mode=ChatResponseMode.WAIT
            )
            assert isinstance(chat_response, AgentChatResponse)
            e.on_end(payload={EventPayload.RESPONSE: chat_response})
        return chat_response

    @trace_method("chat")
    def stream_chat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
    ) -> StreamingAgentChatResponse:
        raise NotImplementedError("stream_chat not implemented")

    @trace_method("chat")
    async def astream_chat(
        self,
        message: str,
        chat_history: Optional[List[ChatMessage]] = None,
        function_call: Union[str, dict] = "auto",
    ) -> StreamingAgentChatResponse:
        raise NotImplementedError("astream_chat not implemented")

assistant property #

assistant: Any

Get assistant.

client property #

client: Any

Get client.

thread_id property #

thread_id: str

Get thread id.

files_dict property #

files_dict: Dict[str, str]

Get file dict.

latest_message property #

latest_message: ChatMessage

Get latest message.

from_new classmethod #

from_new(
    name: str,
    instructions: str,
    tools: Optional[List[BaseTool]] = None,
    openai_tools: Optional[List[Dict]] = None,
    thread_id: Optional[str] = None,
    model: str = "gpt-4-1106-preview",
    instructions_prefix: Optional[str] = None,
    run_retrieve_sleep_time: float = 0.1,
    files: Optional[List[str]] = None,
    callback_manager: Optional[CallbackManager] = None,
    verbose: bool = False,
    file_ids: Optional[List[str]] = None,
    api_key: Optional[str] = None,
) -> OpenAIAssistantAgent

From new assistant.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `name` | `str` | name of assistant | *required* |
| `instructions` | `str` | instructions for assistant | *required* |
| `tools` | `Optional[List[BaseTool]]` | list of BaseTools | `None` |
| `openai_tools` | `Optional[List[Dict]]` | list of OpenAI tools | `None` |
| `thread_id` | `Optional[str]` | thread id | `None` |
| `model` | `str` | model name | `'gpt-4-1106-preview'` |
| `run_retrieve_sleep_time` | `float` | sleep time between run retrievals | `0.1` |
| `files` | `Optional[List[str]]` | files to upload | `None` |
| `instructions_prefix` | `Optional[str]` | instructions prefix | `None` |
| `callback_manager` | `Optional[CallbackManager]` | callback manager | `None` |
| `verbose` | `bool` | verbose flag | `False` |
| `file_ids` | `Optional[List[str]]` | list of file ids | `None` |
| `api_key` | `Optional[str]` | OpenAI API key | `None` |
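
For instance, a sketch of creating an assistant with an attached file (the path and instructions are placeholders; `files` are uploaded through the OpenAI files API before the assistant is created):

```python
from llama_index.agent.openai.openai_assistant_agent import OpenAIAssistantAgent

agent = OpenAIAssistantAgent.from_new(
    name="Analyst",
    instructions="Answer questions about the attached CSV.",
    openai_tools=[{"type": "code_interpreter"}],
    files=["data/transactions.csv"],  # placeholder path
    run_retrieve_sleep_time=0.5,      # poll run status every 0.5s
)
```
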
Source code in llama_index/agent/openai/openai_assistant_agent.py
    @classmethod
    def from_new(
        cls,
        name: str,
        instructions: str,
        tools: Optional[List[BaseTool]] = None,
        openai_tools: Optional[List[Dict]] = None,
        thread_id: Optional[str] = None,
        model: str = "gpt-4-1106-preview",
        instructions_prefix: Optional[str] = None,
        run_retrieve_sleep_time: float = 0.1,
        files: Optional[List[str]] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        file_ids: Optional[List[str]] = None,
        api_key: Optional[str] = None,
    ) -> "OpenAIAssistantAgent":
        """来自新助手。

Args:
    name: 助手的名称
    instructions: 助手的指示
    tools: 工具列表
    openai_tools: OpenAI工具列表
    thread_id: 线程ID
    model: 模型
    run_retrieve_sleep_time: 运行检索睡眠时间
    files: 文件
    instructions_prefix: 指示前缀
    callback_manager: 回调管理器
    verbose: 冗长的
    file_ids: 文件ID列表
    api_key: OpenAI API密钥
"""
        from openai import OpenAI

        # this is the set of openai tools
        # not to be confused with the tools we pass in for function calling
        openai_tools = openai_tools or []
        tools = tools or []
        tool_fns = [t.metadata.to_openai_tool() for t in tools]
        all_openai_tools = openai_tools + tool_fns

        # initialize client
        client = OpenAI(api_key=api_key)

        # process files
        files = files or []
        file_ids = file_ids or []

        file_dict = _process_files(client, files)

        # TODO: openai's typing is a bit sus
        all_openai_tools = cast(List[Any], all_openai_tools)
        assistant = client.beta.assistants.create(
            name=name,
            instructions=instructions,
            tools=cast(List[Any], all_openai_tools),
            model=model,
        )
        return cls(
            client,
            assistant,
            tools,
            callback_manager=callback_manager,
            thread_id=thread_id,
            instructions_prefix=instructions_prefix,
            file_dict=file_dict,
            run_retrieve_sleep_time=run_retrieve_sleep_time,
            verbose=verbose,
        )

from_existing classmethod #

from_existing(
    assistant_id: str,
    tools: Optional[List[BaseTool]] = None,
    thread_id: Optional[str] = None,
    instructions_prefix: Optional[str] = None,
    run_retrieve_sleep_time: float = 0.1,
    callback_manager: Optional[CallbackManager] = None,
    api_key: Optional[str] = None,
    verbose: bool = False,
) -> OpenAIAssistantAgent

From existing assistant id.
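
A sketch of reconnecting to an assistant created earlier (the `asst_...` id is a placeholder; function tools must be re-passed because `assistant.tools` cannot be converted back into `BaseTool` objects):

```python
from llama_index.agent.openai.openai_assistant_agent import OpenAIAssistantAgent

agent = OpenAIAssistantAgent.from_existing(
    assistant_id="asst_...",  # placeholder assistant id
    thread_id=None,           # None starts a fresh thread
)
print(agent.chat("Hello again."))
```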

Source code in llama_index/agent/openai/openai_assistant_agent.py
    @classmethod
    def from_existing(
        cls,
        assistant_id: str,
        tools: Optional[List[BaseTool]] = None,
        thread_id: Optional[str] = None,
        instructions_prefix: Optional[str] = None,
        run_retrieve_sleep_time: float = 0.1,
        callback_manager: Optional[CallbackManager] = None,
        api_key: Optional[str] = None,
        verbose: bool = False,
    ) -> "OpenAIAssistantAgent":
        """从现有助手ID。

Args:
    assistant_id:助手的ID
    tools:助手可以使用的BaseTools列表
    thread_id:线程ID
    run_retrieve_sleep_time:运行检索的休眠时间
    instructions_prefix:指令前缀
    callback_manager:回调管理器
    api_key:OpenAI API密钥
    verbose:详细信息
"""
        from openai import OpenAI

        # initialize client
        client = OpenAI(api_key=api_key)

        # get assistant
        assistant = client.beta.assistants.retrieve(assistant_id)
        # assistant.tools is incompatible with BaseTools so have to pass from params

        return cls(
            client,
            assistant,
            tools=tools,
            callback_manager=callback_manager,
            thread_id=thread_id,
            instructions_prefix=instructions_prefix,
            run_retrieve_sleep_time=run_retrieve_sleep_time,
            verbose=verbose,
        )

reset #

reset() -> None

Delete and create a new thread.

Source code in llama_index/agent/openai/openai_assistant_agent.py
def reset(self) -> None:
    """删除并创建一个新的线程。"""
    self._client.beta.threads.delete(self._thread_id)
    thread = self._client.beta.threads.create()
    thread_id = thread.id
    self._thread_id = thread_id

get_tools #

get_tools(message: str) -> List[BaseTool]

Get tools.

Source code in llama_index/agent/openai/openai_assistant_agent.py
def get_tools(self, message: str) -> List[BaseTool]:
    """获取工具。"""
    return self._tools

upload_files #

upload_files(files: List[str]) -> Dict[str, Any]

Upload files.

Source code in llama_index/agent/openai/openai_assistant_agent.py
def upload_files(self, files: List[str]) -> Dict[str, Any]:
    """上传文件。"""
    return _process_files(self._client, files)

add_message #

add_message(
    message: str, file_ids: Optional[List[str]] = None
) -> Any

Add message to assistant.
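
Combined with `upload_files`, a sketch of attaching files to a message (the path is a placeholder, `agent` is an existing `OpenAIAssistantAgent`, and this assumes the returned dict maps file ids to file names, as the `files_dict` property suggests):

```python
file_dict = agent.upload_files(["report.pdf"])  # placeholder path
agent.add_message(
    "Summarize the attached report.",
    file_ids=list(file_dict.keys()),
)
```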

Source code in llama_index/agent/openai/openai_assistant_agent.py
def add_message(self, message: str, file_ids: Optional[List[str]] = None) -> Any:
    """向助手添加消息。"""
    attachments = format_attachments(file_ids=file_ids)
    return self._client.beta.threads.messages.create(
        thread_id=self._thread_id,
        role="user",
        content=message,
        attachments=attachments,
    )

run_assistant #

run_assistant(
    instructions_prefix: Optional[str] = None,
) -> Tuple[Any, Dict]

Run assistant.
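
A sketch of the lower-level flow that `chat` wraps: add a message, drive the run loop, then read the newest thread message (assumes `agent` is an existing `OpenAIAssistantAgent`):

```python
agent.add_message("What can you help me with?")

# Polls the run until it leaves queued/in_progress/requires_action,
# executing function calls along the way.
run, metadata = agent.run_assistant()

print(agent.latest_message.content)
print(metadata["sources"])  # ToolOutput objects from any function calls
```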

Source code in llama_index/agent/openai/openai_assistant_agent.py
def run_assistant(
    self, instructions_prefix: Optional[str] = None
) -> Tuple[Any, Dict]:
    """运行助手。"""
    instructions_prefix = instructions_prefix or self._instructions_prefix
    run = self._client.beta.threads.runs.create(
        thread_id=self._thread_id,
        assistant_id=self._assistant.id,
        instructions=instructions_prefix,
    )
    from openai.types.beta.threads import Run

    run = cast(Run, run)

    sources = []

    while run.status in ["queued", "in_progress", "requires_action"]:
        run = self._client.beta.threads.runs.retrieve(
            thread_id=self._thread_id, run_id=run.id
        )
        if run.status == "requires_action":
            cur_tool_outputs = self._run_function_calling(run)
            sources.extend(cur_tool_outputs)

        time.sleep(self._run_retrieve_sleep_time)
    if run.status == "failed":
        raise ValueError(
            f"Run failed with status {run.status}.\n" f"Error: {run.last_error}"
        )
    return run, {"sources": sources}

arun_assistant async #

arun_assistant(
    instructions_prefix: Optional[str] = None,
) -> Tuple[Any, Dict]

Run assistant.
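
The async variant polls with `asyncio.sleep` so other tasks can run while the thread run is in progress; a sketch (again assuming `agent` is an existing `OpenAIAssistantAgent`):

```python
import asyncio


async def main() -> None:
    agent.add_message("Ping")
    # Same loop as run_assistant, but awaiting between status checks.
    run, metadata = await agent.arun_assistant()
    print(agent.latest_message.content)


asyncio.run(main())
```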

Source code in llama_index/agent/openai/openai_assistant_agent.py
async def arun_assistant(
    self, instructions_prefix: Optional[str] = None
) -> Tuple[Any, Dict]:
    """运行助手。"""
    instructions_prefix = instructions_prefix or self._instructions_prefix
    run = self._client.beta.threads.runs.create(
        thread_id=self._thread_id,
        assistant_id=self._assistant.id,
        instructions=instructions_prefix,
    )
    from openai.types.beta.threads import Run

    run = cast(Run, run)

    sources = []

    while run.status in ["queued", "in_progress", "requires_action"]:
        run = self._client.beta.threads.runs.retrieve(
            thread_id=self._thread_id, run_id=run.id
        )
        if run.status == "requires_action":
            cur_tool_outputs = await self._arun_function_calling(run)
            sources.extend(cur_tool_outputs)

        await asyncio.sleep(self._run_retrieve_sleep_time)
    if run.status == "failed":
        raise ValueError(
            f"Run failed with status {run.status}.\n" f"Error: {run.last_error}"
        )
    return run, {"sources": sources}