Skip to content

Core Callback Classes#

CallbackManager #

Bases: BaseCallbackHandler, ABC

# 回调管理器,用于处理LlamaIndex中事件的回调。

# 回调管理器提供了一种在事件开始/结束时调用处理程序的方法。

# 此外,回调管理器还会跟踪当前事件堆栈。它通过使用一些关键属性来实现这一点。
# - trace_stack - 尚未结束的事件的当前堆栈。当事件结束时,它将从堆栈中移除。
#                 由于这是一个contextvar,它对每个线程/任务都是唯一的。
# - trace_map - 事件ID与其子事件的映射。
#               在事件开始时,trace堆栈的底部被用作trace映射的当前父事件。
# - trace_id - 当前trace的简单名称,通常表示入口点(查询,索引构建,插入等)。

# Args:
#     handlers(List[BaseCallbackHandler]):要使用的处理程序列表。

# 用法:
#     with callback_manager.event(CBEventType.QUERY) as event:
#         event.on_start(payload={key, val})
#         ...
#         event.on_end(payload={key, val})
Source code in llama_index/core/callbacks/base.py
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
class CallbackManager(BaseCallbackHandler, ABC):
    """Callback manager that handles callbacks for events within LlamaIndex.

    The callback manager provides a way to call handlers on event starts/ends.

    Additionally, the callback manager traces the current stack of events.
    It does this by using a few key attributes.
    - trace_stack - The current stack of events that have not ended yet.
                    When an event ends, it is removed from the stack.
                    Since this is a contextvar, it is unique to each
                    thread/task.
    - trace_map - A mapping of event ids to their child events.
                  On the start of events, the bottom of the trace stack
                  is used as the current parent event for the trace map.
    - trace_id - A simple name for the current trace, usually denoting the
                 entrypoint (query, index construction, insert, etc.)

    Args:
        handlers (List[BaseCallbackHandler]): list of handlers to use.

    Usage:
        with callback_manager.event(CBEventType.QUERY) as event:
            event.on_start(payload={key, val})
            ...
            event.on_end(payload={key, val})
    """

    def __init__(self, handlers: Optional[List[BaseCallbackHandler]] = None):
        """Initialize the manager with a list of handlers."""
        from llama_index.core import global_handler

        handlers = handlers or []

        # add eval handlers based on global defaults
        if global_handler is not None:
            new_handler = global_handler
            # go through existing handlers, check if any are same type as new handler
            # if so, error
            for existing_handler in handlers:
                if isinstance(existing_handler, type(new_handler)):
                    raise ValueError(
                        "Cannot add two handlers of the same type "
                        f"{type(new_handler)} to the callback manager."
                    )
            handlers.append(new_handler)

        # if we passed in no handlers, use the global default
        if len(handlers) == 0:
            from llama_index.core.settings import Settings

            # hidden var access to prevent recursion in getter
            cb_manager = Settings._callback_manager
            if cb_manager is not None:
                handlers = cb_manager.handlers

        self.handlers = handlers
        # maps a parent event id to the list of its child event ids
        self._trace_map: Dict[str, List[str]] = defaultdict(list)

    def on_event_start(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: Optional[str] = None,
        parent_id: Optional[str] = None,
        **kwargs: Any,
    ) -> str:
        """Run handlers when an event starts and return the id of the event."""
        event_id = event_id or str(uuid.uuid4())

        # if no trace is running, start a default trace
        try:
            parent_id = parent_id or global_stack_trace.get()[-1]
        except IndexError:
            self.start_trace("llama-index")
            parent_id = global_stack_trace.get()[-1]
        parent_id = cast(str, parent_id)
        self._trace_map[parent_id].append(event_id)
        for handler in self.handlers:
            if event_type not in handler.event_starts_to_ignore:
                handler.on_event_start(
                    event_type,
                    payload,
                    event_id=event_id,
                    parent_id=parent_id,
                    **kwargs,
                )

        if event_type not in LEAF_EVENTS:
            # copy the stack trace to prevent conflicts with threads/coroutines
            current_trace_stack = global_stack_trace.get().copy()
            current_trace_stack.append(event_id)
            global_stack_trace.set(current_trace_stack)

        return event_id

    def on_event_end(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Run handlers when an event ends."""
        event_id = event_id or str(uuid.uuid4())
        for handler in self.handlers:
            if event_type not in handler.event_ends_to_ignore:
                handler.on_event_end(event_type, payload, event_id=event_id, **kwargs)

        if event_type not in LEAF_EVENTS:
            # copy the stack trace to prevent conflicts with threads/coroutines
            current_trace_stack = global_stack_trace.get().copy()
            current_trace_stack.pop()
            global_stack_trace.set(current_trace_stack)

    def add_handler(self, handler: BaseCallbackHandler) -> None:
        """Add a handler to the callback manager."""
        self.handlers.append(handler)

    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove a handler from the callback manager."""
        self.handlers.remove(handler)

    def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
        """Set handlers as the only handlers on the callback manager."""
        self.handlers = handlers

    @classmethod
    def __modify_schema__(cls, schema: Dict[str, Any]) -> None:
        """Avoid serialization errors."""
        schema.update(type="object", default={})

    @contextmanager
    def event(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: Optional[str] = None,
    ) -> Generator["EventContext", None, None]:
        """Context manager for launching and shutting down events.

        Handles sending on_event_start and on_event_end to the handlers
        for the specified event.

        Usage:
            with callback_manager.event(CBEventType.QUERY, payload={key, val}) as event:
                ...
                event.on_end(payload={key, val})  # optional
        """
        # create event context wrapper
        event = EventContext(self, event_type, event_id=event_id)
        event.on_start(payload=payload)

        payload = None
        try:
            yield event
        except Exception as e:
            # data already logged to trace?
            if not hasattr(e, "event_added"):
                payload = {EventPayload.EXCEPTION: e}
                e.event_added = True  # type: ignore
                if not event.finished:
                    event.on_end(payload=payload)
            raise
        finally:
            # ensure event is ended
            if not event.finished:
                event.on_end(payload=payload)

    @contextmanager
    def as_trace(self, trace_id: str) -> Generator[None, None, None]:
        """Context manager for starting and shutting down a trace."""
        self.start_trace(trace_id=trace_id)

        try:
            yield
        except Exception as e:
            # event already added to trace?
            if not hasattr(e, "event_added"):
                self.on_event_start(
                    CBEventType.EXCEPTION, payload={EventPayload.EXCEPTION: e}
                )
                e.event_added = True  # type: ignore

            raise
        finally:
            # ensure trace is ended
            self.end_trace(trace_id=trace_id)

    def start_trace(self, trace_id: Optional[str] = None) -> None:
        """Run when an overall trace is launched."""
        current_trace_stack_ids = global_stack_trace_ids.get().copy()
        if trace_id is not None:
            if len(current_trace_stack_ids) == 0:
                # outermost trace: reset state and notify handlers
                self._reset_trace_events()

                for handler in self.handlers:
                    handler.start_trace(trace_id=trace_id)

                current_trace_stack_ids = [trace_id]
            else:
                # nested trace: only record the id; handlers are not re-notified
                current_trace_stack_ids.append(trace_id)

        global_stack_trace_ids.set(current_trace_stack_ids)

    def end_trace(
        self,
        trace_id: Optional[str] = None,
        trace_map: Optional[Dict[str, List[str]]] = None,
    ) -> None:
        """Run when an overall trace is exited."""
        current_trace_stack_ids = global_stack_trace_ids.get().copy()
        if trace_id is not None and len(current_trace_stack_ids) > 0:
            current_trace_stack_ids.pop()
            if len(current_trace_stack_ids) == 0:
                # outermost trace ended: notify handlers with the collected map
                for handler in self.handlers:
                    handler.end_trace(trace_id=trace_id, trace_map=self._trace_map)
                current_trace_stack_ids = []

        global_stack_trace_ids.set(current_trace_stack_ids)

    def _reset_trace_events(self) -> None:
        """Helper function to reset the current trace."""
        self._trace_map = defaultdict(list)
        global_stack_trace.set([BASE_TRACE_EVENT])

    @property
    def trace_map(self) -> Dict[str, List[str]]:
        """Return the mapping of event ids to their child event ids."""
        return self._trace_map

on_event_start #

on_event_start(
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: Optional[str] = None,
    parent_id: Optional[str] = None,
    **kwargs: Any
) -> str

当事件开始时运行处理程序,并返回事件的ID。

Source code in llama_index/core/callbacks/base.py
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
def on_event_start(
    self,
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: Optional[str] = None,
    parent_id: Optional[str] = None,
    **kwargs: Any,
) -> str:
    """Run handlers when an event starts and return the id of the event."""
    event_id = event_id or str(uuid.uuid4())

    # if no trace is running, start a default trace
    try:
        parent_id = parent_id or global_stack_trace.get()[-1]
    except IndexError:
        self.start_trace("llama-index")
        parent_id = global_stack_trace.get()[-1]
    parent_id = cast(str, parent_id)
    self._trace_map[parent_id].append(event_id)
    for handler in self.handlers:
        if event_type not in handler.event_starts_to_ignore:
            handler.on_event_start(
                event_type,
                payload,
                event_id=event_id,
                parent_id=parent_id,
                **kwargs,
            )

    if event_type not in LEAF_EVENTS:
        # copy the stack trace to prevent conflicts with threads/coroutines
        current_trace_stack = global_stack_trace.get().copy()
        current_trace_stack.append(event_id)
        global_stack_trace.set(current_trace_stack)

    return event_id

on_event_end #

on_event_end(
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: Optional[str] = None,
    **kwargs: Any
) -> None

当事件结束时运行处理程序。

Source code in llama_index/core/callbacks/base.py
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
def on_event_end(
    self,
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: Optional[str] = None,
    **kwargs: Any,
) -> None:
    """Run handlers when an event ends."""
    event_id = event_id or str(uuid.uuid4())
    for handler in self.handlers:
        if event_type not in handler.event_ends_to_ignore:
            handler.on_event_end(event_type, payload, event_id=event_id, **kwargs)

    if event_type not in LEAF_EVENTS:
        # copy the stack trace to prevent conflicts with threads/coroutines
        current_trace_stack = global_stack_trace.get().copy()
        current_trace_stack.pop()
        global_stack_trace.set(current_trace_stack)

add_handler #

add_handler(handler: BaseCallbackHandler) -> None

向回调管理器添加一个处理程序。

Source code in llama_index/core/callbacks/base.py
133
134
135
def add_handler(self, handler: BaseCallbackHandler) -> None:
    """Add a handler to the callback manager."""
    self.handlers.append(handler)

remove_handler #

remove_handler(handler: BaseCallbackHandler) -> None

从回调管理器中移除一个处理程序。

Source code in llama_index/core/callbacks/base.py
137
138
139
def remove_handler(self, handler: BaseCallbackHandler) -> None:
    """Remove a handler from the callback manager."""
    self.handlers.remove(handler)

set_handlers #

set_handlers(handlers: List[BaseCallbackHandler]) -> None

将处理程序设置为回调管理器上唯一的处理程序。

Source code in llama_index/core/callbacks/base.py
141
142
143
def set_handlers(self, handlers: List[BaseCallbackHandler]) -> None:
    """Set handlers as the only handlers on the callback manager."""
    self.handlers = handlers

event #

event(
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: Optional[str] = None,
) -> Generator[EventContext, None, None]

上下文管理器,用于启动和关闭事件。

处理发送 on_event_start 和 on_event_end 到指定事件的处理程序。

用法:

    with callback_manager.event(CBEventType.QUERY, payload={key, val}) as event:
        ...
        event.on_end(payload={key, val})  # 可选

Source code in llama_index/core/callbacks/base.py
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
    @contextmanager
    def event(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: Optional[str] = None,
    ) -> Generator["EventContext", None, None]:
        """Context manager for launching and shutting down events.

        Handles sending on_event_start and on_event_end to the handlers
        for the specified event.

        Usage:
            with callback_manager.event(CBEventType.QUERY, payload={key, val}) as event:
                ...
                event.on_end(payload={key, val})  # optional
        """
        # create event context wrapper
        event = EventContext(self, event_type, event_id=event_id)
        event.on_start(payload=payload)

        payload = None
        try:
            yield event
        except Exception as e:
            # data already logged to trace?
            if not hasattr(e, "event_added"):
                payload = {EventPayload.EXCEPTION: e}
                e.event_added = True  # type: ignore
                if not event.finished:
                    event.on_end(payload=payload)
            raise
        finally:
            # ensure event is ended
            if not event.finished:
                event.on_end(payload=payload)

as_trace #

as_trace(trace_id: str) -> Generator[None, None, None]

上下文管理器,用于启动和关闭跟踪。

Source code in llama_index/core/callbacks/base.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
@contextmanager
def as_trace(self, trace_id: str) -> Generator[None, None, None]:
    """Context manager for starting and shutting down a trace."""
    self.start_trace(trace_id=trace_id)

    try:
        yield
    except Exception as e:
        # event already added to trace?
        if not hasattr(e, "event_added"):
            self.on_event_start(
                CBEventType.EXCEPTION, payload={EventPayload.EXCEPTION: e}
            )
            e.event_added = True  # type: ignore

        raise
    finally:
        # ensure trace is ended
        self.end_trace(trace_id=trace_id)

start_trace #

start_trace(trace_id: Optional[str] = None) -> None

当启动整体跟踪时运行。

Source code in llama_index/core/callbacks/base.py
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
def start_trace(self, trace_id: Optional[str] = None) -> None:
    """Run when an overall trace is launched."""
    current_trace_stack_ids = global_stack_trace_ids.get().copy()
    if trace_id is not None:
        if len(current_trace_stack_ids) == 0:
            # outermost trace: reset state and notify handlers
            self._reset_trace_events()

            for handler in self.handlers:
                handler.start_trace(trace_id=trace_id)

            current_trace_stack_ids = [trace_id]
        else:
            # nested trace: only record the id; handlers are not re-notified
            current_trace_stack_ids.append(trace_id)

    global_stack_trace_ids.set(current_trace_stack_ids)

end_trace #

end_trace(
    trace_id: Optional[str] = None,
    trace_map: Optional[Dict[str, List[str]]] = None,
) -> None

当退出整体跟踪时运行。

Source code in llama_index/core/callbacks/base.py
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
def end_trace(
    self,
    trace_id: Optional[str] = None,
    trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
    """Run when an overall trace is exited."""
    current_trace_stack_ids = global_stack_trace_ids.get().copy()
    if trace_id is not None and len(current_trace_stack_ids) > 0:
        current_trace_stack_ids.pop()
        if len(current_trace_stack_ids) == 0:
            # outermost trace ended: notify handlers with the collected map
            for handler in self.handlers:
                handler.end_trace(trace_id=trace_id, trace_map=self._trace_map)
            current_trace_stack_ids = []

    global_stack_trace_ids.set(current_trace_stack_ids)

BaseCallbackHandler #

Bases: ABC

用于跟踪事件开始和结束的基本回调处理程序。

Source code in llama_index/core/callbacks/base_handler.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
class BaseCallbackHandler(ABC):
    """Base callback handler that can be used to track event starts and ends."""

    def __init__(
        self,
        event_starts_to_ignore: List[CBEventType],
        event_ends_to_ignore: List[CBEventType],
    ) -> None:
        """Initialize the base callback handler."""
        self.event_starts_to_ignore = tuple(event_starts_to_ignore)
        self.event_ends_to_ignore = tuple(event_ends_to_ignore)

    @abstractmethod
    def on_event_start(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: str = "",
        parent_id: str = "",
        **kwargs: Any,
    ) -> str:
        """Run when an event starts and return the id of the event."""

    @abstractmethod
    def on_event_end(
        self,
        event_type: CBEventType,
        payload: Optional[Dict[str, Any]] = None,
        event_id: str = "",
        **kwargs: Any,
    ) -> None:
        """Run when an event ends."""

    @abstractmethod
    def start_trace(self, trace_id: Optional[str] = None) -> None:
        """Run when an overall trace is launched."""

    @abstractmethod
    def end_trace(
        self,
        trace_id: Optional[str] = None,
        trace_map: Optional[Dict[str, List[str]]] = None,
    ) -> None:
        """Run when an overall trace is exited."""

on_event_start abstractmethod #

on_event_start(
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: str = "",
    parent_id: str = "",
    **kwargs: Any
) -> str

当事件开始时运行并返回事件的ID。

Source code in llama_index/core/callbacks/base_handler.py
24
25
26
27
28
29
30
31
32
33
@abstractmethod
def on_event_start(
    self,
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: str = "",
    parent_id: str = "",
    **kwargs: Any,
) -> str:
    """Run when an event starts and return the id of the event."""

on_event_end abstractmethod #

on_event_end(
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: str = "",
    **kwargs: Any
) -> None

事件结束时运行。

Source code in llama_index/core/callbacks/base_handler.py
35
36
37
38
39
40
41
42
43
@abstractmethod
def on_event_end(
    self,
    event_type: CBEventType,
    payload: Optional[Dict[str, Any]] = None,
    event_id: str = "",
    **kwargs: Any,
) -> None:
    """Run when an event ends."""

start_trace abstractmethod #

start_trace(trace_id: Optional[str] = None) -> None

当启动整体跟踪时运行。

Source code in llama_index/core/callbacks/base_handler.py
45
46
47
@abstractmethod
def start_trace(self, trace_id: Optional[str] = None) -> None:
    """Run when an overall trace is launched."""

end_trace abstractmethod #

end_trace(
    trace_id: Optional[str] = None,
    trace_map: Optional[Dict[str, List[str]]] = None,
) -> None

当退出整体跟踪时运行。

Source code in llama_index/core/callbacks/base_handler.py
49
50
51
52
53
54
55
@abstractmethod
def end_trace(
    self,
    trace_id: Optional[str] = None,
    trace_map: Optional[Dict[str, List[str]]] = None,
) -> None:
    """Run when an overall trace is exited."""

回调管理器的基本模式。

CBEvent dataclass #

通用类,用于存储事件信息。

Source code in llama_index/core/callbacks/schema.py
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
@dataclass
class CBEvent:
    """Generic class to store event information.

    Attributes:
        event_type: The CBEventType of this event.
        payload: Optional payload dict attached to the event.
        time: Timestamp string; auto-filled at construction if empty.
        id_: Unique event id; auto-filled with a uuid4 if empty.
    """

    event_type: CBEventType
    payload: Optional[Dict[str, Any]] = None
    time: str = ""
    id_: str = ""

    def __post_init__(self) -> None:
        """Initialize time and id if needed."""
        if not self.time:
            self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
        if not self.id_:
            # Bug fix: the original assigned to `self.id`, which left the
            # declared `id_` field empty and created a stray `id` attribute.
            self.id_ = str(uuid.uuid4())

CBEventType #

Bases: str, Enum

回调管理器事件类型。

属性:

- CHUNKING: 记录文本分割前后的日志。
- NODE_PARSING: 记录文档和它们解析成的节点的日志。
- EMBEDDING: 记录嵌入文本的数量的日志。
- LLM: 记录LLM调用的模板和响应的日志。
- QUERY: 跟踪每个查询的开始和结束。
- RETRIEVE: 记录为查询检索到的节点的日志。
- SYNTHESIZE: 记录合成调用的结果的日志。
- TREE: 记录生成的摘要和摘要级别的日志。
- SUB_QUESTION: 记录生成的子问题和答案的日志。

Source code in llama_index/core/callbacks/schema.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
class CBEventType(str, Enum):
    """Callback manager event types.

    Attributes:
        CHUNKING: Logs for the before and after of text splitting.
        NODE_PARSING: Logs for the documents and the nodes that they are parsed into.
        EMBEDDING: Logs for the number of texts embedded.
        LLM: Logs for the template and response of LLM calls.
        QUERY: Keeps track of the start and end of each query.
        RETRIEVE: Logs for the nodes retrieved for a query.
        SYNTHESIZE: Logs for the result of synthesize calls.
        TREE: Logs for the summary and level of summaries generated.
        SUB_QUESTION: Logs for a generated sub question and answer.
    """

    CHUNKING = "chunking"
    NODE_PARSING = "node_parsing"
    EMBEDDING = "embedding"
    LLM = "llm"
    QUERY = "query"
    RETRIEVE = "retrieve"
    SYNTHESIZE = "synthesize"
    TREE = "tree"
    SUB_QUESTION = "sub_question"
    TEMPLATING = "templating"
    FUNCTION_CALL = "function_call"
    RERANKING = "reranking"
    EXCEPTION = "exception"
    AGENT_STEP = "agent_step"

EventPayload #

Bases: str, Enum

Source code in llama_index/core/callbacks/schema.py
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
class EventPayload(str, Enum):
    """Common keys used in callback event payload dicts."""

    DOCUMENTS = "documents"  # list of documents before parsing
    CHUNKS = "chunks"  # list of text chunks
    NODES = "nodes"  # list of nodes
    PROMPT = "formatted_prompt"  # formatted prompt sent to LLM
    MESSAGES = "messages"  # list of messages sent to LLM
    COMPLETION = "completion"  # completion from LLM
    RESPONSE = "response"  # message response from LLM
    QUERY_STR = "query_str"  # query used for query engine
    SUB_QUESTION = "sub_question"  # a sub question & answer + sources
    EMBEDDINGS = "embeddings"  # list of embeddings
    TOP_K = "top_k"  # top k nodes retrieved
    ADDITIONAL_KWARGS = "additional_kwargs"  # additional kwargs for event call
    SERIALIZED = "serialized"  # serialized object for event caller
    FUNCTION_CALL = "function_call"  # function call for the LLM
    FUNCTION_OUTPUT = "function_call_response"  # function call output
    TOOL = "tool"  # tool used in LLM call
    MODEL_NAME = "model_name"  # model name used in an event
    TEMPLATE = "template"  # template used in LLM call
    TEMPLATE_VARS = "template_vars"  # template variables used in LLM call
    SYSTEM_PROMPT = "system_prompt"  # system prompt used in LLM call
    QUERY_WRAPPER_PROMPT = "query_wrapper_prompt"  # query wrapper prompt used in LLM
    EXCEPTION = "exception"  # exception raised in an event