Source code for langchain.callbacks.streaming_aiter_final_only

from __future__ import annotations

from typing import Any, Dict, List, Optional

from langchain_core.outputs import LLMResult

from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler

DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
    """Callback handler that returns an async iterator.

    Only the final output of the agent will be iterated over.
    """
    def append_to_last_tokens(self, token: str) -> None:
        # Keep a sliding window of the most recent tokens, both raw and
        # whitespace-stripped, sized to the answer prefix.
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        # Compare the sliding window against the answer prefix, optionally
        # ignoring surrounding whitespace and newlines.
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens
    def __init__(
        self,
        *,
        answer_prefix_tokens: Optional[List[str]] = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False,
    ) -> None:
        """Instantiate AsyncFinalIteratorCallbackHandler.

        Args:
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is ["Final", "Answer", ":"].
            strip_tokens: Whether to ignore white space and new lines when
                comparing answer_prefix_tokens to the last tokens (to determine
                whether the answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be
                streamed.
        """
        super().__init__()
        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False
    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # If two calls are made in a row, this resets the state
        self.done.clear()
        self.answer_reached = False

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        if self.answer_reached:
            self.done.set()
    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer_prefix_tokens list
        if self.check_if_answer_reached():
            self.answer_reached = True
            if self.stream_prefix:
                for t in self.last_tokens:
                    self.queue.put_nowait(t)
            return

        # Once the answer prefix has been seen, stream every subsequent token
        if self.answer_reached:
            self.queue.put_nowait(token)
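
For context, a minimal usage sketch follows. It is not part of this module: it assumes langchain-openai is installed, an OpenAI API key is configured, and a ReAct-style agent whose final LLM call emits the "Final Answer:" prefix that this handler watches for. Only tokens after that prefix are yielded by aiter(); the tool choice (llm-math) and the question are placeholders.

# Illustrative usage sketch (assumptions: langchain-openai installed,
# OPENAI_API_KEY set, agent emits the default "Final Answer:" prefix).
import asyncio

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.streaming_aiter_final_only import (
    AsyncFinalIteratorCallbackHandler,
)
from langchain_openai import ChatOpenAI

handler = AsyncFinalIteratorCallbackHandler()
llm = ChatOpenAI(streaming=True, callbacks=[handler], temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)


async def stream_final_answer(question: str) -> None:
    # Run the agent concurrently; aiter() yields only the tokens that appear
    # after the "Final Answer:" prefix and stops once the final call ends.
    task = asyncio.create_task(agent.arun(question))
    async for token in handler.aiter():
        print(token, end="", flush=True)
    await task


asyncio.run(stream_final_answer("What is 2 to the 10th power?"))

Because on_llm_start resets the state and on_llm_end only sets the done event after the prefix has been seen, intermediate agent steps produce no output from aiter(); the consumer sees a continuous stream of just the final answer.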