Source code for langchain_community.chat_models.minimax

"""封装了Minimax聊天模型。"""
import logging
from typing import Any, Dict, List, Optional, cast

from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
    AIMessage,
    BaseMessage,
    HumanMessage,
)
from langchain_core.outputs import ChatGeneration, ChatResult

from langchain_community.llms.minimax import MinimaxCommon
from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


def _parse_message(msg_type: str, text: str) -> Dict:
    return {"sender_type": msg_type, "text": text}


def _parse_chat_history(history: List[BaseMessage]) -> List:
    """将一系列消息解析为历史记录。"""
    chat_history = []
    for message in history:
        content = cast(str, message.content)
        if isinstance(message, HumanMessage):
            chat_history.append(_parse_message("USER", content))
        if isinstance(message, AIMessage):
            chat_history.append(_parse_message("BOT", content))
    return chat_history
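
# Illustrative example (an assumption about typical input, not library code):
# the helpers above flatten a two-turn exchange into MiniMax's
# ``sender_type``/``text`` message schema.
#
#     _parse_chat_history(
#         [HumanMessage(content="Hi"), AIMessage(content="Hello! How can I help?")]
#     )
#     # -> [{"sender_type": "USER", "text": "Hi"},
#     #     {"sender_type": "BOT", "text": "Hello! How can I help?"}]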


class MiniMaxChat(MinimaxCommon, BaseChatModel):
    """MiniMax large language models.

    To use, you should have the environment variables ``MINIMAX_GROUP_ID``
    and ``MINIMAX_API_KEY`` set with your API token, or pass them as named
    parameters to the constructor.

    Example:
        .. code-block:: python

            from langchain_community.chat_models import MiniMaxChat
            llm = MiniMaxChat(model_name="abab5-chat")
    """

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Generate the next turn in the conversation.

        Args:
            messages: The history of the conversation as a list of messages.
            stop: The list of stop words (optional).
            run_manager: The CallbackManager for the LLM run; not used at the
                moment.

        Returns:
            The ChatResult that contains the outputs generated by the model.

        Raises:
            ValueError: if no messages are provided.
        """
        if not messages:
            raise ValueError(
                "You should provide at least one message to start the chat!"
            )
        history = _parse_chat_history(messages)
        payload = self._default_params
        payload["messages"] = history
        text = self._client.post(payload)

        # Stop words are not enforced by the model parameters, so truncate
        # the completion manually.
        text = text if stop is None else enforce_stop_tokens(text, stop)

        return ChatResult(generations=[ChatGeneration(message=AIMessage(text))])  # type: ignore[misc]

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        raise NotImplementedError(
            "MiniMax AI doesn't support async requests at the moment."
        )
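

# --- Usage sketch (illustrative, not part of the library source) ---
# A minimal synchronous example, assuming the MINIMAX_GROUP_ID and
# MINIMAX_API_KEY environment variables are set; "abab5-chat" is the model
# name taken from the class docstring above. Async usage is unavailable,
# since `_agenerate` raises NotImplementedError.
if __name__ == "__main__":
    chat = MiniMaxChat(model_name="abab5-chat")
    # `invoke` is inherited from BaseChatModel and routes through `_generate`.
    reply = chat.invoke([HumanMessage(content="Tell me a joke about tensors.")])
    print(reply.content)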