
Azure OpenAI

AzureOpenAIMultiModal #

Bases: OpenAIMultiModal

Azure OpenAI.

To use this, you must first deploy a model on Azure OpenAI. Unlike OpenAI, you need to specify an engine parameter to identify your deployment (called the "model deployment name" in the Azure portal).

  • model: Name of the model (e.g. text-davinci-003). This is only used to decide completion vs. chat endpoint.
  • engine: This corresponds to the custom name you chose for your deployment when you deployed the model.

You must have the following environment variables set:

  • OPENAI_API_VERSION: set this to 2023-05-15. This may change in the future.
  • AZURE_OPENAI_ENDPOINT: your endpoint should look like https://YOUR_RESOURCE_NAME.openai.azure.com/
  • AZURE_OPENAI_API_KEY: your API key, if the api type is azure.

More information can be found here: https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart?tabs=command-line&pivots=programming-language-python
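For illustration, a minimal usage sketch is shown below. It assumes the class is importable from llama_index.multi_modal_llms.azure_openai (matching the source path below); the deployment name my-gpt-4-vision and the resource name are placeholders to replace with your own values.

import os

from llama_index.multi_modal_llms.azure_openai import AzureOpenAIMultiModal

# The required settings can come from environment variables (as described above)
# or be passed directly as the api_version, azure_endpoint, and api_key arguments.
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
os.environ["AZURE_OPENAI_ENDPOINT"] = "https://YOUR_RESOURCE_NAME.openai.azure.com/"
os.environ["AZURE_OPENAI_API_KEY"] = "<your-api-key>"

# `engine` is the model deployment name from the Azure portal (placeholder here);
# `model` only decides whether the completion or chat endpoint is used.
azure_openai_mm_llm = AzureOpenAIMultiModal(
    engine="my-gpt-4-vision",
    model="gpt-4-vision-preview",
    max_new_tokens=300,
)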

Source code in llama_index/multi_modal_llms/azure_openai/base.py
class AzureOpenAIMultiModal(OpenAIMultiModal):
    """Azure OpenAI。

要使用此功能,您必须首先在Azure OpenAI上部署一个模型。
与OpenAI不同,您需要指定一个 `engine` 参数来识别您的部署(在Azure门户中称为“模型部署名称”)。

- model: 模型的名称(例如 `text-davinci-003`)
    这仅用于决定完成 vs. 聊天终端。
- engine: 这将对应于您在部署模型时选择的自定义名称。

您必须设置以下环境变量:
- `OPENAI_API_VERSION`: 将其设置为 `2023-05-15`
    这可能会在将来更改。
- `AZURE_OPENAI_ENDPOINT`: 您的终结点应该如下所示
    https://YOUR_RESOURCE_NAME.openai.azure.com/
- `AZURE_OPENAI_API_KEY`: 如果api类型是 `azure`,则为您的API密钥

可以在此处找到更多信息:
    https://learn.microsoft.com/en-us/azure/cognitive-services/openai/quickstart?tabs=command-line&pivots=programming-language-python"""

    engine: str = Field(description="The name of the deployed azure engine.")
    azure_endpoint: Optional[str] = Field(
        default=None, description="The Azure endpoint to use."
    )
    azure_deployment: Optional[str] = Field(
        default=None, description="The Azure deployment to use."
    )
    use_azure_ad: bool = Field(
        description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
    )

    _azure_ad_token: Any = PrivateAttr(default=None)

    def __init__(
        self,
        model: str = "gpt-4-vision-preview",
        engine: Optional[str] = None,
        temperature: float = DEFAULT_TEMPERATURE,
        max_new_tokens: Optional[int] = 300,
        additional_kwargs: Optional[Dict[str, Any]] = None,
        context_window: Optional[int] = DEFAULT_CONTEXT_WINDOW,
        max_retries: int = 3,
        timeout: float = 60.0,
        image_detail: str = "low",
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
        api_version: Optional[str] = None,
        # azure specific
        azure_endpoint: Optional[str] = None,
        azure_deployment: Optional[str] = None,
        use_azure_ad: bool = False,
        # aliases for engine
        deployment_name: Optional[str] = None,
        deployment_id: Optional[str] = None,
        deployment: Optional[str] = None,
        messages_to_prompt: Optional[Callable] = None,
        completion_to_prompt: Optional[Callable] = None,
        callback_manager: Optional[CallbackManager] = None,
        default_headers: Optional[Dict[str, str]] = None,
        http_client: Optional[httpx.Client] = None,
        **kwargs: Any,
    ) -> None:
        engine = resolve_from_aliases(
            engine, deployment_name, deployment_id, deployment, azure_deployment
        )

        if engine is None:
            raise ValueError("You must specify an `engine` parameter.")

        azure_endpoint = get_from_param_or_env(
            "azure_endpoint", azure_endpoint, "AZURE_OPENAI_ENDPOINT", ""
        )
        super().__init__(
            engine=engine,
            model=model,
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            additional_kwargs=additional_kwargs,
            context_window=context_window,
            max_retries=max_retries,
            timeout=timeout,
            image_detail=image_detail,
            api_key=api_key,
            api_base=api_base,
            api_version=api_version,
            messages_to_prompt=messages_to_prompt,
            completion_to_prompt=completion_to_prompt,
            callback_manager=callback_manager,
            azure_endpoint=azure_endpoint,
            azure_deployment=azure_deployment,
            use_azure_ad=use_azure_ad,
            default_headers=default_headers,
            http_client=http_client,
            **kwargs,
        )

    def _get_clients(self, **kwargs: Any) -> Tuple[SyncAzureOpenAI, AsyncAzureOpenAI]:
        client = SyncAzureOpenAI(**self._get_credential_kwargs())
        aclient = AsyncAzureOpenAI(**self._get_credential_kwargs())
        return client, aclient

    @classmethod
    def class_name(cls) -> str:
        return "azure_openai_multi_modal_llm"

    @property
    def metadata(self) -> MultiModalLLMMetadata:
        """多模式LLM元数据。"""
        return MultiModalLLMMetadata(
            num_output=self.max_new_tokens or DEFAULT_NUM_OUTPUTS,
            model_name=self.engine,
        )

    def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        if self.use_azure_ad:
            self._azure_ad_token = refresh_openai_azuread_token(self._azure_ad_token)
            self.api_key = self._azure_ad_token.token

        return {
            "api_key": self.api_key or None,
            "max_retries": self.max_retries,
            "azure_endpoint": self.azure_endpoint,
            "azure_deployment": self.azure_deployment,
            "api_version": self.api_version,
            "default_headers": self.default_headers,
            "http_client": self._http_client,
            "timeout": self.timeout,
        }

    def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
        model_kwargs = super()._get_model_kwargs(**kwargs)
        model_kwargs["model"] = self.engine
        return model_kwargs

metadata property #

metadata: MultiModalLLMMetadata

Multi-modal LLM metadata.
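Continuing the instantiation sketch above (with its placeholder deployment name), the metadata reports the engine, i.e. the deployment name, rather than the underlying model name, per the source shown above:

# `azure_openai_mm_llm` comes from the earlier sketch.
meta = azure_openai_mm_llm.metadata
print(meta.model_name)  # the engine / deployment name, e.g. "my-gpt-4-vision"
print(meta.num_output)  # max_new_tokens (300 above), or DEFAULT_NUM_OUTPUTS if unset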