# Source code for langchain_community.tools.edenai.text_moderation

from __future__ import annotations

import logging
from typing import Optional

from langchain_core.callbacks import CallbackManagerForToolRun

from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool

logger = logging.getLogger(__name__)


class EdenAiTextModerationTool(EdenaiTool):
    """Tool that queries the Eden AI explicit text detection endpoint.

    To use, make sure the environment variable ``EDENAI_API_KEY`` is set
    with your API token. You can find your token here:
    https://app.edenai.run/admin/account/settings

    For the API reference, check the Eden AI documentation:
    https://docs.edenai.co/reference/image_explicit_content_create
    """

    # Tool identifier surfaced to the agent/LLM.
    name = "edenai_explicit_content_detection_text"
    # Natural-language contract for the LLM; runtime string, kept verbatim.
    description = (
        "A wrapper around edenai Services explicit content detection for text. "
        """Useful for when you have to scan text for offensive, sexually explicit or suggestive content, it checks also if there is any content of self-harm, violence, racist or hate speech."""
        """the structure of the output is : 'the type of the explicit content : the likelihood of it being explicit' the likelihood is a number between 1 and 5, 1 being the lowest and 5 the highest. something is explicit if the likelihood is equal or higher than 3. for example : nsfw_likelihood: 1 this is not explicit. for example : nsfw_likelihood: 3 this is explicit. """
        "Input should be a string."
    )

    # Language code of the text to moderate (e.g. "en"); required field.
    language: str

    # Eden AI routing: feature/subfeature select the moderation endpoint.
    feature: str = "text"
    subfeature: str = "moderation"

    def _parse_response(self, response: list) -> str:
        """Flatten the raw provider results into '"label": score' lines.

        Entries without an ``nsfw_likelihood`` key are skipped; for the
        rest, the overall likelihood is emitted first, followed by one
        line per (label, likelihood) pair.
        """
        lines: list = []
        for entry in response:
            if "nsfw_likelihood" not in entry.keys():
                continue
            lines.append(f"nsfw_likelihood: {str(entry['nsfw_likelihood'])}")
            lines.extend(
                f'"{tag}": {str(score)}'
                for tag, score in zip(entry["label"], entry["likelihood"])
            )
        return "\n".join(lines)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: send `query` to Eden AI text moderation."""
        return self._call_eden_ai({"text": query, "language": self.language})