Base

BaseRagasLLM dataclass

BaseRagasLLM(
    run_config: RunConfig = RunConfig(),
    multiple_completion_supported: bool = False,
)

Bases: ABC
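
A hedged sketch of a custom subclass, assuming the abstract interface is the pair generate_text / agenerate_text that the wrappers below implement (signatures mirror the generate method documented below; the echo behaviour is purely illustrative):

from langchain_core.outputs import Generation, LLMResult
from ragas.llms.base import BaseRagasLLM

class EchoLLM(BaseRagasLLM):
    """Illustrative only: returns the prompt text back as the completion."""

    def generate_text(self, prompt, n=1, temperature=1e-8, stop=None, callbacks=None) -> LLMResult:
        return LLMResult(generations=[[Generation(text=prompt.to_string())]])

    async def agenerate_text(self, prompt, n=1, temperature=1e-8, stop=None, callbacks=None) -> LLMResult:
        return self.generate_text(prompt, n, temperature, stop, callbacks)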

get_temperature

get_temperature(n: int) -> float

Return the temperature to use for completion based on n.

Source code in src/ragas/llms/base.py
def get_temperature(self, n: int) -> float:
    """Return the temperature to use for completion based on n."""
    return 0.3 if n > 1 else 1e-8
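
A quick illustration of the rule, assuming llm is any concrete BaseRagasLLM instance (hypothetical here):

llm.get_temperature(n=1)  # 1e-8 -> effectively deterministic single completion
llm.get_temperature(n=4)  # 0.3  -> mild sampling diversity across completions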

generate async

generate(
    prompt: PromptValue,
    n: int = 1,
    temperature: Optional[float] = None,
    stop: Optional[List[str]] = None,
    callbacks: Callbacks = None,
    is_async: bool = True,
) -> LLMResult

Generate text using the given event loop.

Source code in src/ragas/llms/base.py
async def generate(
    self,
    prompt: PromptValue,
    n: int = 1,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: Callbacks = None,
    is_async: bool = True,
) -> LLMResult:
    """Generate text using the given event loop."""

    if temperature is None:
        temperature = 1e-8

    if is_async:
        agenerate_text_with_retry = add_async_retry(
            self.agenerate_text, self.run_config
        )
        return await agenerate_text_with_retry(
            prompt=prompt,
            n=n,
            temperature=temperature,
            stop=stop,
            callbacks=callbacks,
        )
    else:
        loop = asyncio.get_event_loop()
        generate_text_with_retry = add_retry(self.generate_text, self.run_config)
        generate_text = partial(
            generate_text_with_retry,
            prompt=prompt,
            n=n,
            temperature=temperature,
            stop=stop,
            callbacks=callbacks,
        )
        return await loop.run_in_executor(None, generate_text)
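
A usage sketch, assuming llm is a concrete BaseRagasLLM (e.g. one of the wrappers below) and using langchain_core's StringPromptValue as the prompt:

import asyncio

from langchain_core.prompt_values import StringPromptValue

async def main():
    prompt = StringPromptValue(text="What is retrieval-augmented generation?")
    # llm is a hypothetical, already-constructed BaseRagasLLM instance
    result = await llm.generate(prompt=prompt, n=1)
    print(result.generations[0][0].text)

asyncio.run(main())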

LangchainLLMWrapper

LangchainLLMWrapper(
    langchain_llm: BaseLanguageModel,
    run_config: Optional[RunConfig] = None,
)

Bases: BaseRagasLLM

A simple base class for RagasLLMs based on Langchain's BaseLanguageModel interface. It implements two methods:

- generate_text: for generating text from a given PromptValue
- agenerate_text: for generating text from a given PromptValue asynchronously

Source code in src/ragas/llms/base.py
def __init__(
    self, langchain_llm: BaseLanguageModel, run_config: t.Optional[RunConfig] = None
):
    self.langchain_llm = langchain_llm
    if run_config is None:
        run_config = RunConfig()
    self.set_run_config(run_config)
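
A minimal construction sketch, assuming langchain-openai is installed and OPENAI_API_KEY is set; the model name and timeout are illustrative:

from langchain_openai import ChatOpenAI
from ragas.llms.base import LangchainLLMWrapper
from ragas.run_config import RunConfig

evaluator_llm = LangchainLLMWrapper(
    ChatOpenAI(model="gpt-4o-mini"),
    run_config=RunConfig(timeout=60),
)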

LlamaIndexLLMWrapper

LlamaIndexLLMWrapper(
    llm: BaseLLM, run_config: Optional[RunConfig] = None
)

Bases: BaseRagasLLM

An adapter for LlamaIndex LLMs.

Source code in src/ragas/llms/base.py
def __init__(
    self,
    llm: BaseLLM,
    run_config: t.Optional[RunConfig] = None,
):
    self.llm = llm

    self._signature = ""
    if type(self.llm).__name__.lower() == "bedrock":
        self._signature = "bedrock"
    if run_config is None:
        run_config = RunConfig()
    self.set_run_config(run_config)
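
A construction sketch along the same lines, assuming llama-index-llms-openai is installed and OPENAI_API_KEY is set; the model name is illustrative:

from llama_index.llms.openai import OpenAI
from ragas.llms.base import LlamaIndexLLMWrapper

wrapped_llm = LlamaIndexLLMWrapper(OpenAI(model="gpt-4o-mini"))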

is_multiple_completion_supported

is_multiple_completion_supported(
    llm: BaseLanguageModel,
) -> bool

Return whether the given LLM supports n-completion.

Source code in src/ragas/llms/base.py
def is_multiple_completion_supported(llm: BaseLanguageModel) -> bool:
    """Return whether the given LLM supports n-completion."""
    for llm_type in MULTIPLE_COMPLETION_SUPPORTED:
        if isinstance(llm, llm_type):
            return True
    return False
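
An illustrative check, assuming langchain-openai is installed and OPENAI_API_KEY is set; whether a given class appears in MULTIPLE_COMPLETION_SUPPORTED depends on the installed ragas version:

from langchain_openai import ChatOpenAI
from ragas.llms.base import is_multiple_completion_supported

print(is_multiple_completion_supported(ChatOpenAI(model="gpt-4o-mini")))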

llm_factory

llm_factory(
    model: str = "gpt-4o-mini",
    run_config: Optional[RunConfig] = None,
    default_headers: Optional[Dict[str, str]] = None,
    base_url: Optional[str] = None,
) -> BaseRagasLLM

Create and return a BaseRagasLLM instance. Used for running the default (OpenAI) LLMs in Ragas.

Parameters:

    model : str, optional
        The name of the model to use, by default "gpt-4o-mini".
    run_config : RunConfig, optional
        Configuration for the run, by default None.
    default_headers : dict of str, optional
        Default headers to be used in API requests, by default None.
    base_url : str, optional
        Base URL for the API, by default None.

Returns:

    BaseRagasLLM
        An instance of BaseRagasLLM configured with the specified parameters.

Source code in src/ragas/llms/base.py
def llm_factory(
    model: str = "gpt-4o-mini",
    run_config: t.Optional[RunConfig] = None,
    default_headers: t.Optional[t.Dict[str, str]] = None,
    base_url: t.Optional[str] = None,
) -> BaseRagasLLM:
    """
    Create and return a BaseRagasLLM instance. Used for running default LLMs used
    in Ragas (OpenAI).

    Parameters
    ----------
    model : str, optional
        The name of the model to use, by default "gpt-4o-mini".
    run_config : RunConfig, optional
        Configuration for the run, by default None.
    default_headers : dict of str, optional
        Default headers to be used in API requests, by default None.
    base_url : str, optional
        Base URL for the API, by default None.

    Returns
    -------
    BaseRagasLLM
        An instance of BaseRagasLLM configured with the specified parameters.
    """
    timeout = None
    if run_config is not None:
        timeout = run_config.timeout

    # if helicone is enabled, use the helicone
    if helicone_config.is_enabled:
        default_headers = helicone_config.default_headers()
        base_url = helicone_config.base_url

    openai_model = ChatOpenAI(
        model=model, timeout=timeout, default_headers=default_headers, base_url=base_url
    )
    return LangchainLLMWrapper(openai_model, run_config)
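
A usage sketch, assuming OPENAI_API_KEY is set in the environment; the model name and timeout are illustrative:

from ragas.llms import llm_factory
from ragas.run_config import RunConfig

llm = llm_factory(model="gpt-4o-mini", run_config=RunConfig(timeout=120))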