Pydantic prompt

PydanticPrompt

PydanticPrompt(
    name: Optional[str] = None,
    language: str = "english",
    original_hash: Optional[str] = None,
)

Bases: BasePrompt, Generic[InputModel, OutputModel]

Source code in src/ragas/prompt/base.py
def __init__(
    self,
    name: t.Optional[str] = None,
    language: str = "english",
    original_hash: t.Optional[str] = None,
):
    # Fall back to a snake_case version of the class name when no
    # explicit name is given; otherwise keep the caller-supplied name.
    if name is None:
        self.name = camel_to_snake(self.__class__.__name__)
    else:
        self.name = name

    # Raises ValueError if the language is not supported.
    _check_if_language_is_supported(language)
    self.language = language
    self.original_hash = original_hash
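
A minimal sketch of defining a concrete prompt, assuming the class-attribute pattern (instruction, input_model, output_model, examples) used elsewhere in ragas; the SummaryInput/SummaryOutput models and the example text are hypothetical, chosen only for illustration:

from pydantic import BaseModel
from ragas.prompt import PydanticPrompt

# Hypothetical input/output models for illustration.
class SummaryInput(BaseModel):
    text: str

class SummaryOutput(BaseModel):
    summary: str

class SummaryPrompt(PydanticPrompt[SummaryInput, SummaryOutput]):
    instruction = "Summarize the given text in one sentence."
    input_model = SummaryInput
    output_model = SummaryOutput
    examples = [
        (
            SummaryInput(text="Ragas is a library for evaluating LLM applications."),
            SummaryOutput(summary="Ragas evaluates LLM applications."),
        )
    ]

# With no explicit name, self.name falls back to
# camel_to_snake("SummaryPrompt"), i.e. "summary_prompt".
prompt = SummaryPrompt()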

generate async

generate(
    llm: BaseRagasLLM,
    data: InputModel,
    temperature: Optional[float] = None,
    stop: Optional[List[str]] = None,
    callbacks: Optional[Callbacks] = None,
) -> OutputModel

Generate a single output using the provided language model and input data.

This method is a special case of generate_multiple where only one output is generated.

Parameters:

    llm : BaseRagasLLM (required)
        The language model to use for generation.
    data : InputModel (required)
        The input data for generation.
    temperature : float, optional (default: None)
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional (default: None)
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional (default: None)
        Callback functions to be called during the generation process.

Returns:

    OutputModel
        The generated output.

Notes

This method internally calls generate_multiple with n=1 and returns the first (and only) result.

Source code in src/ragas/prompt/pydantic_prompt.py
async def generate(
    self,
    llm: BaseRagasLLM,
    data: InputModel,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: t.Optional[Callbacks] = None,
) -> OutputModel:
    """
    Generate a single output using the provided language model and input data.

    This method is a special case of `generate_multiple` where only one output is generated.

    Parameters
    ----------
    llm : BaseRagasLLM
        The language model to use for generation.
    data : InputModel
        The input data for generation.
    temperature : float, optional
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional
        Callback functions to be called during the generation process.

    Returns
    -------
    OutputModel
        The generated output.

    Notes
    -----
    This method internally calls `generate_multiple` with `n=1` and returns the first (and only) result.
    """
    callbacks = callbacks or []

    # this is just a special case of generate_multiple
    output_single = await self.generate_multiple(
        llm=llm,
        data=data,
        n=1,
        temperature=temperature,
        stop=stop,
        callbacks=callbacks,
    )
    return output_single[0]
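
A usage sketch, assuming prompt is the hypothetical SummaryPrompt defined above and llm is any configured BaseRagasLLM instance:

# Inside an async context; llm is assumed to be a configured BaseRagasLLM.
output = await prompt.generate(
    llm=llm,
    data=SummaryInput(text="Pydantic prompts pair typed input and output models."),
)
print(output.summary)  # output is a SummaryOutput instance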

generate_multiple async

generate_multiple(
    llm: BaseRagasLLM,
    data: InputModel,
    n: int = 1,
    temperature: Optional[float] = None,
    stop: Optional[List[str]] = None,
    callbacks: Optional[Callbacks] = None,
) -> List[OutputModel]

Generate multiple outputs using the provided language model and input data.

Parameters:

    llm : BaseRagasLLM (required)
        The language model to use for generation.
    data : InputModel (required)
        The input data for generation.
    n : int, optional (default: 1)
        The number of outputs to generate.
    temperature : float, optional (default: None)
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional (default: None)
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional (default: None)
        Callback functions to be called during the generation process.

Returns:

    List[OutputModel]
        A list of generated outputs.

Raises:

    RagasOutputParserException
        If there is an error parsing the output.

Source code in src/ragas/prompt/pydantic_prompt.py
async def generate_multiple(
    self,
    llm: BaseRagasLLM,
    data: InputModel,
    n: int = 1,
    temperature: t.Optional[float] = None,
    stop: t.Optional[t.List[str]] = None,
    callbacks: t.Optional[Callbacks] = None,
) -> t.List[OutputModel]:
    """
    Generate multiple outputs using the provided language model and input data.

    Parameters
    ----------
    llm : BaseRagasLLM
        The language model to use for generation.
    data : InputModel
        The input data for generation.
    n : int, optional
        The number of outputs to generate. Default is 1.
    temperature : float, optional
        The temperature parameter for controlling randomness in generation.
    stop : List[str], optional
        A list of stop sequences to end generation.
    callbacks : Callbacks, optional
        Callback functions to be called during the generation process.

    Returns
    -------
    List[OutputModel]
        A list of generated outputs.

    Raises
    ------
    RagasOutputParserException
        If there's an error parsing the output.
    """
    callbacks = callbacks or []
    processed_data = self.process_input(data)
    prompt_rm, prompt_cb = new_group(
        name=self.name,
        inputs={"data": processed_data},
        callbacks=callbacks,
    )
    prompt_value = PromptValue(prompt_str=self.to_string(processed_data))
    resp = await llm.generate(
        prompt_value,
        n=n,
        temperature=temperature,
        stop=stop,
        callbacks=prompt_cb,
    )

    output_models = []
    parser = RagasOutputParser(pydantic_object=self.output_model)
    for i in range(n):
        output_string = resp.generations[0][i].text
        try:
            answer = await parser.parse_output_string(
                output_string=output_string,
                prompt_value=prompt_value,
                llm=llm,
                callbacks=prompt_cb,
                max_retries=3,
            )
            processed_output = self.process_output(answer, data)  # type: ignore
            output_models.append(processed_output)
        except RagasOutputParserException as e:
            prompt_rm.on_chain_error(error=e)
            logger.error("Prompt %s failed to parse output: %s", self.name, e)
            raise e

    prompt_rm.on_chain_end({"output": output_models})
    return output_models
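
A sketch of sampling several candidates at once with the same hypothetical prompt; with n greater than 1 a non-zero temperature is usually what you want, since identical deterministic generations add little:

outputs = await prompt.generate_multiple(
    llm=llm,
    data=SummaryInput(text="Ragas is a library for evaluating LLM applications."),
    n=3,
    temperature=0.7,
)
# outputs is a List[SummaryOutput]; a parse failure on any candidate
# raises RagasOutputParserException after the parser's retries are exhausted.
for o in outputs:
    print(o.summary)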

adapt async

adapt(
    target_language: str, llm: BaseRagasLLM
) -> "PydanticPrompt[InputModel, OutputModel]"

Adapt the prompt to a new language.

Source code in src/ragas/prompt/pydantic_prompt.py
async def adapt(
    self, target_language: str, llm: BaseRagasLLM
) -> "PydanticPrompt[InputModel, OutputModel]":
    """
    Adapt the prompt to a new language.
    """

    # throws ValueError if language is not supported
    _check_if_language_is_supported(target_language)

    # set the original hash, this is used to
    # identify the original prompt object when loading from file
    if self.original_hash is None:
        self.original_hash = hash(self)

    strings = get_all_strings(self.examples)
    translated_strings = await translate_statements_prompt.generate(
        llm=llm,
        data=ToTranslate(target_language=target_language, statements=strings),
    )

    translated_examples = update_strings(
        obj=self.examples,
        old_strings=strings,
        new_strings=translated_strings.statements,
    )

    new_prompt = copy.deepcopy(self)
    new_prompt.examples = translated_examples
    new_prompt.language = target_language
    return new_prompt
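
A sketch of adapting the hypothetical prompt to another language; only the few-shot examples are machine-translated, and the original hash is recorded so the adapted prompt can be traced back to its source when loading from file:

# target_language must be one of the supported languages, or a ValueError is raised.
spanish_prompt = await prompt.adapt(target_language="spanish", llm=llm)
assert spanish_prompt.language == "spanish"
assert spanish_prompt.original_hash is not None  # links back to the original prompt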

save

save(file_path: str)

Save the prompt to a file.

Source code in src/ragas/prompt/pydantic_prompt.py
def save(self, file_path: str):
    """
    Save the prompt to a file.
    """
    data = {
        "ragas_version": __version__,
        "original_hash": (
            hash(self) if self.original_hash is None else self.original_hash
        ),
        "language": self.language,
        "instruction": self.instruction,
        "examples": [
            {"input": example[0].model_dump(), "output": example[1].model_dump()}
            for example in self.examples
        ],
    }
    if os.path.exists(file_path):
        raise FileExistsError(f"The file '{file_path}' already exists.")
    with open(file_path, "w") as f:
        json.dump(data, f, indent=2)
        print(f"Prompt saved to {file_path}")