Skip to content

Prompt

PromptValue

Bases: PromptValue

to_messages

to_messages() -> List[BaseMessage]

Return prompt as a list of Messages.

Source code in src/ragas/llms/prompt.py
def to_messages(self) -> t.List[BaseMessage]:
    """Return the prompt as a single-element list of chat messages.

    The whole rendered prompt becomes the content of one HumanMessage.
    """
    content = self.to_string()
    return [HumanMessage(content=content)]

Prompt

Bases: BaseModel

Prompt is a class that represents a prompt for the ragas metrics.

Attributes:
    name (str): The name of the prompt.
    instruction (str): The instruction for the prompt.
    output_format_instruction (str): The output format instruction for the prompt.
    examples (List[Dict[str, Any]]): List of example inputs and outputs for the prompt.
    input_keys (List[str]): List of input variable names.
    output_key (str): The output variable name.
    output_type (Literal["json", "str"]): The type of the output (default: "json").
    language (str): The language of the prompt (default: "english").

validate_prompt

validate_prompt(values: Dict[str, Any]) -> Dict[str, Any]

Validate the template string to ensure that it is in desired format.

Source code in src/ragas/llms/prompt.py
@root_validator
def validate_prompt(cls, values: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
    """
    Validate the field values to ensure the prompt is well formed.

    Checks that ``instruction``, ``input_keys`` and ``output_key`` are
    non-empty, that every example defines all input keys plus the output
    key, and that string outputs of JSON-typed prompts parse as valid JSON.

    Raises:
        ValueError: if a required field is missing/empty or an example is
            malformed.
    """
    if not values.get("instruction"):
        raise ValueError("instruction cannot be empty")
    # BUG FIX: the original re-checked "instruction" here
    # (values.get("instruction") == []), so an empty input_keys list was
    # silently accepted.
    if not values.get("input_keys"):
        raise ValueError("input_keys cannot be empty")
    if not values.get("output_key"):
        raise ValueError("output_key cannot be empty")

    if values.get("examples"):
        output_key = values["output_key"]
        for no, example in enumerate(values["examples"]):
            for inp_key in values["input_keys"]:
                if inp_key not in example:
                    raise ValueError(
                        f"example {no+1} does not have the variable {inp_key} in the definition"
                    )
            if output_key not in example:
                raise ValueError(
                    f"example {no+1} does not have the variable {output_key} in the definition"
                )
            # For JSON prompts, a string-valued output must itself be valid
            # JSON. (Dict/list outputs are already structured, so skip them.)
            if values["output_type"].lower() == "json" and isinstance(
                example[output_key], str
            ):
                try:
                    json.loads(example[output_key])
                except ValueError as e:
                    raise ValueError(
                        f"{output_key} in example {no+1} is not in valid json format: {e}"
                    ) from e

    return values

to_string

to_string() -> str

Generate the prompt string from the variables.

Source code in src/ragas/llms/prompt.py
def to_string(self) -> str:
    """
    Render this prompt as a single formatted string.

    Braces inside JSON content are doubled ("{" -> "{{") so the result is
    safe to use as a Langchain prompt template; the input keys are left as
    single-brace placeholders for later substitution.
    """
    parts = [self.instruction]
    if self.output_format_instruction:
        escaped = self.output_format_instruction.replace("{", "{{").replace("}", "}}")
        parts.append("\n" + escaped)
    result = "\n".join(parts) + "\n"

    if self.examples:
        result += "\nExamples:\n"
        escape_braces = self.output_type.lower() == "json"
        for example in self.examples:
            for key, value in example.items():
                # Structured values get a ``` fence around their JSON form.
                fenced = isinstance(value, (dict, list))
                rendered = json.dumps(value, ensure_ascii=False)
                if escape_braces:
                    rendered = rendered.replace("{", "{{").replace("}", "}}")
                if fenced:
                    result += f"\n{key}: ```{rendered}```"
                else:
                    result += f"\n{key}: {rendered}"
            result += "\n"

    result += "\nYour actual task:\n"

    if self.input_keys:
        for key in self.input_keys:
            result += f"\n{key}: {{{key}}}"
    if self.output_key:
        result += f"\n{self.output_key}: \n"

    return result

get_example_str

get_example_str(example_no: int) -> str

Get the example string from the example number.

Source code in src/ragas/llms/prompt.py
def get_example_str(self, example_no: int) -> str:
    """
    Render the example at index *example_no* as a single fenced string.

    Raises:
        ValueError: if example_no is past the last example.
    """
    if example_no >= len(self.examples):
        raise ValueError(f"example number {example_no} is out of range")
    escape_braces = self.output_type.lower() == "json"
    pieces = []
    for key, value in self.examples[example_no].items():
        rendered = json.dumps(value, ensure_ascii=False)
        if escape_braces:
            rendered = rendered.replace("{", "{{").replace("}", "}}")
        pieces.append(f"\n{key}: {rendered}")
    return "```" + "".join(pieces) + "```"

format

format(**kwargs: Any) -> PromptValue

Format the Prompt object into a PromptValue object to be used in metrics.

Source code in src/ragas/llms/prompt.py
def format(self, **kwargs: t.Any) -> PromptValue:
    """
    Substitute the given keyword arguments into the prompt and return a
    PromptValue.

    String values are JSON-encoded before substitution so embedded quotes
    and special characters survive the template formatting.

    Raises:
        ValueError: if the keyword argument names do not exactly match
            ``input_keys``.
    """
    expected = set(self.input_keys)
    provided = set(kwargs)
    if expected != provided:
        raise ValueError(
            f"Input variables {self.input_keys} do not match with the given parameters {list(kwargs.keys())}"
        )
    for key in kwargs:
        if isinstance(kwargs[key], str):
            kwargs[key] = json.dumps(kwargs[key])

    template = self.to_string()
    return PromptValue(prompt_str=template.format(**kwargs))