Guard

The Guard class.

This class is the main entry point for using Guardrails. It is initialized from one of the following class methods:

  • from_rail
  • from_rail_string
  • from_pydantic
  • from_string

The __call__ method functions as a wrapper around LLM APIs. It takes an LLM API and optional prompt parameters, and returns both the raw output from the LLM and the validated output.
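
A minimal end-to-end sketch of that lifecycle, assuming the legacy pre-1.0 openai SDK this page references and a hypothetical my_spec.rail file; the outcome field names follow the ValidationOutcome type in the signatures below:

import openai  # pre-1.0 SDK, matching openai.Completion.create as referenced below
from guardrails import Guard

guard = Guard.from_rail("my_spec.rail")    # hypothetical .rail file; see from_rail below
outcome = guard(openai.Completion.create)  # calls the LLM, then validates its output
print(outcome.raw_llm_output)
print(outcome.validated_output)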

configure(num_reasks: Optional[int] = None)

def configure(num_reasks: Optional[int] = None)

Configure the Guard, setting the maximum number of times to re-ask the LLM for invalid output.
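
For example, to raise the re-ask limit on an existing guard (a minimal sketch; my_spec.rail is a hypothetical file):

from guardrails import Guard

guard = Guard.from_rail("my_spec.rail")  # hypothetical .rail file
guard.configure(num_reasks=3)            # re-ask the LLM up to three times on invalid output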

from_rail(cls, rail_file: str, num_reasks: Optional[int] = None, tracer: Optional[Tracer] = None, *, name: Optional[str] = None, description: Optional[str] = None)

@classmethod
def from_rail(cls,
              rail_file: str,
              num_reasks: Optional[int] = None,
              tracer: Optional[Tracer] = None,
              *,
              name: Optional[str] = None,
              description: Optional[str] = None)

Create a Guard instance from a .rail file.

Arguments:

  • rail_file - The path to the .rail file.
  • num_reasks - The max times to re-ask the LLM for invalid output.

Returns:

An instance of the Guard class.
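
A minimal sketch, assuming a hypothetical my_spec.rail file on disk:

from guardrails import Guard

guard = Guard.from_rail(
    rail_file="my_spec.rail",  # hypothetical path to a RAIL spec
    num_reasks=2,              # re-ask the LLM up to twice on invalid output
    name="my-guard",           # optional keyword-only identifier
)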

from_rail_string(cls, rail_string: str, num_reasks: Optional[int] = None, tracer: Optional[Tracer] = None, *, name: Optional[str] = None, description: Optional[str] = None)

@classmethod
def from_rail_string(cls,
                     rail_string: str,
                     num_reasks: Optional[int] = None,
                     tracer: Optional[Tracer] = None,
                     *,
                     name: Optional[str] = None,
                     description: Optional[str] = None)

Create a Guard instance from a .rail string.

Arguments:

  • rail_string - The .rail string.
  • num_reasks - The max times to re-ask the LLM for invalid output.

Returns:

An instance of the Guard class.
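
A minimal sketch with an inline spec; the RAIL markup shown is illustrative, not a spec this page defines:

from guardrails import Guard

rail_string = """
<rail version="0.1">
<output>
    <string name="greeting" description="A one-word greeting." />
</output>
<prompt>
Generate a one-word greeting.
</prompt>
</rail>
"""

guard = Guard.from_rail_string(rail_string, num_reasks=1)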

from_pydantic(cls, output_class: Union[Type[BaseModel], Type[List[Type[BaseModel]]]], prompt: Optional[str] = None, instructions: Optional[str] = None, num_reasks: Optional[int] = None, reask_prompt: Optional[str] = None, reask_instructions: Optional[str] = None, tracer: Optional[Tracer] = None, *, name: Optional[str] = None, description: Optional[str] = None)

@classmethod
def from_pydantic(cls,
                  output_class: Union[Type[BaseModel],
                                      Type[List[Type[BaseModel]]]],
                  prompt: Optional[str] = None,
                  instructions: Optional[str] = None,
                  num_reasks: Optional[int] = None,
                  reask_prompt: Optional[str] = None,
                  reask_instructions: Optional[str] = None,
                  tracer: Optional[Tracer] = None,
                  *,
                  name: Optional[str] = None,
                  description: Optional[str] = None)

Create a Guard instance from a Pydantic model and prompt.
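
A minimal sketch, using a hypothetical Person model as the output_class:

from pydantic import BaseModel, Field
from guardrails import Guard

class Person(BaseModel):
    name: str = Field(description="The person's full name")
    age: int = Field(description="The person's age in years")

guard = Guard.from_pydantic(
    output_class=Person,
    prompt="Generate details for a fictional person.",
    num_reasks=1,
)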

from_string(cls, validators: Sequence[Validator], description: Optional[str] = None, prompt: Optional[str] = None, instructions: Optional[str] = None, reask_prompt: Optional[str] = None, reask_instructions: Optional[str] = None, num_reasks: Optional[int] = None, tracer: Optional[Tracer] = None, *, name: Optional[str] = None, guard_description: Optional[str] = None)

@classmethod
def from_string(cls,
                validators: Sequence[Validator],
                description: Optional[str] = None,
                prompt: Optional[str] = None,
                instructions: Optional[str] = None,
                reask_prompt: Optional[str] = None,
                reask_instructions: Optional[str] = None,
                num_reasks: Optional[int] = None,
                tracer: Optional[Tracer] = None,
                *,
                name: Optional[str] = None,
                guard_description: Optional[str] = None)

Create a Guard instance for a string response with prompt, instructions, and validations.

Arguments:

  • validators (List[Validator]) - The list of validators to apply to the string output.
  • description (str, optional) - A description for the string to be generated. Defaults to None.
  • prompt (str, optional) - The prompt used to generate the string. Defaults to None.
  • instructions (str, optional) - Instructions for chat models. Defaults to None.
  • reask_prompt (str, optional) - An alternative prompt to use during reasks. Defaults to None.
  • reask_instructions (str, optional) - Alternative instructions to use during reasks. Defaults to None.
  • num_reasks (int, optional) - The max times to re-ask the LLM for invalid output.
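
Returns:

An instance of the Guard class.

A minimal sketch; ValidLength is assumed to be one of the library's built-in validators, and its min/max/on_fail parameters are assumptions as well:

from guardrails import Guard
from guardrails.validators import ValidLength  # assumed built-in validator

guard = Guard.from_string(
    validators=[ValidLength(min=1, max=120, on_fail="reask")],  # assumed validator signature
    description="A one-sentence summary.",
    prompt="Summarize the article in one sentence.",
)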

__call__(llm_api: Callable, prompt_params: Optional[Dict] = None, num_reasks: Optional[int] = None, prompt: Optional[str] = None, instructions: Optional[str] = None, msg_history: Optional[List[Dict]] = None, metadata: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, stream: Optional[bool] = False, *args, **kwargs)

@overload
def __call__(
    llm_api: Callable,
    prompt_params: Optional[Dict] = None,
    num_reasks: Optional[int] = None,
    prompt: Optional[str] = None,
    instructions: Optional[str] = None,
    msg_history: Optional[List[Dict]] = None,
    metadata: Optional[Dict] = None,
    full_schema_reask: Optional[bool] = None,
    stream: Optional[bool] = False,
    *args,
    **kwargs
) -> Union[ValidationOutcome[OT], Iterable[ValidationOutcome[OT]]]

__call__(llm_api: Callable[[Any], Awaitable[Any]], prompt_params: Optional[Dict] = None, num_reasks: Optional[int] = None, prompt: Optional[str] = None, instructions: Optional[str] = None, msg_history: Optional[List[Dict]] = None, metadata: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

@overload
def __call__(llm_api: Callable[[Any], Awaitable[Any]],
             prompt_params: Optional[Dict] = None,
             num_reasks: Optional[int] = None,
             prompt: Optional[str] = None,
             instructions: Optional[str] = None,
             msg_history: Optional[List[Dict]] = None,
             metadata: Optional[Dict] = None,
             full_schema_reask: Optional[bool] = None,
             *args,
             **kwargs) -> Awaitable[ValidationOutcome[OT]]

__call__(llm_api: Union[Callable, Callable[[Any], Awaitable[Any]]], prompt_params: Optional[Dict] = None, num_reasks: Optional[int] = None, prompt: Optional[str] = None, instructions: Optional[str] = None, msg_history: Optional[List[Dict]] = None, metadata: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

def __call__(
    llm_api: Union[Callable, Callable[[Any], Awaitable[Any]]],
    prompt_params: Optional[Dict] = None,
    num_reasks: Optional[int] = None,
    prompt: Optional[str] = None,
    instructions: Optional[str] = None,
    msg_history: Optional[List[Dict]] = None,
    metadata: Optional[Dict] = None,
    full_schema_reask: Optional[bool] = None,
    *args,
    **kwargs
) -> Union[
    Union[ValidationOutcome[OT], Iterable[ValidationOutcome[OT]]],
    Awaitable[ValidationOutcome[OT]],
]

Call the LLM and validate the output. Pass an async LLM API to return a coroutine.

Arguments:

  • llm_api - The LLM API to call (e.g. openai.Completion.create or openai.Completion.acreate).
  • prompt_params - The parameters to pass to the prompt.format() method.
  • num_reasks - The max times to re-ask the LLM for invalid output.
  • prompt - The prompt to use for the LLM.
  • instructions - Instructions for chat models.
  • msg_history - The message history to pass to the LLM.
  • metadata - Metadata to pass to the validators.
  • full_schema_reask - When reasking, whether to regenerate the full schema or just the incorrect values. Defaults to True if a base model is provided, False otherwise.

Returns:

The raw text output from the LLM and the validated output.
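
A minimal sketch, assuming the legacy pre-1.0 openai SDK and a hypothetical my_spec.rail whose prompt contains a topic placeholder:

import openai
from guardrails import Guard

guard = Guard.from_rail("my_spec.rail")  # hypothetical .rail file
outcome = guard(
    openai.Completion.create,
    prompt_params={"topic": "the solar system"},  # substituted into the spec's prompt
    num_reasks=2,                                 # allow up to two re-asks on invalid output
    metadata={"user_id": "abc-123"},              # hypothetical metadata forwarded to validators
)
print(outcome.raw_llm_output)
print(outcome.validated_output)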

parse(llm_output: str, metadata: Optional[Dict] = None, llm_api: None = None, num_reasks: Optional[int] = None, prompt_params: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

@overload
def parse(llm_output: str,
          metadata: Optional[Dict] = None,
          llm_api: None = None,
          num_reasks: Optional[int] = None,
          prompt_params: Optional[Dict] = None,
          full_schema_reask: Optional[bool] = None,
          *args,
          **kwargs) -> ValidationOutcome[OT]

parse(llm_output: str, metadata: Optional[Dict] = None, llm_api: Callable[[Any], Awaitable[Any]] = ..., num_reasks: Optional[int] = None, prompt_params: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

@overload
def parse(llm_output: str,
          metadata: Optional[Dict] = None,
          llm_api: Callable[[Any], Awaitable[Any]] = ...,
          num_reasks: Optional[int] = None,
          prompt_params: Optional[Dict] = None,
          full_schema_reask: Optional[bool] = None,
          *args,
          **kwargs) -> Awaitable[ValidationOutcome[OT]]

parse(llm_output: str, metadata: Optional[Dict] = None, llm_api: Optional[Callable] = None, num_reasks: Optional[int] = None, prompt_params: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

@overload
def parse(llm_output: str,
          metadata: Optional[Dict] = None,
          llm_api: Optional[Callable] = None,
          num_reasks: Optional[int] = None,
          prompt_params: Optional[Dict] = None,
          full_schema_reask: Optional[bool] = None,
          *args,
          **kwargs) -> ValidationOutcome[OT]

parse(llm_output: str, metadata: Optional[Dict] = None, llm_api: Optional[Callable] = None, num_reasks: Optional[int] = None, prompt_params: Optional[Dict] = None, full_schema_reask: Optional[bool] = None, *args, **kwargs)

def parse(
    llm_output: str,
    metadata: Optional[Dict] = None,
    llm_api: Optional[Callable] = None,
    num_reasks: Optional[int] = None,
    prompt_params: Optional[Dict] = None,
    full_schema_reask: Optional[bool] = None,
    *args,
    **kwargs
) -> Union[ValidationOutcome[OT], Awaitable[ValidationOutcome[OT]]]

An alternate flow to using Guard for cases where the llm_output is already known and only needs to be parsed and validated.

Arguments:

  • llm_output - The output being parsed and validated.
  • metadata - Metadata to pass to the validators.
  • llm_api - The LLM API to call (e.g. openai.Completion.create or openai.Completion.acreate).
  • num_reasks - The max times to re-ask the LLM for invalid output.
  • prompt_params - The parameters to pass to the prompt.format() method.
  • full_schema_reask - When reasking, whether to regenerate the full schema or just the incorrect values.

Returns:

The validated response. This is either a string or a dictionary, determined by the object schema defined in the RAIL spec.
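
A minimal sketch of validating output you already have; with llm_api left as None, the output is parsed and validated without calling an LLM. The RAIL markup is illustrative, and the outcome field names follow the ValidationOutcome type in the signatures above:

from guardrails import Guard

rail_string = """
<rail version="0.1">
<output>
    <string name="greeting" />
</output>
<prompt>
Generate a one-word greeting.
</prompt>
</rail>
"""

guard = Guard.from_rail_string(rail_string)
outcome = guard.parse('{"greeting": "hello"}')  # hypothetical pre-existing LLM output
if outcome.validation_passed:
    print(outcome.validated_output)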