
structure

Class info

Classes

| Name  | Module                    | Description                                                        |
|-------|---------------------------|--------------------------------------------------------------------|
| Agent | llmling_agent.agent.agent | Agent for AI-powered interaction with LLMling resources and tools. |

    High-level pipeline functions for agent execution.

    get_structured async

    get_structured(
        prompt: str,
        response_type: type[T],
        model: ModelType,
        *,
        system_prompt: str | None = None,
        max_retries: int = 3,
        error_handler: Callable[[Exception], T | None] | None = None,
    ) -> T
    

    Get structured output from LLM using function calling.

    This function creates a temporary agent that uses the class constructor as a tool to generate structured output. It handles:

    - Type conversion from Python types to JSON schema
    - Constructor parameter validation
    - Error handling with optional recovery
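
    The constructor-as-tool idea can be illustrated with a minimal sketch. This is not the library's actual implementation; it only shows how a typed `__init__` signature could be turned into a rough JSON-schema-style parameter description for function calling (the `constructor_schema` helper and `_TYPE_MAP` below are hypothetical):

    ```python
    import inspect
    from datetime import datetime

    # Hypothetical mapping from Python annotations to JSON-schema types.
    # A real conversion handles far more cases (unions, nested models, docs).
    _TYPE_MAP = {str: "string", int: "integer", float: "number", bool: "boolean", datetime: "string"}

    def constructor_schema(cls: type) -> dict:
        """Describe cls.__init__ as a minimal object schema."""
        sig = inspect.signature(cls.__init__)
        properties: dict[str, dict] = {}
        required: list[str] = []
        for name, param in sig.parameters.items():
            if name == "self":
                continue
            properties[name] = {"type": _TYPE_MAP.get(param.annotation, "string")}
            if param.default is inspect.Parameter.empty:
                required.append(name)
        return {"type": "object", "properties": properties, "required": required}
    ```

    For the TaskResult class in the example below, this sketch would report success, message, and due_date as properties, with success and message required.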

    Parameters:

    | Name          | Type                                      | Description                                   | Default  |
    |---------------|-------------------------------------------|-----------------------------------------------|----------|
    | prompt        | str                                       | The prompt to send to the LLM                 | required |
    | response_type | type[T]                                   | The type to create (class with typed constructor) | required |
    | model         | ModelType                                 | Model to use                                  | required |
    | system_prompt | str \| None                               | Optional system instructions                  | None     |
    | max_retries   | int                                       | Max attempts for parsing                      | 3        |
    | error_handler | Callable[[Exception], T \| None] \| None  | Optional error handler for recovery           | None     |

    Returns:

    | Type | Description               |
    |------|---------------------------|
    | T    | Instance of response_type |

    Example

    from datetime import datetime

    class TaskResult:
        '''Analysis result for a task.'''
        def __init__(
            self,
            success: bool,
            message: str,
            due_date: datetime | None = None
        ):
            self.success = success
            self.message = message
            self.due_date = due_date

    result = await get_structured(
        "Analyze task: Deploy monitoring",
        TaskResult,
        model="openai:gpt-4o-mini",  # any supported model identifier
        system_prompt="You analyze task success",
    )
    print(f"Success: {result.success}")

    Raises:

    | Type      | Description                                       |
    |-----------|---------------------------------------------------|
    | TypeError | If response_type is not a valid type              |
    | ValueError| If constructor schema cannot be created           |
    | Exception | If LLM call fails and no error_handler recovers   |
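
    The error_handler hook can recover from such failures with a fallback instance instead of letting the exception propagate. A hedged sketch building on the TaskResult class above (the fallback helper and model identifier are illustrative):

    ```python
    def fallback(exc: Exception) -> TaskResult | None:
        # Return a conservative default instead of failing the call.
        return TaskResult(success=False, message=f"extraction failed: {exc}")

    result = await get_structured(
        "Analyze task: Deploy monitoring",
        TaskResult,
        model="openai:gpt-4o-mini",  # illustrative model identifier
        error_handler=fallback,
    )
    ```

    As the source below shows, a handler that returns None (or any falsy value) lets the original exception re-raise.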

    Source code in src/llmling_agent/functional/structure.py
    async def get_structured[T](
        prompt: str,
        response_type: type[T],
        model: ModelType,
        *,
        system_prompt: str | None = None,
        max_retries: int = 3,
        error_handler: Callable[[Exception], T | None] | None = None,
    ) -> T:
        """Get structured output from LLM using function calling.
    
        This function creates a temporary agent that uses the class constructor
        as a tool to generate structured output. It handles:
        - Type conversion from Python types to JSON schema
        - Constructor parameter validation
        - Error handling with optional recovery
    
        Args:
            prompt: The prompt to send to the LLM
            response_type: The type to create (class with typed constructor)
            model: model to use
            system_prompt: Optional system instructions
            max_retries: Max attempts for parsing (default: 3)
            error_handler: Optional error handler for recovery
    
        Returns:
            Instance of response_type
    
        Example:
            ```python
            class TaskResult:
                '''Analysis result for a task.'''
                def __init__(
                    self,
                    success: bool,
                    message: str,
                    due_date: datetime | None = None
                ):
                    self.success = success
                    self.message = message
                    self.due_date = due_date
    
            result = await get_structured(
                "Analyze task: Deploy monitoring",
                TaskResult,
                system_prompt="You analyze task success"
            )
            print(f"Success: {result.success}")
            ```
    
        Raises:
            TypeError: If response_type is not a valid type
            ValueError: If constructor schema cannot be created
            Exception: If LLM call fails and no error_handler recovers
        """
        """Get structured output from LLM using function calling."""
        async with Agent(
            model=model,
            system_prompt=system_prompt or [],
            name="structured",
            retries=max_retries,
        ) as agent:
            try:
                return await agent.talk.extract(prompt, response_type)
            except Exception as e:
                if error_handler and (err_result := error_handler(e)):
                    return err_result
                raise
    

    get_structured_multiple async

    get_structured_multiple(prompt: str, target: type[T], model: ModelType) -> list[T]
    

    Extract multiple structured instances from text.
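
    A possible usage sketch (the Person class and model identifier below are illustrative, not part of the library):

    ```python
    class Person:
        """A person mentioned in the text."""

        def __init__(self, name: str, role: str):
            self.name = name
            self.role = role

    people = await get_structured_multiple(
        "Alice is the team lead and Bob is the on-call engineer.",
        Person,
        model="openai:gpt-4o-mini",  # illustrative model identifier
    )
    print([p.name for p in people])
    ```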

    Source code in src/llmling_agent/functional/structure.py
    async def get_structured_multiple[T](
        prompt: str,
        target: type[T],
        model: ModelType,
    ) -> list[T]:
        """Extract multiple structured instances from text."""
        async with Agent(model=model, name="structured") as agent:
            return await agent.talk.extract_multiple(prompt, target)
    

    pick_one async

    pick_one(prompt: str, options: type[T | Enum] | list[T], model: ModelType) -> T
    

    Pick one option from a list of choices.
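
    A hedged usage sketch showing both accepted forms of options, an Enum type and a plain list (the Priority enum and model identifier are illustrative):

    ```python
    from enum import Enum

    class Priority(Enum):
        LOW = "low"
        HIGH = "high"

    # Pick from an Enum's members.
    severity = await pick_one(
        "The production database is down.",
        Priority,
        model="openai:gpt-4o-mini",  # illustrative model identifier
    )

    # Pick from a plain list of values.
    greeting = await pick_one(
        "Pick the best greeting for a formal email.",
        ["Hey!", "Dear Sir or Madam,", "Hi team,"],
        model="openai:gpt-4o-mini",
    )
    ```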

    Source code in src/llmling_agent/functional/structure.py
    async def pick_one[T](
        prompt: str,
        options: type[T | Enum] | list[T],
        model: ModelType,
    ) -> T:
        """Pick one option from a list of choices."""
        instances: dict[str, T] = {}
    
        # Create mapping and descriptions
        if isinstance(options, type):
            if issubclass(options, Enum):
                choices = {e.name: (e.value, str(e.value)) for e in options}
            else:
                literal_opts = get_args(options)
                choices = {str(opt): (opt, str(opt)) for opt in literal_opts}
        else:  # List
            choices = {str(i): (opt, repr(opt)) for i, opt in enumerate(options)}
    
        async def select_option(option: Literal[tuple(choices.keys())]) -> str:  # type: ignore
            """Pick one of the available options.
    
            Args:
                option: Which option to pick
            """
            instances["selected"] = choices[option][0]
            return f"Selected: {option}"
    
        # Add options to docstring
        docs = "\n".join(f"- {k}: {desc}" for k, (_, desc) in choices.items())
        assert select_option.__doc__
        select_option.__doc__ += f"\nOptions:\n{docs}"
        sys_prompt = "Select the most appropriate option based on the context."
        async with Agent(model=model, system_prompt=sys_prompt) as agent:
            agent.tools.register_tool(select_option, enabled=True)
            await agent.run(prompt)
            return instances["selected"]