
structured

Class info

Classes

| Name | Module | Description |
|------|--------|-------------|
| BaseResponseDefinition | llmling_agent.models.result_types | Base class for response definitions. |
| MessageNode | llmling_agent.messaging.messagenode | Base class for all message processing nodes. |
| StructuredAgent | llmling_agent.agent.structured | Wrapper for Agent that enforces a specific result type. |

    🛈 DocStrings

    LLMling integration with PydanticAI for AI-powered resource interaction.

    StructuredAgent

    Bases: MessageNode[TDeps, TResult]

    Wrapper for Agent that enforces a specific result type.

This wrapper ensures the agent always returns results of the specified type. The type can be provided as:

- A Python type for validation
- A response definition name from the manifest
- A complete response definition instance
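A minimal construction sketch (hedged: the top-level import path, the Agent constructor arguments beyond name=, and run() being inherited from MessageNode are assumptions not shown on this page):

    from pydantic import BaseModel

    # Top-level import path is an assumption; only the module paths
    # llmling_agent.agent.agent / llmling_agent.agent.structured appear here.
    from llmling_agent import Agent, StructuredAgent


    class Summary(BaseModel):
        title: str
        topics: list[str]


    async def main() -> None:
        # Agent constructor arguments other than name= are assumptions.
        base = Agent[None](name="summarizer", model="openai:gpt-4o-mini")
        structured = StructuredAgent(base, result_type=Summary)
        async with structured:
            msg = await structured.run("Summarize structured agent output in two topics.")
            print(msg.content)  # a validated Summary instance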

    Source code in src/llmling_agent/agent/structured.py
    class StructuredAgent[TDeps, TResult](MessageNode[TDeps, TResult]):
        """Wrapper for Agent that enforces a specific result type.
    
        This wrapper ensures the agent always returns results of the specified type.
        The type can be provided as:
        - A Python type for validation
        - A response definition name from the manifest
        - A complete response definition instance
        """
    
        def __init__(
            self,
            agent: Agent[TDeps] | StructuredAgent[TDeps, TResult] | Callable[..., TResult],
            result_type: type[TResult] | str | ResponseDefinition,
            *,
            tool_name: str | None = None,
            tool_description: str | None = None,
        ):
            """Initialize structured agent wrapper.
    
            Args:
                agent: Base agent to wrap
                result_type: Expected result type:
                    - BaseModel / dataclasses
                    - Name of response definition in manifest
                    - Complete response definition instance
                tool_name: Optional override for tool name
                tool_description: Optional override for tool description
    
            Raises:
                ValueError: If named response type not found in manifest
            """
            from llmling_agent.agent.agent import Agent
    
            logger.debug("StructuredAgent.run result_type = %s", result_type)
            match agent:
                case StructuredAgent():
                    self._agent: Agent[TDeps] = agent._agent
                case Callable():
                    self._agent = Agent[TDeps](provider=agent, name=agent.__name__)
                case Agent():
                    self._agent = agent
                case _:
                    msg = "Invalid agent type"
                    raise ValueError(msg)
    
            super().__init__(name=self._agent.name)
    
            self._result_type = to_type(result_type)
            agent.set_result_type(result_type)
    
            match result_type:
                case type() | str():
                    # For types and named definitions, use overrides if provided
                    self._agent.set_result_type(
                        result_type,
                        tool_name=tool_name,
                        tool_description=tool_description,
                    )
                case BaseResponseDefinition():
                    # For response definitions, use as-is
                    # (overrides don't apply to complete definitions)
                    self._agent.set_result_type(result_type)
    
        async def __aenter__(self) -> Self:
            """Enter async context and set up MCP servers.
    
            Called when agent enters its async context. Sets up any configured
            MCP servers and their tools.
            """
            await self._agent.__aenter__()
            return self
    
        async def __aexit__(
            self,
            exc_type: type[BaseException] | None,
            exc_val: BaseException | None,
            exc_tb: TracebackType | None,
        ):
            """Exit async context."""
            await self._agent.__aexit__(exc_type, exc_val, exc_tb)
    
        def __and__(
            self, other: AnyAgent[Any, Any] | Team[Any] | ProcessorCallback[TResult]
        ) -> Team[TDeps]:
            return self._agent.__and__(other)
    
        def __or__(self, other: Agent | ProcessorCallback | BaseTeam) -> TeamRun:
            return self._agent.__or__(other)
    
        async def _run(
            self,
            *prompt: AnyPromptType | TResult,
            result_type: type[TResult] | None = None,
            model: ModelType = None,
            tool_choice: bool | str | list[str] = True,
            store_history: bool = True,
            message_id: str | None = None,
            conversation_id: str | None = None,
            wait_for_connections: bool | None = None,
        ) -> ChatMessage[TResult]:
            """Run with fixed result type.
    
            Args:
                prompt: Any prompt-compatible object or structured objects of type TResult
                result_type: Expected result type:
                    - BaseModel / dataclasses
                    - Name of response definition in manifest
                    - Complete response definition instance
                model: Optional model override
                tool_choice: Control tool usage:
                    - True: Allow all tools
                    - False: No tools
                    - str: Use specific tool
                    - list[str]: Allow specific tools
                store_history: Whether the message exchange should be added to the
                               context window
                message_id: Optional message id for the returned message.
                            Automatically generated if not provided.
                conversation_id: Optional conversation id for the returned message.
                wait_for_connections: Whether to wait for all connections to complete
            """
            typ = result_type or self._result_type
            return await self._agent._run(
                *prompt,
                result_type=typ,
                model=model,
                store_history=store_history,
                tool_choice=tool_choice,
                message_id=message_id,
                conversation_id=conversation_id,
                wait_for_connections=wait_for_connections,
            )
    
        async def validate_against(
            self,
            prompt: str,
            criteria: type[TResult],
            **kwargs: Any,
        ) -> bool:
            """Check if agent's response satisfies stricter criteria."""
            result = await self.run(prompt, **kwargs)
            try:
                criteria.model_validate(result.content.model_dump())  # type: ignore
            except ValidationError:
                return False
            else:
                return True
    
        def __repr__(self) -> str:
            type_name = getattr(self._result_type, "__name__", str(self._result_type))
            return f"StructuredAgent({self._agent!r}, result_type={type_name})"
    
        def __prompt__(self) -> str:
            type_name = getattr(self._result_type, "__name__", str(self._result_type))
            base_info = self._agent.__prompt__()
            return f"{base_info}\nStructured output type: {type_name}"
    
        def __getattr__(self, name: str) -> Any:
            return getattr(self._agent, name)
    
        @property
        def context(self) -> AgentContext[TDeps]:
            return self._agent.context
    
        @context.setter
        def context(self, value: Any):
            self._agent.context = value
    
        @property
        def name(self) -> str:
            return self._agent.name
    
        @name.setter
        def name(self, value: str):
            self._agent.name = value
    
        @property
        def tools(self) -> ToolManager:
            return self._agent.tools
    
        @overload
        def to_structured(
            self,
            result_type: None,
            *,
            tool_name: str | None = None,
            tool_description: str | None = None,
        ) -> Agent[TDeps]: ...
    
        @overload
        def to_structured[TNewResult](
            self,
            result_type: type[TNewResult] | str | ResponseDefinition,
            *,
            tool_name: str | None = None,
            tool_description: str | None = None,
        ) -> StructuredAgent[TDeps, TNewResult]: ...
    
        def to_structured[TNewResult](
            self,
            result_type: type[TNewResult] | str | ResponseDefinition | None,
            *,
            tool_name: str | None = None,
            tool_description: str | None = None,
        ) -> Agent[TDeps] | StructuredAgent[TDeps, TNewResult]:
            if result_type is None:
                return self._agent
    
            return StructuredAgent(
                self._agent,
                result_type=result_type,
                tool_name=tool_name,
                tool_description=tool_description,
            )
    
        @property
        def stats(self) -> MessageStats:
            return self._agent.stats
    
        async def run_iter(
            self,
            *prompt_groups: Sequence[AnyPromptType | PIL.Image.Image | os.PathLike[str]],
            **kwargs: Any,
        ) -> AsyncIterator[ChatMessage[Any]]:
            """Forward run_iter to wrapped agent."""
            async for message in self._agent.run_iter(*prompt_groups, **kwargs):
                yield message
    
        async def run_job(
            self,
            job: Job[TDeps, TResult],
            *,
            store_history: bool = True,
            include_agent_tools: bool = True,
        ) -> ChatMessage[TResult]:
            """Execute a pre-defined job ensuring type compatibility.
    
            Args:
                job: Job configuration to execute
                store_history: Whether to add job execution to conversation history
                include_agent_tools: Whether to include agent's tools alongside job tools
    
            Returns:
                Task execution result
    
            Raises:
                JobError: If job execution fails or types don't match
                ValueError: If job configuration is invalid
            """
            from llmling_agent.tasks import JobError
    
            # Validate dependency requirement
            if job.required_dependency is not None:  # noqa: SIM102
                if not isinstance(self.context.data, job.required_dependency):
                    msg = (
                        f"Agent dependencies ({type(self.context.data)}) "
                        f"don't match job requirement ({job.required_dependency})"
                    )
                    raise JobError(msg)
    
            # Validate return type requirement
            if job.required_return_type != self._result_type:
                msg = (
                    f"Agent result type ({self._result_type}) "
                    f"doesn't match job requirement ({job.required_return_type})"
                )
                raise JobError(msg)
    
            # Load task knowledge if provided
            if job.knowledge:
                # Add knowledge sources to context
                resources: list[Resource | str] = list(job.knowledge.paths) + list(
                    job.knowledge.resources
                )
                for source in resources:
                    await self.conversation.load_context_source(source)
                for prompt in job.knowledge.prompts:
                    await self.conversation.load_context_source(prompt)
    
            try:
                # Register task tools temporarily
                tools = job.get_tools()
    
                # Use temporary tools
                with self._agent.tools.temporary_tools(
                    tools, exclusive=not include_agent_tools
                ):
                    # Execute job using StructuredAgent's run to maintain type safety
                    return await self.run(await job.get_prompt(), store_history=store_history)
    
            except Exception as e:
                msg = f"Task execution failed: {e}"
                logger.exception(msg)
                raise JobError(msg) from e
    
        @classmethod
        def from_callback(
            cls,
            callback: ProcessorCallback[TResult],
            *,
            name: str | None = None,
            **kwargs: Any,
        ) -> StructuredAgent[None, TResult]:
            """Create a structured agent from a processing callback.
    
            Args:
                callback: Function to process messages. Can be:
                    - sync or async
                    - with or without context
                    - with explicit return type
                name: Optional name for the agent
                **kwargs: Additional arguments for agent
    
            Example:
                ```python
                class AnalysisResult(BaseModel):
                    sentiment: float
                    topics: list[str]
    
                def analyze(msg: str) -> AnalysisResult:
                    return AnalysisResult(sentiment=0.8, topics=["tech"])
    
                analyzer = StructuredAgent.from_callback(analyze)
                ```
            """
            from llmling_agent.agent.agent import Agent
            from llmling_agent_providers.callback import CallbackProvider
    
            name = name or callback.__name__ or "processor"
            provider = CallbackProvider(callback, name=name)
            agent = Agent[None](provider=provider, name=name, **kwargs)
            # Get return type from signature for validation
            hints = get_type_hints(callback)
            return_type = hints.get("return")
    
            # If async, unwrap from Awaitable
            if (
                return_type
                and hasattr(return_type, "__origin__")
                and return_type.__origin__ is Awaitable
            ):
                return_type = return_type.__args__[0]
            return StructuredAgent[None, TResult](agent, return_type or str)  # type: ignore
    
        def is_busy(self) -> bool:
            """Check if agent is currently processing tasks."""
            return bool(self._pending_tasks or self._background_task)
    

    __aenter__ async

    __aenter__() -> Self
    

    Enter async context and set up MCP servers.

    Called when agent enters its async context. Sets up any configured MCP servers and their tools.
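A hedged sketch of the intended pattern, reusing the structured agent from the construction sketch above: enter the context before running so any configured MCP servers and their tools are available.

    async with structured as agent:
        # MCP servers and their tools are set up on entry, torn down on exit
        reply = await agent.run("List the tools you have available.")
        print(reply.content)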

    Source code in src/llmling_agent/agent/structured.py
    async def __aenter__(self) -> Self:
        """Enter async context and set up MCP servers.
    
        Called when agent enters its async context. Sets up any configured
        MCP servers and their tools.
        """
        await self._agent.__aenter__()
        return self
    

    __aexit__ async

    __aexit__(
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    )
    

    Exit async context.

    Source code in src/llmling_agent/agent/structured.py
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ):
        """Exit async context."""
        await self._agent.__aexit__(exc_type, exc_val, exc_tb)
    

    __init__

    __init__(
        agent: Agent[TDeps] | StructuredAgent[TDeps, TResult] | Callable[..., TResult],
        result_type: type[TResult] | str | ResponseDefinition,
        *,
        tool_name: str | None = None,
        tool_description: str | None = None,
    )
    

    Initialize structured agent wrapper.

    Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| agent | Agent[TDeps] \| StructuredAgent[TDeps, TResult] \| Callable[..., TResult] | Base agent to wrap | required |
| result_type | type[TResult] \| str \| ResponseDefinition | Expected result type: a BaseModel / dataclass, the name of a response definition in the manifest, or a complete response definition instance | required |
| tool_name | str \| None | Optional override for tool name | None |
| tool_description | str \| None | Optional override for tool description | None |

    Raises:

| Type | Description |
|------|-------------|
| ValueError | If named response type not found in manifest |

    Source code in src/llmling_agent/agent/structured.py
    def __init__(
        self,
        agent: Agent[TDeps] | StructuredAgent[TDeps, TResult] | Callable[..., TResult],
        result_type: type[TResult] | str | ResponseDefinition,
        *,
        tool_name: str | None = None,
        tool_description: str | None = None,
    ):
        """Initialize structured agent wrapper.
    
        Args:
            agent: Base agent to wrap
            result_type: Expected result type:
                - BaseModel / dataclasses
                - Name of response definition in manifest
                - Complete response definition instance
            tool_name: Optional override for tool name
            tool_description: Optional override for tool description
    
        Raises:
            ValueError: If named response type not found in manifest
        """
        from llmling_agent.agent.agent import Agent
    
        logger.debug("StructuredAgent.run result_type = %s", result_type)
        match agent:
            case StructuredAgent():
                self._agent: Agent[TDeps] = agent._agent
            case Callable():
                self._agent = Agent[TDeps](provider=agent, name=agent.__name__)
            case Agent():
                self._agent = agent
            case _:
                msg = "Invalid agent type"
                raise ValueError(msg)
    
        super().__init__(name=self._agent.name)
    
        self._result_type = to_type(result_type)
        agent.set_result_type(result_type)
    
        match result_type:
            case type() | str():
                # For types and named definitions, use overrides if provided
                self._agent.set_result_type(
                    result_type,
                    tool_name=tool_name,
                    tool_description=tool_description,
                )
            case BaseResponseDefinition():
                # For response definitions, use as-is
                # (overrides don't apply to complete definitions)
                self._agent.set_result_type(result_type)
    

    _run async

    _run(
        *prompt: AnyPromptType | TResult,
        result_type: type[TResult] | None = None,
        model: ModelType = None,
        tool_choice: bool | str | list[str] = True,
        store_history: bool = True,
        message_id: str | None = None,
        conversation_id: str | None = None,
        wait_for_connections: bool | None = None,
    ) -> ChatMessage[TResult]
    

    Run with fixed result type.

    Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| prompt | AnyPromptType \| TResult | Any prompt-compatible object or structured objects of type TResult | () |
| result_type | type[TResult] \| None | Expected result type: a BaseModel / dataclass, the name of a response definition in the manifest, or a complete response definition instance | None |
| model | ModelType | Optional model override | None |
| tool_choice | bool \| str \| list[str] | Control tool usage: True allows all tools, False disables tools, a string selects a specific tool, a list of strings allows those tools | True |
| store_history | bool | Whether the message exchange should be added to the context window | True |
| message_id | str \| None | Optional message id for the returned message; automatically generated if not provided | None |
| conversation_id | str \| None | Optional conversation id for the returned message | None |
| wait_for_connections | bool \| None | Whether to wait for all connections to complete | None |
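A hedged sketch of a call with per-run overrides; the public run() is assumed to accept the same keyword arguments it forwards to _run():

    # Summary is the model from the construction sketch above; the keyword
    # arguments mirror the ones documented for _run().
    msg = await structured.run(
        "Extract the key topics from this text.",
        store_history=False,  # keep this exchange out of the context window
    )
    print(msg.content)        # validated as Summary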
    Source code in src/llmling_agent/agent/structured.py
    async def _run(
        self,
        *prompt: AnyPromptType | TResult,
        result_type: type[TResult] | None = None,
        model: ModelType = None,
        tool_choice: bool | str | list[str] = True,
        store_history: bool = True,
        message_id: str | None = None,
        conversation_id: str | None = None,
        wait_for_connections: bool | None = None,
    ) -> ChatMessage[TResult]:
        """Run with fixed result type.
    
        Args:
            prompt: Any prompt-compatible object or structured objects of type TResult
            result_type: Expected result type:
                - BaseModel / dataclasses
                - Name of response definition in manifest
                - Complete response definition instance
            model: Optional model override
            tool_choice: Control tool usage:
                - True: Allow all tools
                - False: No tools
                - str: Use specific tool
                - list[str]: Allow specific tools
            store_history: Whether the message exchange should be added to the
                           context window
            message_id: Optional message id for the returned message.
                        Automatically generated if not provided.
            conversation_id: Optional conversation id for the returned message.
            wait_for_connections: Whether to wait for all connections to complete
        """
        typ = result_type or self._result_type
        return await self._agent._run(
            *prompt,
            result_type=typ,
            model=model,
            store_history=store_history,
            tool_choice=tool_choice,
            message_id=message_id,
            conversation_id=conversation_id,
            wait_for_connections=wait_for_connections,
        )
    

    from_callback classmethod

    from_callback(
        callback: ProcessorCallback[TResult], *, name: str | None = None, **kwargs: Any
    ) -> StructuredAgent[None, TResult]
    

    Create a structured agent from a processing callback.

    Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| callback | ProcessorCallback[TResult] | Function to process messages; can be sync or async, with or without context, and with an explicit return type | required |
| name | str \| None | Optional name for the agent | None |
| **kwargs | Any | Additional arguments for agent | {} |
    Example
    class AnalysisResult(BaseModel):
        sentiment: float
        topics: list[str]
    
    def analyze(msg: str) -> AnalysisResult:
        return AnalysisResult(sentiment=0.8, topics=["tech"])
    
    analyzer = StructuredAgent.from_callback(analyze)
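
Async callbacks work the same way (a hedged sketch; the structured result type is inferred from the return annotation):

    async def analyze_async(msg: str) -> AnalysisResult:
        # pretend this awaits an external service
        return AnalysisResult(sentiment=0.5, topics=["infra"])

    async_analyzer = StructuredAgent.from_callback(analyze_async, name="async-analyzer")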
    
    Source code in src/llmling_agent/agent/structured.py
    @classmethod
    def from_callback(
        cls,
        callback: ProcessorCallback[TResult],
        *,
        name: str | None = None,
        **kwargs: Any,
    ) -> StructuredAgent[None, TResult]:
        """Create a structured agent from a processing callback.
    
        Args:
            callback: Function to process messages. Can be:
                - sync or async
                - with or without context
                - with explicit return type
            name: Optional name for the agent
            **kwargs: Additional arguments for agent
    
        Example:
            ```python
            class AnalysisResult(BaseModel):
                sentiment: float
                topics: list[str]
    
            def analyze(msg: str) -> AnalysisResult:
                return AnalysisResult(sentiment=0.8, topics=["tech"])
    
            analyzer = StructuredAgent.from_callback(analyze)
            ```
        """
        from llmling_agent.agent.agent import Agent
        from llmling_agent_providers.callback import CallbackProvider
    
        name = name or callback.__name__ or "processor"
        provider = CallbackProvider(callback, name=name)
        agent = Agent[None](provider=provider, name=name, **kwargs)
        # Get return type from signature for validation
        hints = get_type_hints(callback)
        return_type = hints.get("return")
    
        # If async, unwrap from Awaitable
        if (
            return_type
            and hasattr(return_type, "__origin__")
            and return_type.__origin__ is Awaitable
        ):
            return_type = return_type.__args__[0]
        return StructuredAgent[None, TResult](agent, return_type or str)  # type: ignore
    

    is_busy

    is_busy() -> bool
    

    Check if agent is currently processing tasks.

    Source code in src/llmling_agent/agent/structured.py
    def is_busy(self) -> bool:
        """Check if agent is currently processing tasks."""
        return bool(self._pending_tasks or self._background_task)
    

    run_iter async

    run_iter(
        *prompt_groups: Sequence[AnyPromptType | Image | PathLike[str]], **kwargs: Any
    ) -> AsyncIterator[ChatMessage[Any]]
    

    Forward run_iter to wrapped agent.

    Source code in src/llmling_agent/agent/structured.py
    async def run_iter(
        self,
        *prompt_groups: Sequence[AnyPromptType | PIL.Image.Image | os.PathLike[str]],
        **kwargs: Any,
    ) -> AsyncIterator[ChatMessage[Any]]:
        """Forward run_iter to wrapped agent."""
        async for message in self._agent.run_iter(*prompt_groups, **kwargs):
            yield message
    

    run_job async

    run_job(
        job: Job[TDeps, TResult],
        *,
        store_history: bool = True,
        include_agent_tools: bool = True,
    ) -> ChatMessage[TResult]
    

    Execute a pre-defined job ensuring type compatibility.

    Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| job | Job[TDeps, TResult] | Job configuration to execute | required |
| store_history | bool | Whether to add job execution to conversation history | True |
| include_agent_tools | bool | Whether to include agent's tools alongside job tools | True |

    Returns:

| Type | Description |
|------|-------------|
| ChatMessage[TResult] | Task execution result |

    Raises:

| Type | Description |
|------|-------------|
| JobError | If job execution fails or types don't match |
| ValueError | If job configuration is invalid |
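
A hedged usage sketch; how the Job itself is configured is not shown on this page, so my_job is assumed to come from your manifest or task setup:

    # my_job: Job[None, Summary] obtained elsewhere; its required_return_type
    # must match the agent's result type or run_job raises JobError.
    result = await structured.run_job(
        my_job,
        store_history=True,
        include_agent_tools=False,  # run with only the job's own tools
    )
    print(result.content)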

    Source code in src/llmling_agent/agent/structured.py
    async def run_job(
        self,
        job: Job[TDeps, TResult],
        *,
        store_history: bool = True,
        include_agent_tools: bool = True,
    ) -> ChatMessage[TResult]:
        """Execute a pre-defined job ensuring type compatibility.
    
        Args:
            job: Job configuration to execute
            store_history: Whether to add job execution to conversation history
            include_agent_tools: Whether to include agent's tools alongside job tools
    
        Returns:
            Task execution result
    
        Raises:
            JobError: If job execution fails or types don't match
            ValueError: If job configuration is invalid
        """
        from llmling_agent.tasks import JobError
    
        # Validate dependency requirement
        if job.required_dependency is not None:  # noqa: SIM102
            if not isinstance(self.context.data, job.required_dependency):
                msg = (
                    f"Agent dependencies ({type(self.context.data)}) "
                    f"don't match job requirement ({job.required_dependency})"
                )
                raise JobError(msg)
    
        # Validate return type requirement
        if job.required_return_type != self._result_type:
            msg = (
                f"Agent result type ({self._result_type}) "
                f"doesn't match job requirement ({job.required_return_type})"
            )
            raise JobError(msg)
    
        # Load task knowledge if provided
        if job.knowledge:
            # Add knowledge sources to context
            resources: list[Resource | str] = list(job.knowledge.paths) + list(
                job.knowledge.resources
            )
            for source in resources:
                await self.conversation.load_context_source(source)
            for prompt in job.knowledge.prompts:
                await self.conversation.load_context_source(prompt)
    
        try:
            # Register task tools temporarily
            tools = job.get_tools()
    
            # Use temporary tools
            with self._agent.tools.temporary_tools(
                tools, exclusive=not include_agent_tools
            ):
                # Execute job using StructuredAgent's run to maintain type safety
                return await self.run(await job.get_prompt(), store_history=store_history)
    
        except Exception as e:
            msg = f"Task execution failed: {e}"
            logger.exception(msg)
            raise JobError(msg) from e
    

    validate_against async

    validate_against(prompt: str, criteria: type[TResult], **kwargs: Any) -> bool
    

    Check if agent's response satisfies stricter criteria.
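
A hedged sketch: StrictSummary is a hypothetical model with tighter constraints than the agent's own result type.

    from pydantic import BaseModel, Field


    class StrictSummary(BaseModel):
        title: str = Field(min_length=10)
        topics: list[str] = Field(min_length=2)


    ok = await structured.validate_against(
        "Summarize the release notes.",
        StrictSummary,
    )
    print("meets stricter criteria:", ok)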

    Source code in src/llmling_agent/agent/structured.py
    async def validate_against(
        self,
        prompt: str,
        criteria: type[TResult],
        **kwargs: Any,
    ) -> bool:
        """Check if agent's response satisfies stricter criteria."""
        result = await self.run(prompt, **kwargs)
        try:
            criteria.model_validate(result.content.model_dump())  # type: ignore
        except ValidationError:
            return False
        else:
            return True