Skip to content

AgentConfig

Base classes

Name Children Inherits
NodeConfig
llmling_agent_config.nodes
Configuration for a Node of the messaging system.

⋔ Inheritance diagram

graph TD
  94123207821136["agents.AgentConfig"]
  94123170552208["nodes.NodeConfig"]
  94123169468080["schema.Schema"]
  94123157077184["main.BaseModel"]
  139872072243680["builtins.object"]
  94123170552208 --> 94123207821136
  94123169468080 --> 94123170552208
  94123157077184 --> 94123169468080
  139872072243680 --> 94123157077184

🛈 DocStrings

Bases: NodeConfig

Configuration for a single agent in the system.

Defines an agent's complete configuration including its model, environment, and behavior settings.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/

Source code in src/llmling_agent/models/agents.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
class AgentConfig(NodeConfig):
    """Configuration for a single agent in the system.

    Defines an agent's complete configuration including its model, environment,
    and behavior settings.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/
    """

    model_config = ConfigDict(
        json_schema_extra={
            "x-icon": "octicon:hubot-16",
            "x-doc-title": "Agent Configuration",
        }
    )

    inherits: str | None = Field(default=None, title="Inheritance source")
    """Name of agent config to inherit from"""

    model: str | ModelName | AnyModelConfig | None = Field(
        default=None,
        examples=["openai:gpt-5-nano"],
        title="Model configuration or name",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/"
        },
    )
    """The model to use for this agent. Can be either a simple model name
    string (e.g. 'openai:gpt-5') or a structured model definition.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/
    """

    tools: list[ToolConfig | str] = Field(
        default_factory=list,
        examples=[
            ["webbrowser:open", "builtins:print"],
            [
                {
                    "type": "import",
                    "import_path": "webbrowser:open",
                    "name": "web_browser",
                }
            ],
        ],
        title="Tool configurations",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/"
        },
    )
    """A list of tools to register with this agent.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/
    """

    toolsets: list[ToolsetConfig] = Field(
        default_factory=list,
        examples=[
            [
                {
                    "type": "openapi",
                    "spec": "https://api.example.com/openapi.json",
                    "namespace": "api",
                },
                {
                    "type": "file_access",
                },
                {
                    "type": "composio",
                    "user_id": "user123@example.com",
                    "toolsets": ["github", "slack"],
                },
            ],
        ],
        title="Toolset configurations",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/"
        },
    )
    """Toolset configurations for extensible tool collections.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/
    """

    session: str | SessionQuery | MemoryConfig | None = Field(
        default=None,
        examples=["main_session", "user_123"],
        title="Session configuration",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/"
        },
    )
    """Session configuration for conversation recovery.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/
    """

    output_type: str | StructuredResponseConfig | None = Field(
        default=None,
        examples=["json_response", "code_output"],
        title="Response type",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
        },
    )
    """Name of the response definition to use.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/
    """

    retries: int = Field(default=1, ge=0, examples=[1, 3], title="Model retries")
    """Number of retries for failed operations (maps to pydantic-ai's retries)"""

    output_retries: int | None = Field(default=None, examples=[1, 3], title="Output retries")
    """Max retries for result validation"""

    end_strategy: EndStrategy = Field(
        default="early",
        examples=["early", "exhaust"],
        title="Tool execution strategy",
    )
    """The strategy for handling multiple tool calls when a final result is found"""

    avatar: str | None = Field(
        default=None,
        examples=["https://example.com/avatar.png", "/assets/robot.jpg"],
        title="Avatar image",
    )
    """URL or path to agent's avatar image"""

    system_prompts: Sequence[str | PromptConfig] = Field(
        default_factory=list,
        title="System prompts",
        examples=[["You are an AI assistant."]],
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/"
        },
    )
    """System prompts for the agent. Can be strings or structured prompt configs.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/
    """

    # context_sources: list[ContextSource] = Field(default_factory=list)
    # """Initial context sources to load"""

    config_file_path: str | None = Field(
        default=None,
        examples=["/path/to/config.yml", "configs/agent.yaml"],
        title="Configuration file path",
    )
    """Config file path for resolving environment."""

    knowledge: Knowledge | None = Field(
        default=None,
        title="Knowledge sources",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/"
        },
    )
    """Knowledge sources for this agent.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/
    """

    workers: list[WorkerConfig] = Field(
        default_factory=list,
        examples=[
            [{"type": "agent", "name": "web_agent", "reset_history_on_run": True}],
            [{"type": "team", "name": "analysis_team"}],
        ],
        title="Worker agents",
        json_schema_extra={
            "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/"
        },
    )
    """Worker agents which will be available as tools.

    Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/
    """

    requires_tool_confirmation: ToolConfirmationMode = Field(
        default="per_tool",
        examples=["always", "never", "per_tool"],
        title="Tool confirmation mode",
    )
    """How to handle tool confirmation:
    - "always": Always require confirmation for all tools
    - "never": Never require confirmation (ignore tool settings)
    - "per_tool": Use individual tool settings
    """

    debug: bool = Field(default=False, title="Debug mode")
    """Enable debug output for this agent."""

    usage_limits: UsageLimits | None = Field(default=None, title="Usage limits")
    """Usage limits for this agent."""

    tool_mode: ToolMode | None = Field(
        default=None,
        examples=["codemode"],
        title="Tool execution mode",
    )
    """Tool execution mode:
    - None: Default mode - tools are called directly
    - "codemode": Tools are wrapped in a Python execution environment
    """

    def is_structured(self) -> bool:
        """Check if this config defines a structured agent."""
        return self.output_type is not None

    @model_validator(mode="before")
    @classmethod
    def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert result type and apply its settings."""
        output_type = data.get("output_type")
        if isinstance(output_type, dict):
            # Extract response-specific settings
            tool_name = output_type.pop("result_tool_name", None)
            tool_description = output_type.pop("result_tool_description", None)
            retries = output_type.pop("output_retries", None)

            # Convert remaining dict to ResponseDefinition
            if "type" not in output_type["response_schema"]:
                output_type["response_schema"]["type"] = "inline"
            data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)

            # Apply extracted settings to agent config
            if tool_name:
                data["result_tool_name"] = tool_name
            if tool_description:
                data["result_tool_description"] = tool_description
            if retries is not None:
                data["output_retries"] = retries

        return data

    @model_validator(mode="before")
    @classmethod
    def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert model inputs to appropriate format."""
        if isinstance((model := data.get("model")), str):
            data["model"] = {"type": "string", "identifier": model}
        return data

    def get_toolsets(self) -> list[ResourceProvider]:
        """Get all resource providers for this agent."""
        providers: list[ResourceProvider] = []

        # Add providers from toolsets
        for toolset_config in self.toolsets:
            try:
                provider = toolset_config.get_provider()
                providers.append(provider)
            except Exception as e:
                msg = "Failed to create provider for toolset"
                logger.exception(msg, toolset_config)
                raise ValueError(msg) from e

        return providers

    def get_tool_provider(self) -> ResourceProvider | None:
        """Get tool provider for this agent."""
        from llmling_agent.tools.base import Tool

        # Create provider for static tools
        if not self.tools:
            return None
        static_tools: list[Tool] = []
        for tool_config in self.tools:
            try:
                match tool_config:
                    case str():
                        if tool_config.startswith("crewai_tools"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_crewai_tool(obj))
                        elif tool_config.startswith("langchain"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_langchain_tool(obj))
                        else:
                            tool = Tool.from_callable(tool_config)
                            static_tools.append(tool)
                    case BaseToolConfig():
                        static_tools.append(tool_config.get_tool())
            except Exception:
                logger.exception("Failed to load tool", config=tool_config)
                continue

        return StaticResourceProvider(name="builtin", tools=static_tools)

    def get_session_config(self) -> MemoryConfig:
        """Get resolved memory configuration."""
        match self.session:
            case str() | UUID():
                return MemoryConfig(session=SessionQuery(name=str(self.session)))
            case SessionQuery():
                return MemoryConfig(session=self.session)
            case MemoryConfig():
                return self.session
            case None:
                return MemoryConfig()
            case _:
                msg = f"Invalid session configuration: {self.session}"
                raise ValueError(msg)

    def get_system_prompts(self) -> list[BasePrompt]:
        """Get all system prompts as BasePrompts."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        prompts: list[BasePrompt] = []
        for prompt in self.system_prompts:
            match prompt:
                case str():
                    # Convert string to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=prompt)],
                    )
                    prompts.append(static_prompt)
                case StaticPromptConfig(content=content):
                    # Convert StaticPromptConfig to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=content)],
                    )
                    prompts.append(static_prompt)
                case FilePromptConfig(path=path):
                    # Load and convert file-based prompt

                    template_path = Path(path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / path

                    template_content = template_path.read_text("utf-8")
                    # Create a template-based prompt
                    # (for now as StaticPrompt with placeholder)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"File prompt: {path}",
                        messages=[PromptMessage(role="system", content=template_content)],
                    )
                    prompts.append(static_prompt)
                case LibraryPromptConfig(reference=reference):
                    # Create placeholder for library prompts (resolved by manifest)
                    msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Library: {reference}",
                        messages=[msg],
                    )
                    prompts.append(static_prompt)
                case FunctionPromptConfig(arguments=arguments, function=function):
                    # Import and call the function to get prompt content
                    content = function(**arguments)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Function prompt: {function}",
                        messages=[PromptMessage(role="system", content=content)],
                    )
                    prompts.append(static_prompt)
                case BasePrompt():
                    prompts.append(prompt)
        return prompts

    def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
        """Render system prompts with context."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        if not context:
            # Default context
            context = {"name": self.name, "id": 1, "model": self.model}

        rendered_prompts: list[str] = []
        for prompt in self.system_prompts:
            match prompt:
                case (str() as content) | StaticPromptConfig(content=content):
                    rendered_prompts.append(render_prompt(content, {"agent": context}))
                case FilePromptConfig(path=path, variables=variables):
                    # Load and render Jinja template from file

                    template_path = Path(path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / path

                    template_content = template_path.read_text("utf-8")
                    template_ctx = {"agent": context, **variables}
                    rendered_prompts.append(render_prompt(template_content, template_ctx))
                case LibraryPromptConfig(reference=reference):
                    # This will be handled by the manifest's get_agent method
                    # For now, just add a placeholder
                    rendered_prompts.append(f"[LIBRARY:{reference}]")
                case FunctionPromptConfig(function=function, arguments=arguments):
                    # Import and call the function to get prompt content
                    content = function(**arguments)
                    rendered_prompts.append(render_prompt(content, {"agent": context}))

        return rendered_prompts

avatar class-attribute instance-attribute

avatar: str | None = Field(
    default=None,
    examples=["https://example.com/avatar.png", "/assets/robot.jpg"],
    title="Avatar image",
)

URL or path to agent's avatar image

config_file_path class-attribute instance-attribute

config_file_path: str | None = Field(
    default=None,
    examples=["/path/to/config.yml", "configs/agent.yaml"],
    title="Configuration file path",
)

Config file path for resolving environment.

debug class-attribute instance-attribute

debug: bool = Field(default=False, title='Debug mode')

Enable debug output for this agent.

end_strategy class-attribute instance-attribute

end_strategy: EndStrategy = Field(
    default="early", examples=["early", "exhaust"], title="Tool execution strategy"
)

The strategy for handling multiple tool calls when a final result is found

inherits class-attribute instance-attribute

inherits: str | None = Field(default=None, title='Inheritance source')

Name of agent config to inherit from

knowledge class-attribute instance-attribute

knowledge: Knowledge | None = Field(
    default=None,
    title="Knowledge sources",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/"
    },
)

model class-attribute instance-attribute

model: str | ModelName | AnyModelConfig | None = Field(
    default=None,
    examples=["openai:gpt-5-nano"],
    title="Model configuration or name",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/"
    },
)

The model to use for this agent. Can be either a simple model name string (e.g. 'openai:gpt-5') or a structured model definition.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/

output_retries class-attribute instance-attribute

output_retries: int | None = Field(default=None, examples=[1, 3], title='Output retries')

Max retries for result validation

output_type class-attribute instance-attribute

output_type: str | StructuredResponseConfig | None = Field(
    default=None,
    examples=["json_response", "code_output"],
    title="Response type",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
    },
)

requires_tool_confirmation class-attribute instance-attribute

requires_tool_confirmation: ToolConfirmationMode = Field(
    default="per_tool", examples=["always", "never", "per_tool"], title="Tool confirmation mode"
)

How to handle tool confirmation:

- "always": Always require confirmation for all tools
- "never": Never require confirmation (ignore tool settings)
- "per_tool": Use individual tool settings

retries class-attribute instance-attribute

retries: int = Field(default=1, ge=0, examples=[1, 3], title='Model retries')

Number of retries for failed operations (maps to pydantic-ai's retries)

session class-attribute instance-attribute

session: str | SessionQuery | MemoryConfig | None = Field(
    default=None,
    examples=["main_session", "user_123"],
    title="Session configuration",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/"
    },
)

Session configuration for conversation recovery.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/

system_prompts class-attribute instance-attribute

system_prompts: Sequence[str | PromptConfig] = Field(
    default_factory=list,
    title="System prompts",
    examples=[["You are an AI assistant."]],
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/"
    },
)

System prompts for the agent. Can be strings or structured prompt configs.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/

tool_mode class-attribute instance-attribute

tool_mode: ToolMode | None = Field(default=None, examples=["codemode"], title="Tool execution mode")

Tool execution mode:

- None: Default mode — tools are called directly
- "codemode": Tools are wrapped in a Python execution environment

tools class-attribute instance-attribute

tools: list[ToolConfig | str] = Field(
    default_factory=list,
    examples=[
        ["webbrowser:open", "builtins:print"],
        [{"type": "import", "import_path": "webbrowser:open", "name": "web_browser"}],
    ],
    title="Tool configurations",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/"
    },
)

A list of tools to register with this agent.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/

toolsets class-attribute instance-attribute

toolsets: list[ToolsetConfig] = Field(
    default_factory=list,
    examples=[
        [
            {"type": "openapi", "spec": "https://api.example.com/openapi.json", "namespace": "api"},
            {"type": "file_access"},
            {"type": "composio", "user_id": "user123@example.com", "toolsets": ["github", "slack"]},
        ]
    ],
    title="Toolset configurations",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/"
    },
)

Toolset configurations for extensible tool collections.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/

usage_limits class-attribute instance-attribute

usage_limits: UsageLimits | None = Field(default=None, title='Usage limits')

Usage limits for this agent.

workers class-attribute instance-attribute

workers: list[WorkerConfig] = Field(
    default_factory=list,
    examples=[
        [{"type": "agent", "name": "web_agent", "reset_history_on_run": True}],
        [{"type": "team", "name": "analysis_team"}],
    ],
    title="Worker agents",
    json_schema_extra={
        "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/"
    },
)

Worker agents which will be available as tools.

Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/

get_session_config

get_session_config() -> MemoryConfig

Get resolved memory configuration.

Source code in src/llmling_agent/models/agents.py
538
539
540
541
542
543
544
545
546
547
548
549
550
551
def get_session_config(self) -> MemoryConfig:
    """Get resolved memory configuration."""
    match self.session:
        case str() | UUID():
            return MemoryConfig(session=SessionQuery(name=str(self.session)))
        case SessionQuery():
            return MemoryConfig(session=self.session)
        case MemoryConfig():
            return self.session
        case None:
            return MemoryConfig()
        case _:
            msg = f"Invalid session configuration: {self.session}"
            raise ValueError(msg)

get_system_prompts

get_system_prompts() -> list[BasePrompt]

Get all system prompts as BasePrompts.

Source code in src/llmling_agent/models/agents.py
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
def get_system_prompts(self) -> list[BasePrompt]:
    """Get all system prompts as BasePrompts."""
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    prompts: list[BasePrompt] = []
    for prompt in self.system_prompts:
        match prompt:
            case str():
                # Convert string to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=prompt)],
                )
                prompts.append(static_prompt)
            case StaticPromptConfig(content=content):
                # Convert StaticPromptConfig to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=content)],
                )
                prompts.append(static_prompt)
            case FilePromptConfig(path=path):
                # Load and convert file-based prompt

                template_path = Path(path)
                if not template_path.is_absolute() and self.config_file_path:
                    base_path = Path(self.config_file_path).parent
                    template_path = base_path / path

                template_content = template_path.read_text("utf-8")
                # Create a template-based prompt
                # (for now as StaticPrompt with placeholder)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"File prompt: {path}",
                    messages=[PromptMessage(role="system", content=template_content)],
                )
                prompts.append(static_prompt)
            case LibraryPromptConfig(reference=reference):
                # Create placeholder for library prompts (resolved by manifest)
                msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Library: {reference}",
                    messages=[msg],
                )
                prompts.append(static_prompt)
            case FunctionPromptConfig(arguments=arguments, function=function):
                # Import and call the function to get prompt content
                content = function(**arguments)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Function prompt: {function}",
                    messages=[PromptMessage(role="system", content=content)],
                )
                prompts.append(static_prompt)
            case BasePrompt():
                prompts.append(prompt)
    return prompts

get_tool_provider

get_tool_provider() -> ResourceProvider | None

Get tool provider for this agent.

Source code in src/llmling_agent/models/agents.py
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
def get_tool_provider(self) -> ResourceProvider | None:
    """Get tool provider for this agent."""
    from llmling_agent.tools.base import Tool

    # Create provider for static tools
    if not self.tools:
        return None
    static_tools: list[Tool] = []
    for tool_config in self.tools:
        try:
            match tool_config:
                case str():
                    if tool_config.startswith("crewai_tools"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_crewai_tool(obj))
                    elif tool_config.startswith("langchain"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_langchain_tool(obj))
                    else:
                        tool = Tool.from_callable(tool_config)
                        static_tools.append(tool)
                case BaseToolConfig():
                    static_tools.append(tool_config.get_tool())
        except Exception:
            logger.exception("Failed to load tool", config=tool_config)
            continue

    return StaticResourceProvider(name="builtin", tools=static_tools)

get_toolsets

get_toolsets() -> list[ResourceProvider]

Get all resource providers for this agent.

Source code in src/llmling_agent/models/agents.py
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
def get_toolsets(self) -> list[ResourceProvider]:
    """Get all resource providers for this agent."""
    providers: list[ResourceProvider] = []

    # Add providers from toolsets
    for toolset_config in self.toolsets:
        try:
            provider = toolset_config.get_provider()
            providers.append(provider)
        except Exception as e:
            msg = "Failed to create provider for toolset"
            logger.exception(msg, toolset_config)
            raise ValueError(msg) from e

    return providers

handle_model_types classmethod

handle_model_types(data: dict[str, Any]) -> dict[str, Any]

Convert model inputs to appropriate format.

Source code in src/llmling_agent/models/agents.py
485
486
487
488
489
490
491
@model_validator(mode="before")
@classmethod
def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert model inputs to appropriate format.

    A bare string model name is normalized into the structured
    ``{"type": "string", "identifier": ...}`` mapping; any other
    value is left untouched.
    """
    model = data.get("model")
    if isinstance(model, str):
        data["model"] = {"type": "string", "identifier": model}
    return data

is_structured

is_structured() -> bool

Check if this config defines a structured agent.

Source code in src/llmling_agent/models/agents.py
455
456
457
def is_structured(self) -> bool:
    """Check if this config defines a structured agent."""
    # A configured (non-None) output type is what makes an agent "structured".
    has_output_type = self.output_type is not None
    return has_output_type

render_system_prompts

render_system_prompts(context: dict[str, Any] | None = None) -> list[str]

Render system prompts with context.

Source code in src/llmling_agent/models/agents.py
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
    """Render system prompts with context."""
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    if not context:
        # Default context
        context = {"name": self.name, "id": 1, "model": self.model}

    rendered_prompts: list[str] = []
    for prompt in self.system_prompts:
        match prompt:
            case (str() as content) | StaticPromptConfig(content=content):
                rendered_prompts.append(render_prompt(content, {"agent": context}))
            case FilePromptConfig(path=path, variables=variables):
                # Load and render Jinja template from file

                template_path = Path(path)
                if not template_path.is_absolute() and self.config_file_path:
                    base_path = Path(self.config_file_path).parent
                    template_path = base_path / path

                template_content = template_path.read_text("utf-8")
                template_ctx = {"agent": context, **variables}
                rendered_prompts.append(render_prompt(template_content, template_ctx))
            case LibraryPromptConfig(reference=reference):
                # This will be handled by the manifest's get_agent method
                # For now, just add a placeholder
                rendered_prompts.append(f"[LIBRARY:{reference}]")
            case FunctionPromptConfig(function=function, arguments=arguments):
                # Import and call the function to get prompt content
                content = function(**arguments)
                rendered_prompts.append(render_prompt(content, {"agent": context}))

    return rendered_prompts

validate_output_type classmethod

validate_output_type(data: dict[str, Any]) -> dict[str, Any]

Convert the output type definition to structured form and apply its settings.

Source code in src/llmling_agent/models/agents.py
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
@model_validator(mode="before")
@classmethod
def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert result type and apply its settings.

    Pops tool-related settings out of an ``output_type`` dict, promotes
    them onto the agent config itself, and wraps the schema portion in an
    ``InlineSchemaDef``. Non-dict values pass through unchanged.
    """
    output_type = data.get("output_type")
    if isinstance(output_type, dict):
        # Extract response-specific settings (removed from the dict so
        # they are not forwarded to the schema definition below)
        tool_name = output_type.pop("result_tool_name", None)
        tool_description = output_type.pop("result_tool_description", None)
        retries = output_type.pop("output_retries", None)

        # Convert remaining dict to ResponseDefinition
        # NOTE(review): raises KeyError if "response_schema" is absent —
        # confirm whether that key is guaranteed by earlier validation.
        if "type" not in output_type["response_schema"]:
            output_type["response_schema"]["type"] = "inline"
        # NOTE(review): spreads the whole remaining dict (still including
        # "response_schema") into InlineSchemaDef — verify this shouldn't
        # be **output_type["response_schema"] instead.
        data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)

        # Apply extracted settings to agent config
        if tool_name:
            data["result_tool_name"] = tool_name
        if tool_description:
            data["result_tool_description"] = tool_description
        if retries is not None:
            data["output_retries"] = retries

    return data

Show source on GitHub