
AgentConfig

Base classes

NodeConfig (llmling_agent_config.nodes): Configuration for a Node of the messaging system.

⋔ Inheritance diagram

graph TD
  AgentConfig["agents.AgentConfig"]
  NodeConfig["nodes.NodeConfig"]
  Schema["schema.Schema"]
  BaseModel["main.BaseModel"]
  object["builtins.object"]
  NodeConfig --> AgentConfig
  Schema --> NodeConfig
  BaseModel --> Schema
  object --> BaseModel

🛈 DocStrings

Bases: NodeConfig

Configuration for a single agent in the system.

Defines an agent's complete configuration including its model, environment, and behavior settings. Each agent can have its own:

- Language model configuration
- Environment setup (tools and resources)
- Response type definitions
- System prompts and default user prompts

The configuration can be loaded from YAML or created programmatically.
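
Below is a minimal programmatic sketch. It assumes AgentConfig is importable from llmling_agent.models.agents (the source path shown below) and uses only the string-friendly fields documented on the class; the tool import path is hypothetical.

from llmling_agent.models.agents import AgentConfig

# Plain strings work for the model, tool import paths, and system prompts;
# structured config objects can be used instead of any of them.
config = AgentConfig(
    model="openai:gpt-5",
    tools=["my_package.tools.web_search"],  # hypothetical dotted import path
    system_prompts=["You are a concise research assistant."],
    retries=2,
)

print(config.is_structured())  # False: no output_type was set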

Source code in src/llmling_agent/models/agents.py
class AgentConfig(NodeConfig):
    """Configuration for a single agent in the system.

    Defines an agent's complete configuration including its model, environment,
    and behavior settings. Each agent can have its own:
    - Language model configuration
    - Environment setup (tools and resources)
    - Response type definitions
    - System prompts and default user prompts

    The configuration can be loaded from YAML or created programmatically.
    """

    inherits: str | None = None
    """Name of agent config to inherit from"""

    model: str | AnyModelConfig | None = None
    """The model to use for this agent. Can be either a simple model name
    string (e.g. 'openai:gpt-5') or a structured model definition."""

    tools: list[ToolConfig | str] = Field(default_factory=list)
    """A list of tools to register with this agent."""

    toolsets: list[ToolsetConfig] = Field(default_factory=list)
    """Toolset configurations for extensible tool collections."""

    session: str | SessionQuery | MemoryConfig | None = None
    """Session configuration for conversation recovery."""

    output_type: str | StructuredResponseConfig | None = None
    """Name of the response definition to use"""

    retries: int = 1
    """Number of retries for failed operations (maps to pydantic-ai's retries)"""

    result_tool_name: str = "final_result"
    """Name of the tool used for structured responses"""

    result_tool_description: str | None = None
    """Custom description for the result tool"""

    output_retries: int | None = None
    """Max retries for result validation"""

    end_strategy: EndStrategy = "early"
    """The strategy for handling multiple tool calls when a final result is found"""

    avatar: str | None = None
    """URL or path to agent's avatar image"""

    system_prompts: Sequence[str | PromptConfig] = Field(default_factory=list)
    """System prompts for the agent. Can be strings or structured prompt configs."""

    # context_sources: list[ContextSource] = Field(default_factory=list)
    # """Initial context sources to load"""

    config_file_path: str | None = None
    """Config file path for resolving environment."""

    knowledge: Knowledge | None = None
    """Knowledge sources for this agent."""

    workers: list[WorkerConfig] = Field(default_factory=list)
    """Worker agents which will be available as tools."""

    requires_tool_confirmation: ToolConfirmationMode = "per_tool"
    """How to handle tool confirmation:
    - "always": Always require confirmation for all tools
    - "never": Never require confirmation (ignore tool settings)
    - "per_tool": Use individual tool settings
    """

    debug: bool = False
    """Enable debug output for this agent."""

    usage_limits: UsageLimits | None = None
    """Usage limits for this agent."""

    def is_structured(self) -> bool:
        """Check if this config defines a structured agent."""
        return self.output_type is not None

    @model_validator(mode="before")
    @classmethod
    def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert result type and apply its settings."""
        output_type = data.get("output_type")
        if isinstance(output_type, dict):
            # Extract response-specific settings
            tool_name = output_type.pop("result_tool_name", None)
            tool_description = output_type.pop("result_tool_description", None)
            retries = output_type.pop("output_retries", None)

            # Convert remaining dict to ResponseDefinition
            if "type" not in output_type["response_schema"]:
                output_type["response_schema"]["type"] = "inline"
            data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)

            # Apply extracted settings to agent config
            if tool_name:
                data["result_tool_name"] = tool_name
            if tool_description:
                data["result_tool_description"] = tool_description
            if retries is not None:
                data["output_retries"] = retries

        return data

    @model_validator(mode="before")
    @classmethod
    def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert model inputs to appropriate format."""
        if isinstance((model := data.get("model")), str):
            data["model"] = {"type": "string", "identifier": model}
        return data

    def get_toolsets(self) -> list[ResourceProvider]:
        """Get all resource providers for this agent."""
        providers: list[ResourceProvider] = []

        # Add providers from toolsets
        for toolset_config in self.toolsets:
            try:
                provider = toolset_config.get_provider()
                providers.append(provider)
            except Exception as e:
                msg = "Failed to create provider for toolset"
                logger.exception(msg, toolset_config)
                raise ValueError(msg) from e

        return providers

    def get_tool_provider(self) -> ResourceProvider | None:
        """Get tool provider for this agent."""
        from llmling_agent.tools.base import Tool

        # Create provider for static tools
        if not self.tools:
            return None
        static_tools: list[Tool] = []
        for tool_config in self.tools:
            try:
                match tool_config:
                    case str():
                        if tool_config.startswith("crewai_tools"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_crewai_tool(obj))
                        elif tool_config.startswith("langchain"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_langchain_tool(obj))
                        else:
                            tool = Tool.from_callable(tool_config)
                            static_tools.append(tool)
                    case BaseToolConfig():
                        static_tools.append(tool_config.get_tool())
            except Exception:
                logger.exception("Failed to load tool", config=tool_config)
                continue

        return StaticResourceProvider(name="builtin", tools=static_tools)

    def get_session_config(self) -> MemoryConfig:
        """Get resolved memory configuration."""
        match self.session:
            case str() | UUID():
                return MemoryConfig(session=SessionQuery(name=str(self.session)))
            case SessionQuery():
                return MemoryConfig(session=self.session)
            case MemoryConfig():
                return self.session
            case None:
                return MemoryConfig()
            case _:
                msg = f"Invalid session configuration: {self.session}"
                raise ValueError(msg)

    def get_system_prompts(self) -> list[BasePrompt]:
        """Get all system prompts as BasePrompts."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        prompts: list[BasePrompt] = []
        for prompt in self.system_prompts:
            match prompt:
                case str():
                    # Convert string to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=prompt)],
                    )
                    prompts.append(static_prompt)
                case StaticPromptConfig(content=content):
                    # Convert StaticPromptConfig to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=content)],
                    )
                    prompts.append(static_prompt)
                case FilePromptConfig(path=path):
                    # Load and convert file-based prompt

                    template_path = Path(path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / path

                    template_content = template_path.read_text("utf-8")
                    # Create a template-based prompt
                    # (for now as StaticPrompt with placeholder)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"File prompt: {path}",
                        messages=[PromptMessage(role="system", content=template_content)],
                    )
                    prompts.append(static_prompt)
                case LibraryPromptConfig(reference=reference):
                    # Create placeholder for library prompts (resolved by manifest)
                    msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Library: {reference}",
                        messages=[msg],
                    )
                    prompts.append(static_prompt)
                case FunctionPromptConfig(arguments=arguments, function=function):
                    # Import and call the function to get prompt content
                    content = function(**arguments)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Function prompt: {function}",
                        messages=[PromptMessage(role="system", content=content)],
                    )
                    prompts.append(static_prompt)
                case BasePrompt():
                    prompts.append(prompt)
        return prompts

    def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
        """Render system prompts with context."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        if not context:
            # Default context
            context = {"name": self.name, "id": 1, "model": self.model}

        rendered_prompts: list[str] = []
        for prompt in self.system_prompts:
            match prompt:
                case (str() as content) | StaticPromptConfig(content=content):
                    rendered_prompts.append(render_prompt(content, {"agent": context}))
                case FilePromptConfig(path=path, variables=variables):
                    # Load and render Jinja template from file

                    template_path = Path(path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / path

                    template_content = template_path.read_text("utf-8")
                    template_ctx = {"agent": context, **variables}
                    rendered_prompts.append(render_prompt(template_content, template_ctx))
                case LibraryPromptConfig(reference=reference):
                    # This will be handled by the manifest's get_agent method
                    # For now, just add a placeholder
                    rendered_prompts.append(f"[LIBRARY:{reference}]")
                case FunctionPromptConfig(function=function, arguments=arguments):
                    # Import and call the function to get prompt content
                    content = function(**arguments)
                    rendered_prompts.append(render_prompt(content, {"agent": context}))

        return rendered_prompts

avatar class-attribute instance-attribute

avatar: str | None = None

URL or path to agent's avatar image

config_file_path class-attribute instance-attribute

config_file_path: str | None = None

Config file path for resolving environment.

debug class-attribute instance-attribute

debug: bool = False

Enable debug output for this agent.

end_strategy class-attribute instance-attribute

end_strategy: EndStrategy = 'early'

The strategy for handling multiple tool calls when a final result is found

inherits class-attribute instance-attribute

inherits: str | None = None

Name of agent config to inherit from

knowledge class-attribute instance-attribute

knowledge: Knowledge | None = None

Knowledge sources for this agent.

model class-attribute instance-attribute

model: str | AnyModelConfig | None = None

The model to use for this agent. Can be either a simple model name string (e.g. 'openai:gpt-5') or a structured model definition.

output_retries class-attribute instance-attribute

output_retries: int | None = None

Max retries for result validation

output_type class-attribute instance-attribute

output_type: str | StructuredResponseConfig | None = None

Name of the response definition to use

requires_tool_confirmation class-attribute instance-attribute

requires_tool_confirmation: ToolConfirmationMode = 'per_tool'

How to handle tool confirmation:

- "always": Always require confirmation for all tools
- "never": Never require confirmation (ignore tool settings)
- "per_tool": Use individual tool settings

result_tool_description class-attribute instance-attribute

result_tool_description: str | None = None

Custom description for the result tool

result_tool_name class-attribute instance-attribute

result_tool_name: str = 'final_result'

Name of the tool used for structured responses

retries class-attribute instance-attribute

retries: int = 1

Number of retries for failed operations (maps to pydantic-ai's retries)

session class-attribute instance-attribute

session: str | SessionQuery | MemoryConfig | None = None

Session configuration for conversation recovery.

system_prompts class-attribute instance-attribute

system_prompts: Sequence[str | PromptConfig] = Field(default_factory=list)

System prompts for the agent. Can be strings or structured prompt configs.

tools class-attribute instance-attribute

tools: list[ToolConfig | str] = Field(default_factory=list)

A list of tools to register with this agent.

toolsets class-attribute instance-attribute

toolsets: list[ToolsetConfig] = Field(default_factory=list)

Toolset configurations for extensible tool collections.

usage_limits class-attribute instance-attribute

usage_limits: UsageLimits | None = None

Usage limits for this agent.

workers class-attribute instance-attribute

workers: list[WorkerConfig] = Field(default_factory=list)

Worker agents which will be available as tools.

get_session_config

get_session_config() -> MemoryConfig

Get resolved memory configuration.

Source code in src/llmling_agent/models/agents.py
def get_session_config(self) -> MemoryConfig:
    """Get resolved memory configuration."""
    match self.session:
        case str() | UUID():
            return MemoryConfig(session=SessionQuery(name=str(self.session)))
        case SessionQuery():
            return MemoryConfig(session=self.session)
        case MemoryConfig():
            return self.session
        case None:
            return MemoryConfig()
        case _:
            msg = f"Invalid session configuration: {self.session}"
            raise ValueError(msg)
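
A short usage sketch, assuming SessionQuery and MemoryConfig accept the keyword arguments used in the method body:

from llmling_agent.models.agents import AgentConfig

# A bare string is treated as a session name and wrapped in a SessionQuery.
cfg = AgentConfig(session="support-chat")
memory = cfg.get_session_config()
# -> MemoryConfig(session=SessionQuery(name="support-chat"))

# With no session configured, a default MemoryConfig is returned.
default_memory = AgentConfig().get_session_config()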

get_system_prompts

get_system_prompts() -> list[BasePrompt]

Get all system prompts as BasePrompts.

Source code in src/llmling_agent/models/agents.py
def get_system_prompts(self) -> list[BasePrompt]:
    """Get all system prompts as BasePrompts."""
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    prompts: list[BasePrompt] = []
    for prompt in self.system_prompts:
        match prompt:
            case str():
                # Convert string to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=prompt)],
                )
                prompts.append(static_prompt)
            case StaticPromptConfig(content=content):
                # Convert StaticPromptConfig to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=content)],
                )
                prompts.append(static_prompt)
            case FilePromptConfig(path=path):
                # Load and convert file-based prompt

                template_path = Path(path)
                if not template_path.is_absolute() and self.config_file_path:
                    base_path = Path(self.config_file_path).parent
                    template_path = base_path / path

                template_content = template_path.read_text("utf-8")
                # Create a template-based prompt
                # (for now as StaticPrompt with placeholder)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"File prompt: {path}",
                    messages=[PromptMessage(role="system", content=template_content)],
                )
                prompts.append(static_prompt)
            case LibraryPromptConfig(reference=reference):
                # Create placeholder for library prompts (resolved by manifest)
                msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Library: {reference}",
                    messages=[msg],
                )
                prompts.append(static_prompt)
            case FunctionPromptConfig(arguments=arguments, function=function):
                # Import and call the function to get prompt content
                content = function(**arguments)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Function prompt: {function}",
                    messages=[PromptMessage(role="system", content=content)],
                )
                prompts.append(static_prompt)
            case BasePrompt():
                prompts.append(prompt)
    return prompts
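
A small sketch of the plain-string case, which the match statement above converts into StaticPrompt instances:

from llmling_agent.models.agents import AgentConfig

cfg = AgentConfig(system_prompts=["You are a code reviewer.", "Answer briefly."])
prompts = cfg.get_system_prompts()
# Each string becomes a StaticPrompt named "system" containing a single
# system-role PromptMessage with the original text.
print(len(prompts))  # 2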

get_tool_provider

get_tool_provider() -> ResourceProvider | None

Get tool provider for this agent.

Source code in src/llmling_agent/models/agents.py
def get_tool_provider(self) -> ResourceProvider | None:
    """Get tool provider for this agent."""
    from llmling_agent.tools.base import Tool

    # Create provider for static tools
    if not self.tools:
        return None
    static_tools: list[Tool] = []
    for tool_config in self.tools:
        try:
            match tool_config:
                case str():
                    if tool_config.startswith("crewai_tools"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_crewai_tool(obj))
                    elif tool_config.startswith("langchain"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_langchain_tool(obj))
                    else:
                        tool = Tool.from_callable(tool_config)
                        static_tools.append(tool)
                case BaseToolConfig():
                    static_tools.append(tool_config.get_tool())
        except Exception:
            logger.exception("Failed to load tool", config=tool_config)
            continue

    return StaticResourceProvider(name="builtin", tools=static_tools)
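
A hedged usage sketch; the dotted import path is hypothetical:

from llmling_agent.models.agents import AgentConfig

# Tool strings are resolved by prefix: crewai_tools/langchain paths get their
# dedicated adapters, anything else is imported as a callable. Tools that
# fail to load are logged and skipped rather than raising.
cfg = AgentConfig(tools=["my_package.tools.fetch_url"])  # hypothetical path
provider = cfg.get_tool_provider()  # StaticResourceProvider(name="builtin", ...)

no_tools = AgentConfig().get_tool_provider()  # None when no tools are configured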

get_toolsets

get_toolsets() -> list[ResourceProvider]

Get all resource providers for this agent.

Source code in src/llmling_agent/models/agents.py
def get_toolsets(self) -> list[ResourceProvider]:
    """Get all resource providers for this agent."""
    providers: list[ResourceProvider] = []

    # Add providers from toolsets
    for toolset_config in self.toolsets:
        try:
            provider = toolset_config.get_provider()
            providers.append(provider)
        except Exception as e:
            msg = "Failed to create provider for toolset"
            logger.exception(msg, toolset_config)
            raise ValueError(msg) from e

    return providers
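
A minimal sketch of the behavior worth noting here: unlike get_tool_provider, a toolset that fails to produce a provider raises ValueError instead of being skipped.

from llmling_agent.models.agents import AgentConfig

cfg = AgentConfig()        # no toolsets configured
print(cfg.get_toolsets())  # [] -- one ResourceProvider per configured toolset otherwise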

handle_model_types classmethod

handle_model_types(data: dict[str, Any]) -> dict[str, Any]

Convert model inputs to appropriate format.

Source code in src/llmling_agent/models/agents.py
@model_validator(mode="before")
@classmethod
def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert model inputs to appropriate format."""
    if isinstance((model := data.get("model")), str):
        data["model"] = {"type": "string", "identifier": model}
    return data
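
A small sketch of the normalization, assuming construction goes through pydantic validation as usual:

from llmling_agent.models.agents import AgentConfig

cfg = AgentConfig(model="openai:gpt-5")
# Before field validation, the plain string was rewritten to the structured
# form {"type": "string", "identifier": "openai:gpt-5"}, so cfg.model is an
# AnyModelConfig instance rather than a bare string.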

is_structured

is_structured() -> bool

Check if this config defines a structured agent.

Source code in src/llmling_agent/models/agents.py
def is_structured(self) -> bool:
    """Check if this config defines a structured agent."""
    return self.output_type is not None
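
For example (the response-definition name is hypothetical):

from llmling_agent.models.agents import AgentConfig

print(AgentConfig().is_structured())                             # False
print(AgentConfig(output_type="report_schema").is_structured())  # True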

render_system_prompts

render_system_prompts(context: dict[str, Any] | None = None) -> list[str]

Render system prompts with context.

Source code in src/llmling_agent/models/agents.py
def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
    """Render system prompts with context."""
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    if not context:
        # Default context
        context = {"name": self.name, "id": 1, "model": self.model}

    rendered_prompts: list[str] = []
    for prompt in self.system_prompts:
        match prompt:
            case (str() as content) | StaticPromptConfig(content=content):
                rendered_prompts.append(render_prompt(content, {"agent": context}))
            case FilePromptConfig(path=path, variables=variables):
                # Load and render Jinja template from file

                template_path = Path(path)
                if not template_path.is_absolute() and self.config_file_path:
                    base_path = Path(self.config_file_path).parent
                    template_path = base_path / path

                template_content = template_path.read_text("utf-8")
                template_ctx = {"agent": context, **variables}
                rendered_prompts.append(render_prompt(template_content, template_ctx))
            case LibraryPromptConfig(reference=reference):
                # This will be handled by the manifest's get_agent method
                # For now, just add a placeholder
                rendered_prompts.append(f"[LIBRARY:{reference}]")
            case FunctionPromptConfig(function=function, arguments=arguments):
                # Import and call the function to get prompt content
                content = function(**arguments)
                rendered_prompts.append(render_prompt(content, {"agent": context}))

    return rendered_prompts
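
A sketch of string-prompt rendering, assuming render_prompt applies Jinja-style templating and that name is settable at construction (it is inherited from NodeConfig):

from llmling_agent.models.agents import AgentConfig

cfg = AgentConfig(
    name="reviewer",
    system_prompts=["You are {{ agent.name }} running on {{ agent.model }}."],
)
print(cfg.render_system_prompts())
# Roughly: ['You are reviewer running on None.'] since no model is configured
# and the default context exposes name, id, and model under "agent".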

validate_output_type classmethod

validate_output_type(data: dict[str, Any]) -> dict[str, Any]

Convert result type and apply its settings.

Source code in src/llmling_agent/models/agents.py
@model_validator(mode="before")
@classmethod
def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert result type and apply its settings."""
    output_type = data.get("output_type")
    if isinstance(output_type, dict):
        # Extract response-specific settings
        tool_name = output_type.pop("result_tool_name", None)
        tool_description = output_type.pop("result_tool_description", None)
        retries = output_type.pop("output_retries", None)

        # Convert remaining dict to ResponseDefinition
        if "type" not in output_type["response_schema"]:
            output_type["response_schema"]["type"] = "inline"
        data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)

        # Apply extracted settings to agent config
        if tool_name:
            data["result_tool_name"] = tool_name
        if tool_description:
            data["result_tool_description"] = tool_description
        if retries is not None:
            data["output_retries"] = retries

    return data
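
A hedged sketch of the data shape this validator handles; the inline-schema fields shown are illustrative, not the library's exact format:

# Dict-style output_type, e.g. as it might appear in YAML.
raw = {
    "output_type": {
        "result_tool_name": "report",
        "result_tool_description": "Return the final report",
        "output_retries": 2,
        "response_schema": {"description": "A short report"},
    },
}
# During validation the three tool settings are popped off and written to
# result_tool_name / result_tool_description / output_retries on the agent
# config itself; the schema data gets "type": "inline" if missing and is
# converted to an InlineSchemaDef under response_schema.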
