Skip to content

AgentConfig

Base classes

Name Children Inherits
NodeConfig
llmling_agent_config.nodes
Configuration for a Node of the messaging system.

⋔ Inheritance diagram

graph TD
  94701635409600["agents.AgentConfig"]
  94701622670944["nodes.NodeConfig"]
  94701626040304["schema.Schema"]
  94701612203136["main.BaseModel"]
  139765536922080["builtins.object"]
  94701622670944 --> 94701635409600
  94701626040304 --> 94701622670944
  94701612203136 --> 94701626040304
  139765536922080 --> 94701612203136

🛈 DocStrings

Bases: NodeConfig

Configuration for a single agent in the system.

Defines an agent's complete configuration including its model, environment, capabilities, and behavior settings. Each agent can have its own:

- Language model configuration
- Environment setup (tools and resources)
- Response type definitions
- System prompts and default user prompts
- Role-based capabilities

The configuration can be loaded from YAML or created programmatically.

Source code in src/llmling_agent/models/agents.py
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
class AgentConfig(NodeConfig):
    """Configuration for a single agent in the system.

    Defines an agent's complete configuration including its model, environment,
    capabilities, and behavior settings. Each agent can have its own:
    - Language model configuration
    - Environment setup (tools and resources)
    - Response type definitions
    - System prompts and default user prompts
    - Role-based capabilities

    The configuration can be loaded from YAML or created programmatically.
    """

    provider: ProviderConfig | ProviderName = "pydantic_ai"
    """Provider configuration or shorthand type"""

    inherits: str | None = None
    """Name of agent config to inherit from"""

    model: str | AnyModelConfig | None = None
    """The model to use for this agent. Can be either a simple model name
    string (e.g. 'openai:gpt-5') or a structured model definition."""

    tools: list[ToolConfig | str] = Field(default_factory=list)
    """A list of tools to register with this agent."""

    toolsets: list[ToolsetConfig] = Field(default_factory=list)
    """Toolset configurations for extensible tool collections."""

    environment: str | AgentEnvironment | None = None
    """Environments configuration (path or object)"""

    capabilities: Capabilities = Field(default_factory=Capabilities)
    """Current agent's capabilities."""

    session: str | SessionQuery | MemoryConfig | None = None
    """Session configuration for conversation recovery."""

    result_type: str | StructuredResponseConfig | None = None
    """Name of the response definition to use"""

    retries: int = 1
    """Number of retries for failed operations (maps to pydantic-ai's retries)"""

    result_tool_name: str = "final_result"
    """Name of the tool used for structured responses"""

    result_tool_description: str | None = None
    """Custom description for the result tool"""

    output_retries: int | None = None
    """Max retries for result validation"""

    end_strategy: EndStrategy = "early"
    """The strategy for handling multiple tool calls when a final result is found"""

    avatar: str | None = None
    """URL or path to agent's avatar image"""

    system_prompts: Sequence[str | PromptConfig] = Field(default_factory=list)
    """System prompts for the agent. Can be strings or structured prompt configs."""

    user_prompts: list[str] = Field(default_factory=list)
    """Default user prompts for the agent"""

    # context_sources: list[ContextSource] = Field(default_factory=list)
    # """Initial context sources to load"""

    config_file_path: str | None = None
    """Config file path for resolving environment."""

    knowledge: Knowledge | None = None
    """Knowledge sources for this agent."""

    workers: list[WorkerConfig] = Field(default_factory=list)
    """Worker agents which will be available as tools."""

    requires_tool_confirmation: ToolConfirmationMode = "per_tool"
    """How to handle tool confirmation:
    - "always": Always require confirmation for all tools
    - "never": Never require confirmation (ignore tool settings)
    - "per_tool": Use individual tool settings
    """

    debug: bool = False
    """Enable debug output for this agent."""

    def is_structured(self) -> bool:
        """Check if this config defines a structured agent."""
        return self.result_type is not None

    @model_validator(mode="before")
    @classmethod
    def validate_result_type(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert result type and apply its settings."""
        result_type = data.get("result_type")
        if isinstance(result_type, dict):
            # Extract response-specific settings
            tool_name = result_type.pop("result_tool_name", None)
            tool_description = result_type.pop("result_tool_description", None)
            retries = result_type.pop("output_retries", None)

            # Convert remaining dict to ResponseDefinition
            if "type" not in result_type["response_schema"]:
                result_type["response_schema"]["type"] = "inline"
            data["result_type"]["response_schema"] = InlineSchemaDef(**result_type)

            # Apply extracted settings to agent config
            if tool_name:
                data["result_tool_name"] = tool_name
            if tool_description:
                data["result_tool_description"] = tool_description
            if retries is not None:
                data["output_retries"] = retries

        return data

    @model_validator(mode="before")
    @classmethod
    def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert model inputs to appropriate format."""
        model = data.get("model")
        match model:
            case str():
                data["model"] = {"type": "string", "identifier": model}
        return data

    async def get_toolsets(self) -> list[ResourceProvider]:
        """Get all resource providers for this agent."""
        providers: list[ResourceProvider] = []

        # Add providers from toolsets
        for toolset_config in self.toolsets:
            try:
                provider = toolset_config.get_provider()
                providers.append(provider)
            except Exception as e:
                logger.exception(
                    "Failed to create provider for toolset: %r", toolset_config
                )
                msg = f"Failed to create provider for toolset: {e}"
                raise ValueError(msg) from e

        return providers

    def get_tool_provider(self) -> ResourceProvider | None:
        """Get tool provider for this agent."""
        from llmling_agent.tools.base import Tool

        # Create provider for static tools
        if not self.tools:
            return None
        static_tools: list[Tool] = []
        for tool_config in self.tools:
            try:
                match tool_config:
                    case str():
                        if tool_config.startswith("crewai_tools"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_crewai_tool(obj))
                        elif tool_config.startswith("langchain"):
                            obj = import_class(tool_config)()
                            static_tools.append(Tool.from_langchain_tool(obj))
                        else:
                            tool = Tool.from_callable(tool_config)
                            static_tools.append(tool)
                    case BaseToolConfig():
                        static_tools.append(tool_config.get_tool())
            except Exception:
                logger.exception("Failed to load tool %r", tool_config)
                continue

        return StaticResourceProvider(name="builtin", tools=static_tools)

    def get_session_config(self) -> MemoryConfig:
        """Get resolved memory configuration."""
        match self.session:
            case str() | UUID():
                return MemoryConfig(session=SessionQuery(name=str(self.session)))
            case SessionQuery():
                return MemoryConfig(session=self.session)
            case MemoryConfig():
                return self.session
            case None:
                return MemoryConfig()
            case _:
                msg = f"Invalid session configuration: {self.session}"
                raise ValueError(msg)

    def get_system_prompts(self) -> list[BasePrompt]:
        """Get all system prompts as BasePrompts."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        prompts: list[BasePrompt] = []
        for prompt in self.system_prompts:
            match prompt:
                case str():
                    # Convert string to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=prompt)],
                    )
                    prompts.append(static_prompt)
                case StaticPromptConfig():
                    # Convert StaticPromptConfig to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=prompt.content)],
                    )
                    prompts.append(static_prompt)
                case FilePromptConfig():
                    # Load and convert file-based prompt
                    from pathlib import Path

                    template_path = Path(prompt.path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / prompt.path

                    template_content = template_path.read_text()
                    # Create a template-based prompt
                    # (for now as StaticPrompt with placeholder)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"File prompt: {prompt.path}",
                        messages=[PromptMessage(role="system", content=template_content)],
                    )
                    prompts.append(static_prompt)
                case LibraryPromptConfig():
                    # Create placeholder for library prompts (resolved by manifest)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Library: {prompt.reference}",
                        messages=[
                            PromptMessage(
                                role="system",
                                content=f"[LIBRARY:{prompt.reference}]",
                            )
                        ],
                    )
                    prompts.append(static_prompt)
                case FunctionPromptConfig():
                    # Import and call the function to get prompt content
                    func = prompt.function
                    content = func(**prompt.arguments)
                    static_prompt = StaticPrompt(
                        name="system",
                        description=f"Function prompt: {prompt.function}",
                        messages=[PromptMessage(role="system", content=content)],
                    )
                    prompts.append(static_prompt)
                case BasePrompt():
                    prompts.append(prompt)
        return prompts

    def get_provider(self) -> AgentProvider:
        """Get resolved provider instance.

        Creates provider instance based on configuration:
        - Full provider config: Use as-is
        - Shorthand type: Create default provider config
        """
        # If string shorthand is used, convert to default provider config
        from llmling_agent_config.providers import (
            CallbackProviderConfig,
            HumanProviderConfig,
            PydanticAIProviderConfig,
        )

        provider_config = self.provider
        if isinstance(provider_config, str):
            match provider_config:
                case "pydantic_ai":
                    provider_config = PydanticAIProviderConfig(model=self.model)
                case "human":
                    provider_config = HumanProviderConfig()
                case _:
                    try:
                        fn = import_callable(provider_config)
                        provider_config = CallbackProviderConfig(callback=fn)
                    except Exception:  # noqa: BLE001
                        msg = f"Invalid provider type: {provider_config}"
                        raise ValueError(msg)  # noqa: B904

        # Create provider instance from config
        return provider_config.get_provider()

    def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
        """Render system prompts with context."""
        from llmling_agent_config.system_prompts import (
            FilePromptConfig,
            FunctionPromptConfig,
            LibraryPromptConfig,
            StaticPromptConfig,
        )

        if not context:
            # Default context
            context = {"name": self.name, "id": 1, "model": self.model}

        rendered_prompts: list[str] = []
        for prompt in self.system_prompts:
            match prompt:
                case str():
                    rendered_prompts.append(render_prompt(prompt, {"agent": context}))
                case StaticPromptConfig():
                    rendered_prompts.append(
                        render_prompt(prompt.content, {"agent": context})
                    )
                case FilePromptConfig():
                    # Load and render Jinja template from file
                    from pathlib import Path

                    template_path = Path(prompt.path)
                    if not template_path.is_absolute() and self.config_file_path:
                        base_path = Path(self.config_file_path).parent
                        template_path = base_path / prompt.path

                    template_content = template_path.read_text()
                    template_context = {"agent": context, **prompt.variables}
                    rendered_prompts.append(
                        render_prompt(template_content, template_context)
                    )
                case LibraryPromptConfig():
                    # This will be handled by the manifest's get_agent method
                    # For now, just add a placeholder
                    rendered_prompts.append(f"[LIBRARY:{prompt.reference}]")
                case FunctionPromptConfig():
                    # Import and call the function to get prompt content
                    func = prompt.function
                    content = func(**prompt.arguments)
                    rendered_prompts.append(render_prompt(content, {"agent": context}))

        return rendered_prompts

    def get_config(self) -> Config:
        """Get configuration for this agent."""
        match self.environment:
            case None:
                # Create minimal config
                caps = LLMCapabilitiesConfig()
                global_settings = GlobalSettings(llm_capabilities=caps)
                return Config(global_settings=global_settings)
            case str() as path:
                # Backward compatibility: treat as file path
                resolved = self._resolve_environment_path(path, self.config_file_path)
                return Config.from_file(resolved)
            case FileEnvironment(uri=uri) as env:
                # Handle FileEnvironment instance
                resolved = env.get_file_path()
                return Config.from_file(resolved)
            case {"type": "file", "uri": uri}:
                # Handle raw dict matching file environment structure
                return Config.from_file(uri)
            case {"type": "inline", "config": config}:
                return config
            case InlineEnvironment() as config:
                return config
            case _:
                msg = f"Invalid environment configuration: {self.environment}"
                raise ValueError(msg)

    def get_environment_path(self) -> str | None:
        """Get environment file path if available."""
        match self.environment:
            case str() as path:
                return self._resolve_environment_path(path, self.config_file_path)
            case {"type": "file", "uri": uri} | FileEnvironment(uri=uri):
                return uri
            case _:
                return None

    @staticmethod
    def _resolve_environment_path(env: str, config_file_path: str | None = None) -> str:
        """Resolve environment path from config store or relative path."""
        from upath import UPath

        try:
            config_store = ConfigStore()
            return config_store.get_config(env)
        except KeyError:
            if config_file_path:
                base_dir = UPath(config_file_path).parent
                return str(base_dir / env)
            return env

avatar class-attribute instance-attribute

avatar: str | None = None

URL or path to agent's avatar image

capabilities class-attribute instance-attribute

capabilities: Capabilities = Field(default_factory=Capabilities)

Current agent's capabilities.

config_file_path class-attribute instance-attribute

config_file_path: str | None = None

Config file path for resolving environment.

debug class-attribute instance-attribute

debug: bool = False

Enable debug output for this agent.

end_strategy class-attribute instance-attribute

end_strategy: EndStrategy = 'early'

The strategy for handling multiple tool calls when a final result is found

environment class-attribute instance-attribute

environment: str | AgentEnvironment | None = None

Environments configuration (path or object)

inherits class-attribute instance-attribute

inherits: str | None = None

Name of agent config to inherit from

knowledge class-attribute instance-attribute

knowledge: Knowledge | None = None

Knowledge sources for this agent.

model class-attribute instance-attribute

model: str | AnyModelConfig | None = None

The model to use for this agent. Can be either a simple model name string (e.g. 'openai:gpt-5') or a structured model definition.

output_retries class-attribute instance-attribute

output_retries: int | None = None

Max retries for result validation

provider class-attribute instance-attribute

provider: ProviderConfig | ProviderName = 'pydantic_ai'

Provider configuration or shorthand type

requires_tool_confirmation class-attribute instance-attribute

requires_tool_confirmation: ToolConfirmationMode = 'per_tool'

How to handle tool confirmation:

- "always": Always require confirmation for all tools
- "never": Never require confirmation (ignore tool settings)
- "per_tool": Use individual tool settings

result_tool_description class-attribute instance-attribute

result_tool_description: str | None = None

Custom description for the result tool

result_tool_name class-attribute instance-attribute

result_tool_name: str = 'final_result'

Name of the tool used for structured responses

result_type class-attribute instance-attribute

result_type: str | StructuredResponseConfig | None = None

Name of the response definition to use

retries class-attribute instance-attribute

retries: int = 1

Number of retries for failed operations (maps to pydantic-ai's retries)

session class-attribute instance-attribute

session: str | SessionQuery | MemoryConfig | None = None

Session configuration for conversation recovery.

system_prompts class-attribute instance-attribute

system_prompts: Sequence[str | PromptConfig] = Field(default_factory=list)

System prompts for the agent. Can be strings or structured prompt configs.

tools class-attribute instance-attribute

tools: list[ToolConfig | str] = Field(default_factory=list)

A list of tools to register with this agent.

toolsets class-attribute instance-attribute

toolsets: list[ToolsetConfig] = Field(default_factory=list)

Toolset configurations for extensible tool collections.

user_prompts class-attribute instance-attribute

user_prompts: list[str] = Field(default_factory=list)

Default user prompts for the agent

workers class-attribute instance-attribute

workers: list[WorkerConfig] = Field(default_factory=list)

Worker agents which will be available as tools.

get_config

get_config() -> Config

Get configuration for this agent.

Source code in src/llmling_agent/models/agents.py
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
def get_config(self) -> Config:
    """Get configuration for this agent."""
    match self.environment:
        case None:
            # Create minimal config
            caps = LLMCapabilitiesConfig()
            global_settings = GlobalSettings(llm_capabilities=caps)
            return Config(global_settings=global_settings)
        case str() as path:
            # Backward compatibility: treat as file path
            resolved = self._resolve_environment_path(path, self.config_file_path)
            return Config.from_file(resolved)
        case FileEnvironment(uri=uri) as env:
            # Handle FileEnvironment instance
            # NOTE(review): the captured `uri` is unused; the path comes from
            # env.get_file_path() — presumably equivalent, TODO confirm.
            resolved = env.get_file_path()
            return Config.from_file(resolved)
        case {"type": "file", "uri": uri}:
            # Handle raw dict matching file environment structure
            return Config.from_file(uri)
        case {"type": "inline", "config": config}:
            # Raw dict form: the embedded config object is returned as-is.
            return config
        case InlineEnvironment() as config:
            return config
        case _:
            msg = f"Invalid environment configuration: {self.environment}"
            raise ValueError(msg)

get_environment_path

get_environment_path() -> str | None

Get environment file path if available.

Source code in src/llmling_agent/models/agents.py
426
427
428
429
430
431
432
433
434
def get_environment_path(self) -> str | None:
    """Get environment file path if available."""
    match self.environment:
        case str() as path:
            return self._resolve_environment_path(path, self.config_file_path)
        case {"type": "file", "uri": uri} | FileEnvironment(uri=uri):
            return uri
        case _:
            return None

get_provider

get_provider() -> AgentProvider

Get resolved provider instance.

Creates provider instance based on configuration:

- Full provider config: Use as-is
- Shorthand type: Create default provider config

Source code in src/llmling_agent/models/agents.py
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
def get_provider(self) -> AgentProvider:
    """Get resolved provider instance.

    Creates provider instance based on configuration:
    - Full provider config: Use as-is
    - Shorthand type: Create default provider config
    """
    # If string shorthand is used, convert to default provider config
    from llmling_agent_config.providers import (
        CallbackProviderConfig,
        HumanProviderConfig,
        PydanticAIProviderConfig,
    )

    provider_config = self.provider
    if isinstance(provider_config, str):
        match provider_config:
            case "pydantic_ai":
                provider_config = PydanticAIProviderConfig(model=self.model)
            case "human":
                provider_config = HumanProviderConfig()
            case _:
                # Any other string is treated as an import path to a
                # callable provider.
                try:
                    fn = import_callable(provider_config)
                    provider_config = CallbackProviderConfig(callback=fn)
                except Exception:  # noqa: BLE001
                    msg = f"Invalid provider type: {provider_config}"
                    raise ValueError(msg)  # noqa: B904

    # Create provider instance from config
    return provider_config.get_provider()

get_session_config

get_session_config() -> MemoryConfig

Get resolved memory configuration.

Source code in src/llmling_agent/models/agents.py
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def get_session_config(self) -> MemoryConfig:
    """Get resolved memory configuration."""
    match self.session:
        case str() | UUID():
            # Shorthand: the value is interpreted as a session name lookup.
            return MemoryConfig(session=SessionQuery(name=str(self.session)))
        case SessionQuery():
            return MemoryConfig(session=self.session)
        case MemoryConfig():
            # Already fully resolved.
            return self.session
        case None:
            # No session configured: use defaults.
            return MemoryConfig()
        case _:
            msg = f"Invalid session configuration: {self.session}"
            raise ValueError(msg)

get_system_prompts

get_system_prompts() -> list[BasePrompt]

Get all system prompts as BasePrompts.

Source code in src/llmling_agent/models/agents.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
def get_system_prompts(self) -> list[BasePrompt]:
    """Get all system prompts as BasePrompts."""
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    prompts: list[BasePrompt] = []
    for prompt in self.system_prompts:
        match prompt:
            case str():
                # Convert string to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=prompt)],
                )
                prompts.append(static_prompt)
            case StaticPromptConfig():
                # Convert StaticPromptConfig to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=prompt.content)],
                )
                prompts.append(static_prompt)
            case FilePromptConfig():
                # Load and convert file-based prompt
                from pathlib import Path

                # Relative paths are resolved against the config file's dir.
                template_path = Path(prompt.path)
                if not template_path.is_absolute() and self.config_file_path:
                    base_path = Path(self.config_file_path).parent
                    template_path = base_path / prompt.path

                # NOTE(review): raises if the file is missing; the content is
                # used verbatim here (template variables are applied only in
                # render_system_prompts) — TODO confirm intended.
                template_content = template_path.read_text()
                # Create a template-based prompt
                # (for now as StaticPrompt with placeholder)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"File prompt: {prompt.path}",
                    messages=[PromptMessage(role="system", content=template_content)],
                )
                prompts.append(static_prompt)
            case LibraryPromptConfig():
                # Create placeholder for library prompts (resolved by manifest)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Library: {prompt.reference}",
                    messages=[
                        PromptMessage(
                            role="system",
                            content=f"[LIBRARY:{prompt.reference}]",
                        )
                    ],
                )
                prompts.append(static_prompt)
            case FunctionPromptConfig():
                # Import and call the function to get prompt content
                func = prompt.function
                content = func(**prompt.arguments)
                static_prompt = StaticPrompt(
                    name="system",
                    description=f"Function prompt: {prompt.function}",
                    messages=[PromptMessage(role="system", content=content)],
                )
                prompts.append(static_prompt)
            case BasePrompt():
                prompts.append(prompt)
    return prompts

get_tool_provider

get_tool_provider() -> ResourceProvider | None

Get tool provider for this agent.

Source code in src/llmling_agent/models/agents.py
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
def get_tool_provider(self) -> ResourceProvider | None:
    """Get tool provider for this agent."""
    from llmling_agent.tools.base import Tool

    # Create provider for static tools
    if not self.tools:
        return None
    static_tools: list[Tool] = []
    for tool_config in self.tools:
        try:
            match tool_config:
                case str():
                    # crewai/langchain prefixes: import the class and
                    # instantiate it; any other string is an import path
                    # to a callable.
                    if tool_config.startswith("crewai_tools"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_crewai_tool(obj))
                    elif tool_config.startswith("langchain"):
                        obj = import_class(tool_config)()
                        static_tools.append(Tool.from_langchain_tool(obj))
                    else:
                        tool = Tool.from_callable(tool_config)
                        static_tools.append(tool)
                case BaseToolConfig():
                    static_tools.append(tool_config.get_tool())
        except Exception:
            # Tools that fail to load are logged and skipped so one bad
            # tool doesn't block the rest.
            logger.exception("Failed to load tool %r", tool_config)
            continue

    return StaticResourceProvider(name="builtin", tools=static_tools)

get_toolsets async

get_toolsets() -> list[ResourceProvider]

Get all resource providers for this agent.

Source code in src/llmling_agent/models/agents.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
async def get_toolsets(self) -> list[ResourceProvider]:
    """Get all resource providers for this agent.

    Raises:
        ValueError: if any configured toolset fails to produce a provider.
    """
    result: list[ResourceProvider] = []
    for config in self.toolsets:
        try:
            result.append(config.get_provider())
        except Exception as exc:
            # A toolset that cannot be instantiated is a hard error:
            # log with traceback, then surface it to the caller.
            logger.exception("Failed to create provider for toolset: %r", config)
            msg = f"Failed to create provider for toolset: {exc}"
            raise ValueError(msg) from exc
    return result

handle_model_types classmethod

handle_model_types(data: dict[str, Any]) -> dict[str, Any]

Convert model inputs to appropriate format.

Source code in src/llmling_agent/models/agents.py
174
175
176
177
178
179
180
181
182
@model_validator(mode="before")
@classmethod
def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert model inputs to appropriate format.

    A bare string model is shorthand for a string-identifier model config;
    anything else is passed through untouched.
    """
    model = data.get("model")
    if isinstance(model, str):
        data["model"] = {"type": "string", "identifier": model}
    return data

is_structured

is_structured() -> bool

Check if this config defines a structured agent.

Source code in src/llmling_agent/models/agents.py
144
145
146
def is_structured(self) -> bool:
    """Check if this config defines a structured agent.

    A structured agent is exactly one with a result type configured.
    """
    return self.result_type is not None

render_system_prompts

render_system_prompts(context: dict[str, Any] | None = None) -> list[str]

Render system prompts with context.

Source code in src/llmling_agent/models/agents.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
    """Render system prompts with context.

    Each configured prompt is resolved according to its kind (plain string,
    static, file-based, library reference, or function) and rendered as a
    Jinja template with the agent context.
    """
    from llmling_agent_config.system_prompts import (
        FilePromptConfig,
        FunctionPromptConfig,
        LibraryPromptConfig,
        StaticPromptConfig,
    )

    if not context:
        # Fall back to a minimal default rendering context.
        context = {"name": self.name, "id": 1, "model": self.model}

    results: list[str] = []
    for entry in self.system_prompts:
        if isinstance(entry, str):
            # Plain strings are rendered directly as templates.
            results.append(render_prompt(entry, {"agent": context}))
        elif isinstance(entry, StaticPromptConfig):
            results.append(render_prompt(entry.content, {"agent": context}))
        elif isinstance(entry, FilePromptConfig):
            # Load a Jinja template from disk, resolving relative paths
            # against the config file's directory when one is known.
            from pathlib import Path

            path = Path(entry.path)
            if not path.is_absolute() and self.config_file_path:
                path = Path(self.config_file_path).parent / entry.path
            text = path.read_text()
            results.append(
                render_prompt(text, {"agent": context, **entry.variables})
            )
        elif isinstance(entry, LibraryPromptConfig):
            # Resolved later by the manifest's get_agent; emit a
            # placeholder marker for now.
            results.append(f"[LIBRARY:{entry.reference}]")
        elif isinstance(entry, FunctionPromptConfig):
            # Call the configured function and render its return value.
            results.append(
                render_prompt(entry.function(**entry.arguments), {"agent": context})
            )

    return results

validate_result_type classmethod

validate_result_type(data: dict[str, Any]) -> dict[str, Any]

Convert result type and apply its settings.

Source code in src/llmling_agent/models/agents.py
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
@model_validator(mode="before")
@classmethod
def validate_result_type(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert result type and apply its settings."""
    result_type = data.get("result_type")
    if isinstance(result_type, dict):
        # Extract response-specific settings
        # (popped so they don't leak into the schema construction below).
        tool_name = result_type.pop("result_tool_name", None)
        tool_description = result_type.pop("result_tool_description", None)
        retries = result_type.pop("output_retries", None)

        # Convert remaining dict to ResponseDefinition
        # Default the nested schema to an inline definition.
        # NOTE(review): this indexing raises KeyError if "response_schema"
        # is absent from the dict — confirm callers always supply it.
        if "type" not in result_type["response_schema"]:
            result_type["response_schema"]["type"] = "inline"
        # NOTE(review): the WHOLE result_type dict (still containing the
        # "response_schema" key) is unpacked into InlineSchemaDef, although
        # the guard above inspects only the nested schema. This looks like
        # it may have been intended as
        # InlineSchemaDef(**result_type["response_schema"]) — verify against
        # InlineSchemaDef's declared fields.
        data["result_type"]["response_schema"] = InlineSchemaDef(**result_type)

        # Apply extracted settings to agent config
        # (truthiness check: empty-string names/descriptions are dropped;
        # retries uses an explicit None check so 0 is preserved).
        if tool_name:
            data["result_tool_name"] = tool_name
        if tool_description:
            data["result_tool_description"] = tool_description
        if retries is not None:
            data["output_retries"] = retries

    return data

Show source on GitHub