Skip to content

AgentConfig

Base classes

Name Children Inherits
NodeConfig
llmling_agent.models.nodes
Configuration for a Node of the messaging system.

⋔ Inheritance diagram

graph TD
  94004555546032["agents.AgentConfig"]
  94004552660176["nodes.NodeConfig"]
  94004513496160["main.BaseModel"]
  140104485245120["builtins.object"]
  94004552660176 --> 94004555546032
  94004513496160 --> 94004552660176
  140104485245120 --> 94004513496160

🛈 DocStrings

Bases: NodeConfig

Configuration for a single agent in the system.

Defines an agent's complete configuration including its model, environment, capabilities, and behavior settings. Each agent can have its own: language model configuration; environment setup (tools and resources); response type definitions; system prompts and default user prompts; and role-based capabilities.

The configuration can be loaded from YAML or created programmatically.

Source code in src/llmling_agent/models/agents.py
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
class AgentConfig(NodeConfig):
    """Configuration for a single agent in the system.

    Defines an agent's complete configuration including its model, environment,
    capabilities, and behavior settings. Each agent can have its own:
    - Language model configuration
    - Environment setup (tools and resources)
    - Response type definitions
    - System prompts and default user prompts
    - Role-based capabilities

    The configuration can be loaded from YAML or created programmatically.
    """

    provider: ProviderConfig | Literal["pydantic_ai", "human", "litellm"] = "pydantic_ai"
    """Provider configuration or shorthand type"""

    inherits: str | None = None
    """Name of agent config to inherit from"""

    model: str | AnyModelConfig | None = None
    """The model to use for this agent. Can be either a simple model name
    string (e.g. 'openai:gpt-4') or a structured model definition."""

    tools: list[ToolConfig | str] = Field(default_factory=list)
    """A list of tools to register with this agent."""

    toolsets: list[ToolsetConfig] = Field(default_factory=list)
    """Toolset configurations for extensible tool collections."""

    environment: str | AgentEnvironment | None = None
    """Environments configuration (path or object)"""

    capabilities: Capabilities = Field(default_factory=Capabilities)
    """Current agent's capabilities."""

    session: str | SessionQuery | MemoryConfig | None = None
    """Session configuration for conversation recovery."""

    result_type: str | ResponseDefinition | None = None
    """Name of the response definition to use"""

    retries: int = 1
    """Number of retries for failed operations (maps to pydantic-ai's retries)"""

    result_tool_name: str = "final_result"
    """Name of the tool used for structured responses"""

    result_tool_description: str | None = None
    """Custom description for the result tool"""

    result_retries: int | None = None
    """Max retries for result validation"""

    end_strategy: EndStrategy = "early"
    """The strategy for handling multiple tool calls when a final result is found"""

    avatar: str | None = None
    """URL or path to agent's avatar image"""

    system_prompts: list[str] = Field(default_factory=list)
    """System prompts for the agent"""

    library_system_prompts: list[str] = Field(default_factory=list)
    """System prompts for the agent from the library"""

    user_prompts: list[str] = Field(default_factory=list)
    """Default user prompts for the agent"""

    # context_sources: list[ContextSource] = Field(default_factory=list)
    # """Initial context sources to load"""

    config_file_path: str | None = None
    """Config file path for resolving environment."""

    knowledge: Knowledge | None = None
    """Knowledge sources for this agent."""

    workers: list[WorkerConfig] = Field(default_factory=list)
    """Worker agents which will be available as tools."""

    requires_tool_confirmation: ToolConfirmationMode = "per_tool"
    """How to handle tool confirmation:
    - "always": Always require confirmation for all tools
    - "never": Never require confirmation (ignore tool settings)
    - "per_tool": Use individual tool settings
    """

    debug: bool = False
    """Enable debug output for this agent."""

    def is_structured(self) -> bool:
        """Check if this config defines a structured agent."""
        return self.result_type is not None

    @model_validator(mode="before")
    @classmethod
    def normalize_workers(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert string workers to WorkerConfig."""
        # Accepts plain strings, ready WorkerConfig instances, or raw dicts.
        if workers := data.get("workers"):
            data["workers"] = [
                WorkerConfig.from_str(w)
                if isinstance(w, str)
                else w
                if isinstance(w, WorkerConfig)  # Keep existing WorkerConfig
                else WorkerConfig(**w)  # Convert dict to WorkerConfig
                for w in workers
            ]
        return data

    @model_validator(mode="before")
    @classmethod
    def validate_result_type(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert result type and apply its settings."""
        result_type = data.get("result_type")
        if isinstance(result_type, dict):
            # Extract response-specific settings
            # NOTE(review): pop() mutates the caller-supplied mapping in place —
            # confirm that callers do not reuse the original dict.
            tool_name = result_type.pop("result_tool_name", None)
            tool_description = result_type.pop("result_tool_description", None)
            retries = result_type.pop("result_retries", None)

            # Convert remaining dict to ResponseDefinition
            if "type" not in result_type:
                result_type["type"] = "inline"
            data["result_type"] = InlineResponseDefinition(**result_type)

            # Apply extracted settings to agent config
            if tool_name:
                data["result_tool_name"] = tool_name
            if tool_description:
                data["result_tool_description"] = tool_description
            if retries is not None:
                data["result_retries"] = retries

        return data

    @model_validator(mode="before")
    @classmethod
    def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Convert model inputs to appropriate format."""
        model = data.get("model")
        match model:
            case str():
                # Bare model name -> structured "string" model config.
                data["model"] = {"type": "string", "identifier": model}
        # Non-string values (config dicts / model instances) pass through unchanged.
        return data

    async def get_toolsets(self) -> list[ResourceProvider]:
        """Get all resource providers for this agent."""
        providers: list[ResourceProvider] = []

        # Add providers from toolsets
        for toolset_config in self.toolsets:
            try:
                provider = toolset_config.get_provider()
                providers.append(provider)
            except Exception as e:
                # Unlike tool loading below, a broken toolset is fatal.
                logger.exception(
                    "Failed to create provider for toolset: %r", toolset_config
                )
                msg = f"Failed to create provider for toolset: {e}"
                raise ValueError(msg) from e

        return providers

    def get_tool_provider(self) -> ResourceProvider | None:
        """Get tool provider for this agent."""
        from llmling_agent.tools.base import ToolInfo

        # Create provider for static tools
        if not self.tools:
            return None
        static_tools: list[ToolInfo] = []
        for tool_config in self.tools:
            try:
                match tool_config:
                    case str():
                        # String entries are import paths; crewai/langchain tools
                        # get instantiated and wrapped, everything else is treated
                        # as a plain callable.
                        if tool_config.startswith("crewai_tools"):
                            obj = import_class(tool_config)()
                            static_tools.append(ToolInfo.from_crewai_tool(obj))
                        elif tool_config.startswith("langchain"):
                            obj = import_class(tool_config)()
                            static_tools.append(ToolInfo.from_langchain_tool(obj))
                        else:
                            tool = ToolInfo.from_callable(tool_config)
                            static_tools.append(tool)
                    case BaseToolConfig():
                        static_tools.append(tool_config.get_tool())
            except Exception:
                # Best-effort loading: log the failure and skip this tool.
                logger.exception("Failed to load tool %r", tool_config)
                continue

        return StaticResourceProvider(name="builtin", tools=static_tools)

    def get_session_config(self) -> MemoryConfig:
        """Get resolved memory configuration."""
        match self.session:
            case str() | UUID():
                # Bare identifier: wrap it in a name-based session query.
                return MemoryConfig(session=SessionQuery(name=str(self.session)))
            case SessionQuery():
                return MemoryConfig(session=self.session)
            case MemoryConfig():
                return self.session
            case None:
                # Nothing configured: fall back to defaults.
                return MemoryConfig()

    def get_system_prompts(self) -> list[BasePrompt]:
        """Get all system prompts as BasePrompts."""
        prompts: list[BasePrompt] = []
        for prompt in self.system_prompts:
            match prompt:
                case str():
                    # Convert string to StaticPrompt
                    static_prompt = StaticPrompt(
                        name="system",
                        description="System prompt",
                        messages=[PromptMessage(role="system", content=prompt)],
                    )
                    prompts.append(static_prompt)
                case BasePrompt():
                    prompts.append(prompt)
        # Entries that are neither str nor BasePrompt are silently skipped.
        return prompts

    def get_provider(self) -> AgentProvider:
        """Get resolved provider instance.

        Creates provider instance based on configuration:
        - Full provider config: Use as-is
        - Shorthand type: Create default provider config
        """
        # If string shorthand is used, convert to default provider config
        from llmling_agent.models.providers import (
            CallbackProviderConfig,
            HumanProviderConfig,
            LiteLLMProviderConfig,
            PydanticAIProviderConfig,
        )

        provider_config = self.provider
        if isinstance(provider_config, str):
            match provider_config:
                case "pydantic_ai":
                    provider_config = PydanticAIProviderConfig(model=self.model)
                case "human":
                    provider_config = HumanProviderConfig()
                case "litellm":
                    provider_config = LiteLLMProviderConfig(
                        model=self.model if isinstance(self.model, str) else None
                    )
                case _:
                    # Unknown shorthand: treat it as an import path to a callback.
                    try:
                        fn = import_callable(provider_config)
                        provider_config = CallbackProviderConfig(fn=fn)
                    except Exception:  # noqa: BLE001
                        msg = f"Invalid provider type: {provider_config}"
                        raise ValueError(msg)  # noqa: B904

        # Create provider instance from config
        return provider_config.get_provider()

    def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
        """Render system prompts with context."""
        if not context:
            # Default context
            # NOTE(review): an empty dict also triggers this default
            # ("if not context") — confirm that is intended.
            context = {"name": self.name, "id": 1, "model": self.model}
        return [render_prompt(p, {"agent": context}) for p in self.system_prompts]

    def get_config(self) -> Config:
        """Get configuration for this agent."""
        match self.environment:
            case None:
                # Create minimal config
                caps = LLMCapabilitiesConfig()
                global_settings = GlobalSettings(llm_capabilities=caps)
                return Config(global_settings=global_settings)
            case str() as path:
                # Backward compatibility: treat as file path
                resolved = self._resolve_environment_path(path, self.config_file_path)
                return Config.from_file(resolved)
            case FileEnvironment(uri=uri) as env:
                # Handle FileEnvironment instance
                # NOTE(review): `uri` is bound by the pattern but unused;
                # resolution goes through env.get_file_path().
                resolved = env.get_file_path()
                return Config.from_file(resolved)
            case {"type": "file", "uri": uri}:
                # Handle raw dict matching file environment structure
                return Config.from_file(uri)
            case {"type": "inline", "config": config}:
                return config
            case InlineEnvironment() as config:
                return config
            case _:
                msg = f"Invalid environment configuration: {self.environment}"
                raise ValueError(msg)

    def get_environment_path(self) -> str | None:
        """Get environment file path if available."""
        match self.environment:
            case str() as path:
                # Plain string: resolve via the config store / relative path.
                return self._resolve_environment_path(path, self.config_file_path)
            case {"type": "file", "uri": uri} | FileEnvironment(uri=uri):
                return uri
            case _:
                # Inline environments have no file path.
                return None

    @staticmethod
    def _resolve_environment_path(env: str, config_file_path: str | None = None) -> str:
        """Resolve environment path from config store or relative path."""
        from upath import UPath

        try:
            # Try `env` as a name registered in the config store first.
            config_store = ConfigStore()
            return config_store.get_config(env)
        except KeyError:
            # Unknown name: treat as a path relative to the agent's config file.
            if config_file_path:
                base_dir = UPath(config_file_path).parent
                return str(base_dir / env)
            return env

avatar class-attribute instance-attribute

avatar: str | None = None

URL or path to agent's avatar image

capabilities class-attribute instance-attribute

capabilities: Capabilities = Field(default_factory=Capabilities)

Current agent's capabilities.

config_file_path class-attribute instance-attribute

config_file_path: str | None = None

Config file path for resolving environment.

debug class-attribute instance-attribute

debug: bool = False

Enable debug output for this agent.

end_strategy class-attribute instance-attribute

end_strategy: EndStrategy = 'early'

The strategy for handling multiple tool calls when a final result is found

environment class-attribute instance-attribute

environment: str | AgentEnvironment | None = None

Environments configuration (path or object)

inherits class-attribute instance-attribute

inherits: str | None = None

Name of agent config to inherit from

knowledge class-attribute instance-attribute

knowledge: Knowledge | None = None

Knowledge sources for this agent.

library_system_prompts class-attribute instance-attribute

library_system_prompts: list[str] = Field(default_factory=list)

System prompts for the agent from the library

model class-attribute instance-attribute

model: str | AnyModelConfig | None = None

The model to use for this agent. Can be either a simple model name string (e.g. 'openai:gpt-4') or a structured model definition.

provider class-attribute instance-attribute

provider: ProviderConfig | Literal['pydantic_ai', 'human', 'litellm'] = 'pydantic_ai'

Provider configuration or shorthand type

requires_tool_confirmation class-attribute instance-attribute

requires_tool_confirmation: ToolConfirmationMode = 'per_tool'

How to handle tool confirmation: "always" — always require confirmation for all tools; "never" — never require confirmation (ignore individual tool settings); "per_tool" — use each tool's own setting.

result_retries class-attribute instance-attribute

result_retries: int | None = None

Max retries for result validation

result_tool_description class-attribute instance-attribute

result_tool_description: str | None = None

Custom description for the result tool

result_tool_name class-attribute instance-attribute

result_tool_name: str = 'final_result'

Name of the tool used for structured responses

result_type class-attribute instance-attribute

result_type: str | ResponseDefinition | None = None

Name of the response definition to use

retries class-attribute instance-attribute

retries: int = 1

Number of retries for failed operations (maps to pydantic-ai's retries)

session class-attribute instance-attribute

session: str | SessionQuery | MemoryConfig | None = None

Session configuration for conversation recovery.

system_prompts class-attribute instance-attribute

system_prompts: list[str] = Field(default_factory=list)

System prompts for the agent

tools class-attribute instance-attribute

tools: list[ToolConfig | str] = Field(default_factory=list)

A list of tools to register with this agent.

toolsets class-attribute instance-attribute

toolsets: list[ToolsetConfig] = Field(default_factory=list)

Toolset configurations for extensible tool collections.

user_prompts class-attribute instance-attribute

user_prompts: list[str] = Field(default_factory=list)

Default user prompts for the agent

workers class-attribute instance-attribute

workers: list[WorkerConfig] = Field(default_factory=list)

Worker agents which will be available as tools.

_resolve_environment_path staticmethod

_resolve_environment_path(env: str, config_file_path: str | None = None) -> str

Resolve environment path from config store or relative path.

Source code in src/llmling_agent/models/agents.py
387
388
389
390
391
392
393
394
395
396
397
398
399
@staticmethod
def _resolve_environment_path(env: str, config_file_path: str | None = None) -> str:
    """Resolve environment path from config store or relative path."""
    from upath import UPath

    try:
        # Try `env` as a name registered in the config store first.
        config_store = ConfigStore()
        return config_store.get_config(env)
    except KeyError:
        # Unknown name: treat as a path relative to the agent's config file.
        if config_file_path:
            base_dir = UPath(config_file_path).parent
            return str(base_dir / env)
        return env

get_config

get_config() -> Config

Get configuration for this agent.

Source code in src/llmling_agent/models/agents.py
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
def get_config(self) -> Config:
    """Get configuration for this agent."""
    match self.environment:
        case None:
            # Create minimal config
            caps = LLMCapabilitiesConfig()
            global_settings = GlobalSettings(llm_capabilities=caps)
            return Config(global_settings=global_settings)
        case str() as path:
            # Backward compatibility: treat as file path
            resolved = self._resolve_environment_path(path, self.config_file_path)
            return Config.from_file(resolved)
        case FileEnvironment(uri=uri) as env:
            # Handle FileEnvironment instance
            # NOTE(review): `uri` is bound by the pattern but unused;
            # resolution goes through env.get_file_path().
            resolved = env.get_file_path()
            return Config.from_file(resolved)
        case {"type": "file", "uri": uri}:
            # Handle raw dict matching file environment structure
            return Config.from_file(uri)
        case {"type": "inline", "config": config}:
            return config
        case InlineEnvironment() as config:
            return config
        case _:
            msg = f"Invalid environment configuration: {self.environment}"
            raise ValueError(msg)

get_environment_path

get_environment_path() -> str | None

Get environment file path if available.

Source code in src/llmling_agent/models/agents.py
377
378
379
380
381
382
383
384
385
def get_environment_path(self) -> str | None:
    """Get environment file path if available."""
    match self.environment:
        case str() as path:
            # Plain string: resolve via the config store / relative path.
            return self._resolve_environment_path(path, self.config_file_path)
        case {"type": "file", "uri": uri} | FileEnvironment(uri=uri):
            return uri
        case _:
            # Inline environments have no file path.
            return None

get_provider

get_provider() -> AgentProvider

Get resolved provider instance.

Creates a provider instance based on the configuration: a full provider config is used as-is, while a shorthand type creates a default provider config.

Source code in src/llmling_agent/models/agents.py
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
def get_provider(self) -> AgentProvider:
    """Get resolved provider instance.

    Creates provider instance based on configuration:
    - Full provider config: Use as-is
    - Shorthand type: Create default provider config
    """
    # If string shorthand is used, convert to default provider config
    from llmling_agent.models.providers import (
        CallbackProviderConfig,
        HumanProviderConfig,
        LiteLLMProviderConfig,
        PydanticAIProviderConfig,
    )

    provider_config = self.provider
    if isinstance(provider_config, str):
        match provider_config:
            case "pydantic_ai":
                provider_config = PydanticAIProviderConfig(model=self.model)
            case "human":
                provider_config = HumanProviderConfig()
            case "litellm":
                provider_config = LiteLLMProviderConfig(
                    model=self.model if isinstance(self.model, str) else None
                )
            case _:
                # Unknown shorthand: treat it as an import path to a callback.
                try:
                    fn = import_callable(provider_config)
                    provider_config = CallbackProviderConfig(fn=fn)
                except Exception:  # noqa: BLE001
                    msg = f"Invalid provider type: {provider_config}"
                    raise ValueError(msg)  # noqa: B904

    # Create provider instance from config
    return provider_config.get_provider()

get_session_config

get_session_config() -> MemoryConfig

Get resolved memory configuration.

Source code in src/llmling_agent/models/agents.py
277
278
279
280
281
282
283
284
285
286
287
def get_session_config(self) -> MemoryConfig:
    """Get resolved memory configuration."""
    match self.session:
        case str() | UUID():
            # Bare identifier: wrap it in a name-based session query.
            return MemoryConfig(session=SessionQuery(name=str(self.session)))
        case SessionQuery():
            return MemoryConfig(session=self.session)
        case MemoryConfig():
            return self.session
        case None:
            # Nothing configured: fall back to defaults.
            return MemoryConfig()

get_system_prompts

get_system_prompts() -> list[BasePrompt]

Get all system prompts as BasePrompts.

Source code in src/llmling_agent/models/agents.py
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
def get_system_prompts(self) -> list[BasePrompt]:
    """Get all system prompts as BasePrompts."""
    prompts: list[BasePrompt] = []
    for prompt in self.system_prompts:
        match prompt:
            case str():
                # Convert string to StaticPrompt
                static_prompt = StaticPrompt(
                    name="system",
                    description="System prompt",
                    messages=[PromptMessage(role="system", content=prompt)],
                )
                prompts.append(static_prompt)
            case BasePrompt():
                prompts.append(prompt)
    # Entries that are neither str nor BasePrompt are silently skipped.
    return prompts

get_tool_provider

get_tool_provider() -> ResourceProvider | None

Get tool provider for this agent.

Source code in src/llmling_agent/models/agents.py
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def get_tool_provider(self) -> ResourceProvider | None:
    """Get tool provider for this agent."""
    from llmling_agent.tools.base import ToolInfo

    # Create provider for static tools
    if not self.tools:
        return None
    static_tools: list[ToolInfo] = []
    for tool_config in self.tools:
        try:
            match tool_config:
                case str():
                    # String entries are import paths; crewai/langchain tools
                    # are instantiated and wrapped, anything else is treated
                    # as a plain callable.
                    if tool_config.startswith("crewai_tools"):
                        obj = import_class(tool_config)()
                        static_tools.append(ToolInfo.from_crewai_tool(obj))
                    elif tool_config.startswith("langchain"):
                        obj = import_class(tool_config)()
                        static_tools.append(ToolInfo.from_langchain_tool(obj))
                    else:
                        tool = ToolInfo.from_callable(tool_config)
                        static_tools.append(tool)
                case BaseToolConfig():
                    static_tools.append(tool_config.get_tool())
        except Exception:
            # Best-effort loading: log the failure and skip this tool.
            logger.exception("Failed to load tool %r", tool_config)
            continue

    return StaticResourceProvider(name="builtin", tools=static_tools)

get_toolsets async

get_toolsets() -> list[ResourceProvider]

Get all resource providers for this agent.

Source code in src/llmling_agent/models/agents.py
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
async def get_toolsets(self) -> list[ResourceProvider]:
    """Get all resource providers for this agent."""
    # NOTE(review): nothing is awaited here; async signature presumably kept
    # for interface symmetry — confirm.
    providers: list[ResourceProvider] = []

    # Add providers from toolsets
    for toolset_config in self.toolsets:
        try:
            provider = toolset_config.get_provider()
            providers.append(provider)
        except Exception as e:
            # Unlike static tool loading, a broken toolset is fatal.
            logger.exception(
                "Failed to create provider for toolset: %r", toolset_config
            )
            msg = f"Failed to create provider for toolset: {e}"
            raise ValueError(msg) from e

    return providers

handle_model_types classmethod

handle_model_types(data: dict[str, Any]) -> dict[str, Any]

Convert model inputs to appropriate format.

Source code in src/llmling_agent/models/agents.py
220
221
222
223
224
225
226
227
228
@model_validator(mode="before")
@classmethod
def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert model inputs to appropriate format."""
    model = data.get("model")
    match model:
        case str():
            # Bare model name -> structured "string" model config.
            data["model"] = {"type": "string", "identifier": model}
    # Non-string values (config dicts / model instances) pass through unchanged.
    return data

is_structured

is_structured() -> bool

Check if this config defines a structured agent.

Source code in src/llmling_agent/models/agents.py
175
176
177
def is_structured(self) -> bool:
    """Report whether a response (result) type is configured for this agent."""
    has_result_type = self.result_type is not None
    return has_result_type

normalize_workers classmethod

normalize_workers(data: dict[str, Any]) -> dict[str, Any]

Convert string workers to WorkerConfig.

Source code in src/llmling_agent/models/agents.py
179
180
181
182
183
184
185
186
187
188
189
190
191
192
@model_validator(mode="before")
@classmethod
def normalize_workers(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert string workers to WorkerConfig."""
    # Accepts plain strings, ready WorkerConfig instances, or raw dicts.
    if workers := data.get("workers"):
        data["workers"] = [
            WorkerConfig.from_str(w)
            if isinstance(w, str)
            else w
            if isinstance(w, WorkerConfig)  # Keep existing WorkerConfig
            else WorkerConfig(**w)  # Convert dict to WorkerConfig
            for w in workers
        ]
    return data

render_system_prompts

render_system_prompts(context: dict[str, Any] | None = None) -> list[str]

Render system prompts with context.

Source code in src/llmling_agent/models/agents.py
343
344
345
346
347
348
def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
    """Render system prompts with context."""
    if not context:
        # Default context
        # NOTE(review): an empty dict also triggers this default
        # ("if not context") — confirm that is intended.
        context = {"name": self.name, "id": 1, "model": self.model}
    return [render_prompt(p, {"agent": context}) for p in self.system_prompts]

validate_result_type classmethod

validate_result_type(data: dict[str, Any]) -> dict[str, Any]

Convert result type and apply its settings.

Source code in src/llmling_agent/models/agents.py
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
@model_validator(mode="before")
@classmethod
def validate_result_type(cls, data: dict[str, Any]) -> dict[str, Any]:
    """Convert result type and apply its settings."""
    result_type = data.get("result_type")
    if isinstance(result_type, dict):
        # Extract response-specific settings
        # NOTE(review): pop() mutates the caller-supplied mapping in place —
        # confirm that callers do not reuse the original dict.
        tool_name = result_type.pop("result_tool_name", None)
        tool_description = result_type.pop("result_tool_description", None)
        retries = result_type.pop("result_retries", None)

        # Convert remaining dict to ResponseDefinition
        if "type" not in result_type:
            result_type["type"] = "inline"
        data["result_type"] = InlineResponseDefinition(**result_type)

        # Apply extracted settings to agent config
        if tool_name:
            data["result_tool_name"] = tool_name
        if tool_description:
            data["result_tool_description"] = tool_description
        if retries is not None:
            data["result_retries"] = retries

    return data

Show source on GitHub