
models

Class info

Classes

AgentConfig (llmling_agent.models.agents)
    Inherits: NodeConfig
    Configuration for a single agent in the system.

AgentsManifest (llmling_agent.models.manifest)
    Inherits: Schema
    Complete agent configuration manifest defining all available agents.

      🛈 DocStrings

      Core data models for LLMling agent.

      AgentConfig

      Bases: NodeConfig

      Configuration for a single agent in the system.

      Defines an agent's complete configuration including its model, environment, and behavior settings.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/
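
      The fields documented below can also be set programmatically. The following is a minimal sketch, not taken from the project docs, combining a few of the documented fields; it assumes an AgentConfig can be validated with only these values (the name field is inherited from NodeConfig, referenced as self.name in render_system_prompts, and is assumed to be settable here).

          from llmling_agent.models.agents import AgentConfig

          # Minimal sketch: field names and example values come from the attribute
          # documentation below; `name` is assumed to be an accepted NodeConfig field.
          config = AgentConfig(
              name="assistant",
              model="openai:gpt-5-nano",
              system_prompts=["You are an AI assistant."],
              tools=["webbrowser:open"],
              retries=3,
              debug=True,
          )

          # Without an `output_type`, is_structured() (documented below) returns False.
          assert config.is_structured() is False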

      Source code in src/llmling_agent/models/agents.py
      class AgentConfig(NodeConfig):
          """Configuration for a single agent in the system.
      
          Defines an agent's complete configuration including its model, environment,
          and behavior settings.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/
          """
      
          inherits: str | None = Field(default=None, title="Inheritance source")
          """Name of agent config to inherit from"""
      
          model: str | ModelName | AnyModelConfig | None = Field(
              default=None,
              examples=["openai:gpt-5-nano"],
              title="Model configuration or name",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/"
              },
          )
          """The model to use for this agent. Can be either a simple model name
          string (e.g. 'openai:gpt-5') or a structured model definition.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/
          """
      
          tools: list[ToolConfig | str] = Field(
              default_factory=list,
              examples=[
                  ["webbrowser:open", "builtins:print"],
                  [
                      {
                          "type": "import",
                          "import_path": "webbrowser:open",
                          "name": "web_browser",
                      }
                  ],
              ],
              title="Tool configurations",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/"
              },
          )
          """A list of tools to register with this agent.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/
          """
      
          toolsets: list[ToolsetConfig] = Field(
              default_factory=list,
              examples=[
                  [
                      {
                          "type": "openapi",
                          "spec": "https://api.example.com/openapi.json",
                          "namespace": "api",
                      },
                      {
                          "type": "file_access",
                      },
                      {
                          "type": "composio",
                          "user_id": "user123@example.com",
                          "toolsets": ["github", "slack"],
                      },
                  ],
              ],
              title="Toolset configurations",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/"
              },
          )
          """Toolset configurations for extensible tool collections.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/
          """
      
          session: str | SessionQuery | MemoryConfig | None = Field(
              default=None,
              examples=["main_session", "user_123"],
              title="Session configuration",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/"
              },
          )
          """Session configuration for conversation recovery.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/
          """
      
          output_type: str | StructuredResponseConfig | None = Field(
              default=None,
              examples=["json_response", "code_output"],
              title="Response type",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
              },
          )
          """Name of the response definition to use.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/
          """
      
          retries: int = Field(default=1, ge=0, examples=[1, 3], title="Model retries")
          """Number of retries for failed operations (maps to pydantic-ai's retries)"""
      
          output_retries: int | None = Field(
              default=None,
              examples=[1, 3],
              title="Output retries",
          )
          """Max retries for result validation"""
      
          end_strategy: EndStrategy = Field(
              default="early",
              examples=["early", "exhaust"],
              title="Tool execution strategy",
          )
          """The strategy for handling multiple tool calls when a final result is found"""
      
          avatar: str | None = Field(
              default=None,
              examples=["https://example.com/avatar.png", "/assets/robot.jpg"],
              title="Avatar image",
          )
          """URL or path to agent's avatar image"""
      
          system_prompts: Sequence[str | PromptConfig] = Field(
              default_factory=list,
              title="System prompts",
              examples=[["You are an AI assistant."]],
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/"
              },
          )
          """System prompts for the agent. Can be strings or structured prompt configs.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/
          """
      
          # context_sources: list[ContextSource] = Field(default_factory=list)
          # """Initial context sources to load"""
      
          config_file_path: str | None = Field(
              default=None,
              examples=["/path/to/config.yml", "configs/agent.yaml"],
              title="Configuration file path",
          )
          """Config file path for resolving environment."""
      
          knowledge: Knowledge | None = Field(
              default=None,
              title="Knowledge sources",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/"
              },
          )
          """Knowledge sources for this agent.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/
          """
      
          workers: list[WorkerConfig] = Field(
              default_factory=list,
              examples=[
                  [
                      {
                          "type": "agent",
                          "name": "web_agent",
                          "reset_history_on_run": True,
                      }
                  ],
                  [
                      {
                          "type": "team",
                          "name": "analysis_team",
                      }
                  ],
              ],
              title="Worker agents",
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/"
              },
          )
          """Worker agents which will be available as tools.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/
          """
      
          requires_tool_confirmation: ToolConfirmationMode = Field(
              default="per_tool",
              examples=["always", "never", "per_tool"],
              title="Tool confirmation mode",
          )
          """How to handle tool confirmation:
          - "always": Always require confirmation for all tools
          - "never": Never require confirmation (ignore tool settings)
          - "per_tool": Use individual tool settings
          """
      
          debug: bool = Field(default=False, title="Debug mode")
          """Enable debug output for this agent."""
      
          usage_limits: UsageLimits | None = Field(default=None, title="Usage limits")
          """Usage limits for this agent."""
      
          tool_mode: ToolMode | None = Field(
              default=None,
              examples=["codemode"],
              title="Tool execution mode",
          )
          """Tool execution mode:
          - None: Default mode - tools are called directly
          - "codemode": Tools are wrapped in a Python execution environment
          """
      
          def is_structured(self) -> bool:
              """Check if this config defines a structured agent."""
              return self.output_type is not None
      
          @model_validator(mode="before")
          @classmethod
          def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
              """Convert result type and apply its settings."""
              output_type = data.get("output_type")
              if isinstance(output_type, dict):
                  # Extract response-specific settings
                  tool_name = output_type.pop("result_tool_name", None)
                  tool_description = output_type.pop("result_tool_description", None)
                  retries = output_type.pop("output_retries", None)
      
                  # Convert remaining dict to ResponseDefinition
                  if "type" not in output_type["response_schema"]:
                      output_type["response_schema"]["type"] = "inline"
                  data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)
      
                  # Apply extracted settings to agent config
                  if tool_name:
                      data["result_tool_name"] = tool_name
                  if tool_description:
                      data["result_tool_description"] = tool_description
                  if retries is not None:
                      data["output_retries"] = retries
      
              return data
      
          @model_validator(mode="before")
          @classmethod
          def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
              """Convert model inputs to appropriate format."""
              if isinstance((model := data.get("model")), str):
                  data["model"] = {"type": "string", "identifier": model}
              return data
      
          def get_toolsets(self) -> list[ResourceProvider]:
              """Get all resource providers for this agent."""
              providers: list[ResourceProvider] = []
      
              # Add providers from toolsets
              for toolset_config in self.toolsets:
                  try:
                      provider = toolset_config.get_provider()
                      providers.append(provider)
                  except Exception as e:
                      msg = "Failed to create provider for toolset"
                      logger.exception(msg, toolset_config)
                      raise ValueError(msg) from e
      
              return providers
      
          def get_tool_provider(self) -> ResourceProvider | None:
              """Get tool provider for this agent."""
              from llmling_agent.tools.base import Tool
      
              # Create provider for static tools
              if not self.tools:
                  return None
              static_tools: list[Tool] = []
              for tool_config in self.tools:
                  try:
                      match tool_config:
                          case str():
                              if tool_config.startswith("crewai_tools"):
                                  obj = import_class(tool_config)()
                                  static_tools.append(Tool.from_crewai_tool(obj))
                              elif tool_config.startswith("langchain"):
                                  obj = import_class(tool_config)()
                                  static_tools.append(Tool.from_langchain_tool(obj))
                              else:
                                  tool = Tool.from_callable(tool_config)
                                  static_tools.append(tool)
                          case BaseToolConfig():
                              static_tools.append(tool_config.get_tool())
                  except Exception:
                      logger.exception("Failed to load tool", config=tool_config)
                      continue
      
              return StaticResourceProvider(name="builtin", tools=static_tools)
      
          def get_session_config(self) -> MemoryConfig:
              """Get resolved memory configuration."""
              match self.session:
                  case str() | UUID():
                      return MemoryConfig(session=SessionQuery(name=str(self.session)))
                  case SessionQuery():
                      return MemoryConfig(session=self.session)
                  case MemoryConfig():
                      return self.session
                  case None:
                      return MemoryConfig()
                  case _:
                      msg = f"Invalid session configuration: {self.session}"
                      raise ValueError(msg)
      
          def get_system_prompts(self) -> list[BasePrompt]:
              """Get all system prompts as BasePrompts."""
              from llmling_agent_config.system_prompts import (
                  FilePromptConfig,
                  FunctionPromptConfig,
                  LibraryPromptConfig,
                  StaticPromptConfig,
              )
      
              prompts: list[BasePrompt] = []
              for prompt in self.system_prompts:
                  match prompt:
                      case str():
                          # Convert string to StaticPrompt
                          static_prompt = StaticPrompt(
                              name="system",
                              description="System prompt",
                              messages=[PromptMessage(role="system", content=prompt)],
                          )
                          prompts.append(static_prompt)
                      case StaticPromptConfig(content=content):
                          # Convert StaticPromptConfig to StaticPrompt
                          static_prompt = StaticPrompt(
                              name="system",
                              description="System prompt",
                              messages=[PromptMessage(role="system", content=content)],
                          )
                          prompts.append(static_prompt)
                      case FilePromptConfig(path=path):
                          # Load and convert file-based prompt
      
                          template_path = Path(path)
                          if not template_path.is_absolute() and self.config_file_path:
                              base_path = Path(self.config_file_path).parent
                              template_path = base_path / path
      
                          template_content = template_path.read_text("utf-8")
                          # Create a template-based prompt
                          # (for now as StaticPrompt with placeholder)
                          static_prompt = StaticPrompt(
                              name="system",
                              description=f"File prompt: {path}",
                              messages=[PromptMessage(role="system", content=template_content)],
                          )
                          prompts.append(static_prompt)
                      case LibraryPromptConfig(reference=reference):
                          # Create placeholder for library prompts (resolved by manifest)
                          msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                          static_prompt = StaticPrompt(
                              name="system",
                              description=f"Library: {reference}",
                              messages=[msg],
                          )
                          prompts.append(static_prompt)
                      case FunctionPromptConfig(arguments=arguments, function=function):
                          # Import and call the function to get prompt content
                          content = function(**arguments)
                          static_prompt = StaticPrompt(
                              name="system",
                              description=f"Function prompt: {function}",
                              messages=[PromptMessage(role="system", content=content)],
                          )
                          prompts.append(static_prompt)
                      case BasePrompt():
                          prompts.append(prompt)
              return prompts
      
          def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
              """Render system prompts with context."""
              from llmling_agent_config.system_prompts import (
                  FilePromptConfig,
                  FunctionPromptConfig,
                  LibraryPromptConfig,
                  StaticPromptConfig,
              )
      
              if not context:
                  # Default context
                  context = {"name": self.name, "id": 1, "model": self.model}
      
              rendered_prompts: list[str] = []
              for prompt in self.system_prompts:
                  match prompt:
                      case (str() as content) | StaticPromptConfig(content=content):
                          rendered_prompts.append(render_prompt(content, {"agent": context}))
                      case FilePromptConfig(path=path, variables=variables):
                          # Load and render Jinja template from file
      
                          template_path = Path(path)
                          if not template_path.is_absolute() and self.config_file_path:
                              base_path = Path(self.config_file_path).parent
                              template_path = base_path / path
      
                          template_content = template_path.read_text("utf-8")
                          template_ctx = {"agent": context, **variables}
                          rendered_prompts.append(render_prompt(template_content, template_ctx))
                      case LibraryPromptConfig(reference=reference):
                          # This will be handled by the manifest's get_agent method
                          # For now, just add a placeholder
                          rendered_prompts.append(f"[LIBRARY:{reference}]")
                      case FunctionPromptConfig(function=function, arguments=arguments):
                          # Import and call the function to get prompt content
                          content = function(**arguments)
                          rendered_prompts.append(render_prompt(content, {"agent": context}))
      
              return rendered_prompts
      

      avatar class-attribute instance-attribute

      avatar: str | None = Field(
          default=None,
          examples=["https://example.com/avatar.png", "/assets/robot.jpg"],
          title="Avatar image",
      )
      

      URL or path to agent's avatar image

      config_file_path class-attribute instance-attribute

      config_file_path: str | None = Field(
          default=None,
          examples=["/path/to/config.yml", "configs/agent.yaml"],
          title="Configuration file path",
      )
      

      Config file path for resolving environment.

      debug class-attribute instance-attribute

      debug: bool = Field(default=False, title='Debug mode')
      

      Enable debug output for this agent.

      end_strategy class-attribute instance-attribute

      end_strategy: EndStrategy = Field(
          default="early", examples=["early", "exhaust"], title="Tool execution strategy"
      )
      

      The strategy for handling multiple tool calls when a final result is found

      inherits class-attribute instance-attribute

      inherits: str | None = Field(default=None, title='Inheritance source')
      

      Name of agent config to inherit from

      knowledge class-attribute instance-attribute

      knowledge: Knowledge | None = Field(
          default=None,
          title="Knowledge sources",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/"
          },
      )


      Knowledge sources for this agent.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/knowledge_configuration/

      model class-attribute instance-attribute

      model: str | ModelName | AnyModelConfig | None = Field(
          default=None,
          examples=["openai:gpt-5-nano"],
          title="Model configuration or name",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/"
          },
      )
      

      The model to use for this agent. Can be either a simple model name string (e.g. 'openai:gpt-5') or a structured model definition.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/model_configuration/
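
      As a sketch of the two accepted shapes: the handle_model_types validator further down this page rewrites a plain string into a structured model entry, so the following two inputs are equivalent (the dict shape is taken directly from that validator).

          # Short form: a plain model name string.
          short_form = {"model": "openai:gpt-5-nano"}

          # What handle_model_types() turns the string into before validation:
          structured_form = {"model": {"type": "string", "identifier": "openai:gpt-5-nano"}}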

      output_retries class-attribute instance-attribute

      output_retries: int | None = Field(default=None, examples=[1, 3], title='Output retries')
      

      Max retries for result validation

      output_type class-attribute instance-attribute

      output_type: str | StructuredResponseConfig | None = Field(
          default=None,
          examples=["json_response", "code_output"],
          title="Response type",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
          },
      )


      Name of the response definition to use.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/

      requires_tool_confirmation class-attribute instance-attribute

      requires_tool_confirmation: ToolConfirmationMode = Field(
          default="per_tool",
          examples=["always", "never", "per_tool"],
          title="Tool confirmation mode",
      )
      

      How to handle tool confirmation:

      - "always": Always require confirmation for all tools
      - "never": Never require confirmation (ignore tool settings)
      - "per_tool": Use individual tool settings

      retries class-attribute instance-attribute

      retries: int = Field(default=1, ge=0, examples=[1, 3], title='Model retries')
      

      Number of retries for failed operations (maps to pydantic-ai's retries)

      session class-attribute instance-attribute

      session: str | SessionQuery | MemoryConfig | None = Field(
          default=None,
          examples=["main_session", "user_123"],
          title="Session configuration",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/"
          },
      )
      

      Session configuration for conversation recovery.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/session_configuration/
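
      How a given session value is resolved is defined by get_session_config() further down this page. A rough sketch, assuming an AgentConfig can be built with only these fields (the name field is an assumption, as in the earlier sketch):

          from llmling_agent.models.agents import AgentConfig

          # None resolves to an empty MemoryConfig()
          AgentConfig(name="demo", session=None).get_session_config()

          # A plain string becomes MemoryConfig(session=SessionQuery(name="main_session"))
          AgentConfig(name="demo", session="main_session").get_session_config()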

      system_prompts class-attribute instance-attribute

      system_prompts: Sequence[str | PromptConfig] = Field(
          default_factory=list,
          title="System prompts",
          examples=[["You are an AI assistant."]],
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/"
          },
      )
      

      System prompts for the agent. Can be strings or structured prompt configs.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/system_prompts_configuration/
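
      A small sketch of the plain-string form, assuming the name field as before. Strings are rendered by render_system_prompts() (shown below) with an {"agent": ...} template context; structured prompt configs (static, file, library, function) are also accepted and matched by type, with their exact shapes documented on the linked page.

          from llmling_agent.models.agents import AgentConfig

          cfg = AgentConfig(
              name="assistant",  # assumed inherited field, as in the earlier sketch
              system_prompts=["You are an AI assistant."],
              # Structured entries (static/file/library/function prompt configs) can be
              # mixed in as well; see the system prompts configuration docs linked above.
          )

          # Plain strings are rendered with a default context containing the agent's
          # name, id and model (see render_system_prompts below).
          print(cfg.render_system_prompts())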

      tool_mode class-attribute instance-attribute

      tool_mode: ToolMode | None = Field(
          default=None, examples=["codemode"], title="Tool execution mode"
      )
      

      Tool execution mode:

      - None: Default mode - tools are called directly
      - "codemode": Tools are wrapped in a Python execution environment

      tools class-attribute instance-attribute

      tools: list[ToolConfig | str] = Field(
          default_factory=list,
          examples=[
              ["webbrowser:open", "builtins:print"],
              [{"type": "import", "import_path": "webbrowser:open", "name": "web_browser"}],
          ],
          title="Tool configurations",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/"
          },
      )
      

      A list of tools to register with this agent.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/tool_configuration/
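
      A sketch combining the two documented forms, a bare import path and an import tool config; the dict shape is taken from the Field examples above, and get_tool_provider() (shown below) wraps the loaded tools in a StaticResourceProvider. The name field is again assumed.

          from llmling_agent.models.agents import AgentConfig

          cfg = AgentConfig(
              name="assistant",  # assumed inherited field
              tools=[
                  "builtins:print",  # bare import path
                  {"type": "import", "import_path": "webbrowser:open", "name": "web_browser"},
              ],
          )

          provider = cfg.get_tool_provider()  # StaticResourceProvider with the loaded tools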

      toolsets class-attribute instance-attribute

      toolsets: list[ToolsetConfig] = Field(
          default_factory=list,
          examples=[
              [
                  {
                      "type": "openapi",
                      "spec": "https://api.example.com/openapi.json",
                      "namespace": "api",
                  },
                  {"type": "file_access"},
                  {
                      "type": "composio",
                      "user_id": "user123@example.com",
                      "toolsets": ["github", "slack"],
                  },
              ]
          ],
          title="Toolset configurations",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/"
          },
      )
      

      Toolset configurations for extensible tool collections.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/toolset_configuration/
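
      A sketch using the entry shapes from the Field examples above, assuming the dicts validate into the corresponding ToolsetConfig variants; get_toolsets() (shown below) turns each entry into a ResourceProvider.

          from llmling_agent.models.agents import AgentConfig

          cfg = AgentConfig(
              name="api_agent",  # assumed inherited field
              toolsets=[
                  {"type": "openapi", "spec": "https://api.example.com/openapi.json", "namespace": "api"},
                  {"type": "file_access"},
              ],
          )

          providers = cfg.get_toolsets()  # one ResourceProvider per toolset entry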

      usage_limits class-attribute instance-attribute

      usage_limits: UsageLimits | None = Field(default=None, title='Usage limits')
      

      Usage limits for this agent.

      workers class-attribute instance-attribute

      workers: list[WorkerConfig] = Field(
          default_factory=list,
          examples=[
              [{"type": "agent", "name": "web_agent", "reset_history_on_run": True}],
              [{"type": "team", "name": "analysis_team"}],
          ],
          title="Worker agents",
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/"
          },
      )
      

      Worker agents which will be available as tools.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/worker_configuration/
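
      A sketch using the shapes from the Field examples above. The referenced names must exist as agents or teams in the surrounding manifest; bare strings are also accepted and resolved by AgentsManifest.normalize_workers (shown further below).

          from llmling_agent.models.agents import AgentConfig

          cfg = AgentConfig(
              name="coordinator",  # assumed inherited field
              workers=[
                  {"type": "agent", "name": "web_agent", "reset_history_on_run": True},
                  {"type": "team", "name": "analysis_team"},
              ],
          )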

      get_session_config

      get_session_config() -> MemoryConfig
      

      Get resolved memory configuration.

      Source code in src/llmling_agent/models/agents.py
      def get_session_config(self) -> MemoryConfig:
          """Get resolved memory configuration."""
          match self.session:
              case str() | UUID():
                  return MemoryConfig(session=SessionQuery(name=str(self.session)))
              case SessionQuery():
                  return MemoryConfig(session=self.session)
              case MemoryConfig():
                  return self.session
              case None:
                  return MemoryConfig()
              case _:
                  msg = f"Invalid session configuration: {self.session}"
                  raise ValueError(msg)
      

      get_system_prompts

      get_system_prompts() -> list[BasePrompt]
      

      Get all system prompts as BasePrompts.

      Source code in src/llmling_agent/models/agents.py
      def get_system_prompts(self) -> list[BasePrompt]:
          """Get all system prompts as BasePrompts."""
          from llmling_agent_config.system_prompts import (
              FilePromptConfig,
              FunctionPromptConfig,
              LibraryPromptConfig,
              StaticPromptConfig,
          )
      
          prompts: list[BasePrompt] = []
          for prompt in self.system_prompts:
              match prompt:
                  case str():
                      # Convert string to StaticPrompt
                      static_prompt = StaticPrompt(
                          name="system",
                          description="System prompt",
                          messages=[PromptMessage(role="system", content=prompt)],
                      )
                      prompts.append(static_prompt)
                  case StaticPromptConfig(content=content):
                      # Convert StaticPromptConfig to StaticPrompt
                      static_prompt = StaticPrompt(
                          name="system",
                          description="System prompt",
                          messages=[PromptMessage(role="system", content=content)],
                      )
                      prompts.append(static_prompt)
                  case FilePromptConfig(path=path):
                      # Load and convert file-based prompt
      
                      template_path = Path(path)
                      if not template_path.is_absolute() and self.config_file_path:
                          base_path = Path(self.config_file_path).parent
                          template_path = base_path / path
      
                      template_content = template_path.read_text("utf-8")
                      # Create a template-based prompt
                      # (for now as StaticPrompt with placeholder)
                      static_prompt = StaticPrompt(
                          name="system",
                          description=f"File prompt: {path}",
                          messages=[PromptMessage(role="system", content=template_content)],
                      )
                      prompts.append(static_prompt)
                  case LibraryPromptConfig(reference=reference):
                      # Create placeholder for library prompts (resolved by manifest)
                      msg = PromptMessage(role="system", content=f"[LIBRARY:{reference}]")
                      static_prompt = StaticPrompt(
                          name="system",
                          description=f"Library: {reference}",
                          messages=[msg],
                      )
                      prompts.append(static_prompt)
                  case FunctionPromptConfig(arguments=arguments, function=function):
                      # Import and call the function to get prompt content
                      content = function(**arguments)
                      static_prompt = StaticPrompt(
                          name="system",
                          description=f"Function prompt: {function}",
                          messages=[PromptMessage(role="system", content=content)],
                      )
                      prompts.append(static_prompt)
                  case BasePrompt():
                      prompts.append(prompt)
          return prompts
      

      get_tool_provider

      get_tool_provider() -> ResourceProvider | None
      

      Get tool provider for this agent.

      Source code in src/llmling_agent/models/agents.py
      def get_tool_provider(self) -> ResourceProvider | None:
          """Get tool provider for this agent."""
          from llmling_agent.tools.base import Tool
      
          # Create provider for static tools
          if not self.tools:
              return None
          static_tools: list[Tool] = []
          for tool_config in self.tools:
              try:
                  match tool_config:
                      case str():
                          if tool_config.startswith("crewai_tools"):
                              obj = import_class(tool_config)()
                              static_tools.append(Tool.from_crewai_tool(obj))
                          elif tool_config.startswith("langchain"):
                              obj = import_class(tool_config)()
                              static_tools.append(Tool.from_langchain_tool(obj))
                          else:
                              tool = Tool.from_callable(tool_config)
                              static_tools.append(tool)
                      case BaseToolConfig():
                          static_tools.append(tool_config.get_tool())
              except Exception:
                  logger.exception("Failed to load tool", config=tool_config)
                  continue
      
          return StaticResourceProvider(name="builtin", tools=static_tools)
      

      get_toolsets

      get_toolsets() -> list[ResourceProvider]
      

      Get all resource providers for this agent.

      Source code in src/llmling_agent/models/agents.py
      def get_toolsets(self) -> list[ResourceProvider]:
          """Get all resource providers for this agent."""
          providers: list[ResourceProvider] = []
      
          # Add providers from toolsets
          for toolset_config in self.toolsets:
              try:
                  provider = toolset_config.get_provider()
                  providers.append(provider)
              except Exception as e:
                  msg = "Failed to create provider for toolset"
                  logger.exception(msg, toolset_config)
                  raise ValueError(msg) from e
      
          return providers
      

      handle_model_types classmethod

      handle_model_types(data: dict[str, Any]) -> dict[str, Any]
      

      Convert model inputs to appropriate format.

      Source code in src/llmling_agent/models/agents.py
      @model_validator(mode="before")
      @classmethod
      def handle_model_types(cls, data: dict[str, Any]) -> dict[str, Any]:
          """Convert model inputs to appropriate format."""
          if isinstance((model := data.get("model")), str):
              data["model"] = {"type": "string", "identifier": model}
          return data
      

      is_structured

      is_structured() -> bool
      

      Check if this config defines a structured agent.

      Source code in src/llmling_agent/models/agents.py
      def is_structured(self) -> bool:
          """Check if this config defines a structured agent."""
          return self.output_type is not None
      

      render_system_prompts

      render_system_prompts(context: dict[str, Any] | None = None) -> list[str]
      

      Render system prompts with context.

      Source code in src/llmling_agent/models/agents.py
      def render_system_prompts(self, context: dict[str, Any] | None = None) -> list[str]:
          """Render system prompts with context."""
          from llmling_agent_config.system_prompts import (
              FilePromptConfig,
              FunctionPromptConfig,
              LibraryPromptConfig,
              StaticPromptConfig,
          )
      
          if not context:
              # Default context
              context = {"name": self.name, "id": 1, "model": self.model}
      
          rendered_prompts: list[str] = []
          for prompt in self.system_prompts:
              match prompt:
                  case (str() as content) | StaticPromptConfig(content=content):
                      rendered_prompts.append(render_prompt(content, {"agent": context}))
                  case FilePromptConfig(path=path, variables=variables):
                      # Load and render Jinja template from file
      
                      template_path = Path(path)
                      if not template_path.is_absolute() and self.config_file_path:
                          base_path = Path(self.config_file_path).parent
                          template_path = base_path / path
      
                      template_content = template_path.read_text("utf-8")
                      template_ctx = {"agent": context, **variables}
                      rendered_prompts.append(render_prompt(template_content, template_ctx))
                  case LibraryPromptConfig(reference=reference):
                      # This will be handled by the manifest's get_agent method
                      # For now, just add a placeholder
                      rendered_prompts.append(f"[LIBRARY:{reference}]")
                  case FunctionPromptConfig(function=function, arguments=arguments):
                      # Import and call the function to get prompt content
                      content = function(**arguments)
                      rendered_prompts.append(render_prompt(content, {"agent": context}))
      
          return rendered_prompts
      

      validate_output_type classmethod

      validate_output_type(data: dict[str, Any]) -> dict[str, Any]
      

      Convert result type and apply its settings.

      Source code in src/llmling_agent/models/agents.py
      @model_validator(mode="before")
      @classmethod
      def validate_output_type(cls, data: dict[str, Any]) -> dict[str, Any]:
          """Convert result type and apply its settings."""
          output_type = data.get("output_type")
          if isinstance(output_type, dict):
              # Extract response-specific settings
              tool_name = output_type.pop("result_tool_name", None)
              tool_description = output_type.pop("result_tool_description", None)
              retries = output_type.pop("output_retries", None)
      
              # Convert remaining dict to ResponseDefinition
              if "type" not in output_type["response_schema"]:
                  output_type["response_schema"]["type"] = "inline"
              data["output_type"]["response_schema"] = InlineSchemaDef(**output_type)
      
              # Apply extracted settings to agent config
              if tool_name:
                  data["result_tool_name"] = tool_name
              if tool_description:
                  data["result_tool_description"] = tool_description
              if retries is not None:
                  data["output_retries"] = retries
      
          return data
      

      AgentsManifest

      Bases: Schema

      Complete agent configuration manifest defining all available agents.

      This is the root configuration that:

      - Defines available response types (both inline and imported)
      - Configures all agent instances and their settings
      - Sets up custom role definitions and capabilities
      - Manages environment configurations

      A single manifest can define multiple agents that can work independently or collaborate through the orchestrator.
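
      A minimal sketch, built only from fields documented below, of a manifest defining two agents where one uses the other as a worker; bare worker strings are resolved by the normalize_workers validator shown in the source. Whether additional fields are required is not visible on this page.

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest(
              agents={
                  "assistant": {
                      "model": "openai:gpt-5-nano",
                      "system_prompts": ["You are an AI assistant."],
                      "workers": ["researcher"],  # bare string; type resolved by normalize_workers
                  },
                  "researcher": {"model": "openai:gpt-5-nano"},
              },
          )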

      Source code in src/llmling_agent/models/manifest.py
      class AgentsManifest(Schema):
          """Complete agent configuration manifest defining all available agents.
      
          This is the root configuration that:
          - Defines available response types (both inline and imported)
          - Configures all agent instances and their settings
          - Sets up custom role definitions and capabilities
          - Manages environment configurations
      
          A single manifest can define multiple agents that can work independently
          or collaborate through the orchestrator.
          """
      
          INHERIT: str | list[str] | None = None
          """Inheritance references."""
      
          resources: dict[str, ResourceConfig | str] = Field(
              default_factory=dict,
              examples=[
                  {"docs": "file://./docs", "data": "s3://bucket/data"},
                  {
                      "api": {
                          "type": "source",
                          "uri": "https://api.example.com",
                          "cached": True,
                      }
                  },
              ],
          )
          """Resource configurations defining available filesystems.
      
          Supports both full config and URI shorthand:
              resources:
                docs: "file://./docs"  # shorthand
                data:  # full config
                  type: "source"
                  uri: "s3://bucket/data"
                  cached: true
          """
      
          agents: dict[str, AgentConfig] = Field(
              default_factory=dict,
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/"
              },
          )
          """Mapping of agent IDs to their configurations.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/
          """
      
          teams: dict[str, TeamConfig] = Field(
              default_factory=dict,
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/team_configuration/"
              },
          )
          """Mapping of team IDs to their configurations.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/team_configuration/
          """
      
          storage: StorageConfig = Field(
              default_factory=StorageConfig,
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/storage_configuration/"
              },
          )
          """Storage provider configuration.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/storage_configuration/
          """
      
          observability: ObservabilityConfig = Field(default_factory=ObservabilityConfig)
          """Observability provider configuration."""
      
          conversion: ConversionConfig = Field(default_factory=ConversionConfig)
          """Document conversion configuration."""
      
          responses: dict[str, StructuredResponseConfig] = Field(
              default_factory=dict,
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
              },
          )
          """Mapping of response names to their definitions.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/
          """
      
          jobs: dict[str, Job[Any]] = Field(default_factory=dict)
          """Pre-defined jobs, ready to be used by nodes."""
      
          mcp_servers: list[str | MCPServerConfig] = Field(
              default_factory=list,
              examples=[
                  ["uvx some-server"],
                  [{"type": "streamable-http", "url": "http://mcp.example.com"}],
              ],
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/mcp_configuration/"
              },
          )
          """List of MCP server configurations:
      
          These MCP servers are used to provide tools and other resources to the nodes.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/mcp_configuration/
          """
          pool_server: MCPPoolServerConfig = Field(default_factory=MCPPoolServerConfig)
          """Pool server configuration.
      
          This MCP server configuration is used for the pool MCP server,
          which exposes pool functionality to other applications / clients."""
      
          prompts: PromptLibraryConfig = Field(
              default_factory=PromptLibraryConfig,
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/prompt_configuration/"
              },
          )
          """Prompt library configuration.
      
          This configuration defines the prompt library, which is used to provide prompts to the nodes.
      
          Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/prompt_configuration/
          """
      
          commands: dict[str, CommandConfig | str] = Field(
              default_factory=dict,
              examples=[
                  {"check_disk": "df -h", "analyze": "Analyze the current situation"},
                  {
                      "status": {
                          "type": "static",
                          "content": "Show system status",
                      }
                  },
              ],
          )
          """Global command shortcuts for prompt injection.
      
          Supports both shorthand string syntax and full command configurations:
              commands:
                df: "check disk space"  # shorthand -> StaticCommandConfig
                analyze:  # full config
                  type: file
                  path: "./prompts/analysis.md"
          """
      
          model_config = ConfigDict(
              json_schema_extra={
                  "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/manifest_configuration/"
              },
          )
      
          @model_validator(mode="before")
          @classmethod
          def normalize_workers(cls, data: dict[str, Any]) -> dict[str, Any]:
              """Convert string workers to appropriate WorkerConfig for all agents."""
              teams = data.get("teams", {})
              agents = data.get("agents", {})
      
              # Process workers for all agents that have them
              for agent_name, agent_config in agents.items():
                  if isinstance(agent_config, dict):
                      workers = agent_config.get("workers", [])
                  else:
                      workers = agent_config.workers
      
                  if workers:
                      normalized: list[BaseWorkerConfig] = []
      
                      for worker in workers:
                          match worker:
                              case str() as name if name in teams:
                                  # Determine type based on presence in teams/agents
                                  normalized.append(TeamWorkerConfig(name=name))
                              case str() as name if name in agents:
                                  normalized.append(AgentWorkerConfig(name=name))
                              case str() as name:  # Default to agent if type can't be determined
                                  normalized.append(AgentWorkerConfig(name=name))
      
                              case dict() as config:
                                  # If type is explicitly specified, use it
                                  if worker_type := config.get("type"):
                                      match worker_type:
                                          case "team":
                                              normalized.append(TeamWorkerConfig(**config))
                                          case "agent":
                                              normalized.append(AgentWorkerConfig(**config))
                                          case _:
                                              msg = f"Invalid worker type: {worker_type}"
                                              raise ValueError(msg)
                                  else:
                                      # Determine type based on worker name
                                      worker_name = config.get("name")
                                      if not worker_name:
                                          msg = "Worker config missing name"
                                          raise ValueError(msg)
      
                                      if worker_name in teams:
                                          normalized.append(TeamWorkerConfig(**config))
                                      else:
                                          normalized.append(AgentWorkerConfig(**config))
      
                              case BaseWorkerConfig():  # Already normalized
                                  normalized.append(worker)
      
                              case _:
                                  msg = f"Invalid worker configuration: {worker}"
                                  raise ValueError(msg)
      
                      if isinstance(agent_config, dict):
                          agent_config["workers"] = normalized
                      else:
                          # Need to create a new dict with updated workers
                          agent_dict = agent_config.model_dump()
                          agent_dict["workers"] = normalized
                          agents[agent_name] = agent_dict
      
              return data
      
          @cached_property
          def vfs_registry(self) -> VFSRegistry:
              """Get registry with all configured VFS resources."""
              registry = VFSRegistry()
              for name, config in self.resources.items():
                  if isinstance(config, str):
                      # Convert URI shorthand to SourceResourceConfig
                      config = SourceResourceConfig(uri=config)
                  registry.register_from_config(name, config)
              return registry
      
          def clone_agent_config(
              self,
              name: str,
              new_name: str | None = None,
              *,
              template_context: dict[str, Any] | None = None,
              **overrides: Any,
          ) -> str:
              """Create a copy of an agent configuration.
      
              Args:
                  name: Name of agent to clone
                  new_name: Optional new name (auto-generated if None)
                  template_context: Variables for template rendering
                  **overrides: Configuration overrides for the clone
      
              Returns:
                  Name of the new agent
      
              Raises:
                  KeyError: If original agent not found
                  ValueError: If new name already exists or if overrides invalid
              """
              if name not in self.agents:
                  msg = f"Agent {name} not found"
                  raise KeyError(msg)
      
              actual_name = new_name or f"{name}_copy_{len(self.agents)}"
              if actual_name in self.agents:
                  msg = f"Agent {actual_name} already exists"
                  raise ValueError(msg)
      
              config = self.agents[name].model_copy(deep=True)
              for key, value in overrides.items():
                  if not hasattr(config, key):
                      msg = f"Invalid override: {key}"
                      raise ValueError(msg)
                  setattr(config, key, value)
      
              # Handle template rendering if context provided
              if template_context and "name" in template_context and "name" not in overrides:
                  config = config.model_copy(update={"name": template_context["name"]})
      
              # Note: system_prompts will be rendered during agent creation, not here
              # config.system_prompts remains as PromptConfig objects
              self.agents[actual_name] = config
              return actual_name
      
          @model_validator(mode="before")
          @classmethod
          def resolve_inheritance(cls, data: dict[str, Any]) -> dict[str, Any]:
              """Resolve agent inheritance chains."""
              nodes = data.get("agents", {})
              resolved: dict[str, dict[str, Any]] = {}
              seen: set[str] = set()
      
              def resolve_node(name: str) -> dict[str, Any]:
                  if name in resolved:
                      return resolved[name]
      
                  if name in seen:
                      msg = f"Circular inheritance detected: {name}"
                      raise ValueError(msg)
      
                  seen.add(name)
                  config = (
                      nodes[name].model_copy()
                      if hasattr(nodes[name], "model_copy")
                      else nodes[name].copy()
                  )
                  inherit = config.get("inherits") if isinstance(config, dict) else config.inherits
                  if inherit:
                      if inherit not in nodes:
                          msg = f"Parent agent {inherit} not found"
                          raise ValueError(msg)
      
                      # Get resolved parent config
                      parent = resolve_node(inherit)
                      # Merge parent with child (child overrides parent)
                      merged = parent.copy()
                      merged.update(config)
                      config = merged
      
                  seen.remove(name)
                  resolved[name] = config
                  return config  # type: ignore[no-any-return]
      
              # Resolve all nodes
              for name in nodes:
                  resolved[name] = resolve_node(name)
      
              # Update nodes with resolved configs
              data["agents"] = resolved
              return data
      
          @property
          def node_names(self) -> list[str]:
              """Get list of all agent and team names."""
              return list(self.agents.keys()) + list(self.teams.keys())
      
          @property
          def nodes(self) -> dict[str, Any]:
              """Get all agent and team configurations."""
              return {**self.agents, **self.teams}
      
          def get_mcp_servers(self) -> list[MCPServerConfig]:
              """Get processed MCP server configurations.
      
              Converts string entries to appropriate MCP server configs based on heuristics:
              - URLs ending with "/sse" -> SSE server
              - URLs starting with http(s):// -> HTTP server
              - Everything else -> stdio command
      
              Returns:
                  List of MCPServerConfig instances
      
              Raises:
                  ValueError: If string entry is empty
              """
              return [
                  BaseMCPServerConfig.from_string(cfg) if isinstance(cfg, str) else cfg
                  for cfg in self.mcp_servers
              ]
      
          def get_command_configs(self) -> dict[str, CommandConfig]:
              """Get processed command configurations.
      
              Converts string entries to StaticCommandConfig instances.
      
              Returns:
                  Dict mapping command names to CommandConfig instances
              """
              result: dict[str, CommandConfig] = {}
              for name, config in self.commands.items():
                  if isinstance(config, str):
                      result[name] = StaticCommandConfig(name=name, content=config)
                  else:
                      # Set name if not provided
                      if config.name is None:
                          config.name = name
                      result[name] = config
              return result
      
          @cached_property
          def prompt_manager(self) -> PromptManager:
              """Get prompt manager for this manifest."""
              from llmling_agent.prompts.manager import PromptManager
      
              return PromptManager(self.prompts)
      
          # @model_validator(mode="after")
          # def validate_response_types(self) -> AgentsManifest:
          #     """Ensure all agent output_types exist in responses or are inline."""
          #     for agent_id, agent in self.agents.items():
          #         if (
          #             isinstance(agent.output_type, str)
          #             and agent.output_type not in self.responses
          #         ):
          #             msg = f"'{agent.output_type=}' for '{agent_id=}' not found in responses"
          #             raise ValueError(msg)
          #     return self
      
          def get_agent[TAgentDeps](
              self,
              name: str,
              deps_type: type[TAgentDeps] | None = None,
              input_provider: InputProvider | None = None,
              pool: AgentPool[Any] | None = None,
              event_handlers: list[IndividualEventHandler] | None = None,
          ) -> Agent[TAgentDeps, Any]:
              from llmling_agent import Agent, AgentContext
      
              config = self.agents[name]
              context = AgentContext[TAgentDeps](
                  node_name=name,
                  definition=self,
                  config=config,
                  input_provider=input_provider,
                  pool=pool,
              )
      
              # Resolve system prompts with new PromptConfig types
              from llmling_agent_config.system_prompts import (
                  FilePromptConfig,
                  FunctionPromptConfig,
                  LibraryPromptConfig,
                  StaticPromptConfig,
              )
      
              sys_prompts: list[str] = []
              for prompt in config.system_prompts:
                  match prompt:
                      case (str() as sys_prompt) | StaticPromptConfig(content=sys_prompt):
                          sys_prompts.append(sys_prompt)
                      case FilePromptConfig(path=path, variables=variables):
                          template_path = Path(path)  # Load template from file
                          if not template_path.is_absolute() and config.config_file_path:
                              template_path = Path(config.config_file_path).parent / path
      
                          template_content = template_path.read_text("utf-8")
                          if variables:  # Apply variables if any
                              from jinja2 import Template
      
                              template = Template(template_content)
                              content = template.render(**variables)
                          else:
                              content = template_content
                          sys_prompts.append(content)
                      case LibraryPromptConfig(reference=reference):
                          try:  # Load from library
                              content = self.prompt_manager.get.sync(reference)
                              sys_prompts.append(content)
                          except Exception as e:
                              msg = f"Failed to load library prompt {reference!r} for agent {name}"
                              logger.exception(msg)
                              raise ValueError(msg) from e
                      case FunctionPromptConfig(function=function, arguments=arguments):
                          content = function(**arguments)  # Call function to get prompt content
                          sys_prompts.append(content)
              # Prepare toolsets list with config's tool provider
              toolsets_list = []
              if config_tool_provider := config.get_tool_provider():
                  toolsets_list.append(config_tool_provider)
      
              # Resolve output type using the same two-step process as before
              from llmling_agent.utils.result_utils import to_type
      
              # Step 1: Get agent-specific output type (same as before)
              agent_output_type = self.get_output_type(name) or str
              # Step 2: Resolve it fully with to_type (same as before)
              resolved_output_type = to_type(agent_output_type, self.responses)
      
              # Create agent with runtime and context
              return Agent(
                  context=context,
                  model=config.model
                  if isinstance(config.model, str) or config.model is None
                  else config.model.get_model(),
                  system_prompt=sys_prompts,
                  name=name,
                  deps_type=deps_type,
                  description=config.description,
                  retries=config.retries,
                  session=config.get_session_config(),
                  output_retries=config.output_retries,
                  end_strategy=config.end_strategy,
                  debug=config.debug,
                  input_provider=input_provider,
                  output_type=resolved_output_type,
                  event_handlers=event_handlers,
                  agent_pool=pool,
                  tool_mode=config.tool_mode,
                  knowledge=config.knowledge,
                  toolsets=toolsets_list,
              )
      
          @classmethod
          def from_file(cls, path: JoinablePathLike) -> Self:
              """Load agent configuration from YAML file.
      
              Args:
                  path: Path to the configuration file
      
              Returns:
                  Loaded agent definition
      
              Raises:
                  ValueError: If loading fails
              """
              import yamling
      
              try:
                  data = yamling.load_yaml_file(path, resolve_inherit=True)
                  agent_def = cls.model_validate(data)
                  # Update all agents with the config file path and ensure names
                  agents = {
                      name: config.model_copy(update={"config_file_path": str(path)})
                      for name, config in agent_def.agents.items()
                  }
                  return agent_def.model_copy(update={"agents": agents})
              except Exception as exc:
                  msg = f"Failed to load agent config from {path}"
                  raise ValueError(msg) from exc
      
          @cached_property
          def pool(self) -> AgentPool:
              """Create an agent pool from this manifest.
      
              Returns:
                  Configured agent pool
              """
              from llmling_agent import AgentPool
      
              return AgentPool(manifest=self)
      
          def get_output_type(self, agent_name: str) -> type[Any] | None:
              """Get the resolved result type for an agent.
      
              Returns None if no result type is configured.
              """
              agent_config = self.agents[agent_name]
              if not agent_config.output_type:
                  return None
              logger.debug("Building response model", type=agent_config.output_type)
              if isinstance(agent_config.output_type, str):
                  response_def = self.responses[agent_config.output_type]
                  return response_def.response_schema.get_schema()
              return agent_config.output_type.response_schema.get_schema()
      

      INHERIT class-attribute instance-attribute

      INHERIT: str | list[str] | None = None
      

      Inheritance references.

      agents class-attribute instance-attribute

      agents: dict[str, AgentConfig] = Field(
          default_factory=dict,
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/"
          },
      )
      

      Mapping of agent IDs to their configurations.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/agent_configuration/
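
      A minimal usage sketch (the file name "agents.yml" and the agent id "assistant" are placeholders, not values from this project):

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest.from_file("agents.yml")  # placeholder path
          cfg = manifest.agents["assistant"]                 # hypothetical agent id
          print(cfg.model, cfg.description)                  # fields defined on AgentConfig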

      commands class-attribute instance-attribute

      commands: dict[str, CommandConfig | str] = Field(
          default_factory=dict,
          examples=[
              {"check_disk": "df -h", "analyze": "Analyze the current situation"},
              {"status": {"type": "static", "content": "Show system status"}},
          ],
      )
      

      Global command shortcuts for prompt injection.

      Supports both shorthand string syntax and full command configurations:

          commands:
            df: "check disk space"  # shorthand -> StaticCommandConfig
            analyze:  # full config
              type: file
              path: "./prompts/analysis.md"

      conversion class-attribute instance-attribute

      conversion: ConversionConfig = Field(default_factory=ConversionConfig)
      

      Document conversion configuration.

      jobs class-attribute instance-attribute

      jobs: dict[str, Job[Any]] = Field(default_factory=dict)
      

      Pre-defined jobs, ready to be used by nodes.

      mcp_servers class-attribute instance-attribute

      mcp_servers: list[str | MCPServerConfig] = Field(
          default_factory=list,
          examples=[
              ["uvx some-server"],
              [{"type": "streamable-http", "url": "http://mcp.example.com"}],
          ],
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/mcp_configuration/"
          },
      )
      

      List of MCP server configurations:

      These MCP servers are used to provide tools and other resources to the nodes.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/mcp_configuration/

      node_names property

      node_names: list[str]
      

      Get list of all agent and team names.

      nodes property

      nodes: dict[str, Any]
      

      Get all agent and team configurations.

      observability class-attribute instance-attribute

      observability: ObservabilityConfig = Field(default_factory=ObservabilityConfig)
      

      Observability provider configuration.

      pool cached property

      pool: AgentPool
      

      Create an agent pool from this manifest.

      Returns:

      Type Description
      AgentPool

      Configured agent pool
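
      Continuing the sketch above (manifest is an already-loaded AgentsManifest), the pool is built on first access and cached:

          pool = manifest.pool           # builds an AgentPool from this manifest
          assert manifest.pool is pool   # cached_property: later accesses reuse the same instance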

      pool_server class-attribute instance-attribute

      pool_server: MCPPoolServerConfig = Field(default_factory=MCPPoolServerConfig)
      

      Pool server configuration.

      This MCP server configuration is used for the pool MCP server, which exposes pool functionality to other applications / clients.

      prompt_manager cached property

      prompt_manager: PromptManager
      

      Get prompt manager for this manifest.

      prompts class-attribute instance-attribute

      prompts: PromptLibraryConfig = Field(
          default_factory=PromptLibraryConfig,
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/prompt_configuration/"
          },
      )
      

      Prompt library configuration.

      This configuration defines the prompt library, which is used to provide prompts to the nodes.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/prompt_configuration/

      resources class-attribute instance-attribute

      resources: dict[str, ResourceConfig | str] = Field(
          default_factory=dict,
          examples=[
              {"docs": "file://./docs", "data": "s3://bucket/data"},
              {"api": {"type": "source", "uri": "https://api.example.com", "cached": True}},
          ],
      )
      

      Resource configurations defining available filesystems.

      Supports both full config and URI shorthand:

          resources:
            docs: "file://./docs"  # shorthand
            data:  # full config
              type: "source"
              uri: "s3://bucket/data"
              cached: true

      responses class-attribute instance-attribute

      responses: dict[str, StructuredResponseConfig] = Field(
          default_factory=dict,
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/"
          },
      )
      

      Mapping of response names to their definitions.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/response_configuration/

      storage class-attribute instance-attribute

      storage: StorageConfig = Field(
          default_factory=StorageConfig,
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/storage_configuration/"
          },
      )


      Storage provider configuration.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/storage_configuration/

      teams class-attribute instance-attribute

      teams: dict[str, TeamConfig] = Field(
          default_factory=dict,
          json_schema_extra={
              "documentation_url": "https://phil65.github.io/llmling-agent/YAML%20Configuration/team_configuration/"
          },
      )
      

      Mapping of team IDs to their configurations.

      Docs: https://phil65.github.io/llmling-agent/YAML%20Configuration/team_configuration/

      vfs_registry cached property

      vfs_registry: VFSRegistry
      

      Get registry with all configured VFS resources.
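
      A small sketch of the shorthand handling (the resource name and URI are made up, and it assumes the remaining manifest fields can stay at their defaults):

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest(resources={"docs": "file://./docs"})  # URI shorthand
          registry = manifest.vfs_registry
          # The string "file://./docs" is wrapped in SourceResourceConfig(uri="file://./docs")
          # before being registered under the name "docs".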

      clone_agent_config

      clone_agent_config(
          name: str,
          new_name: str | None = None,
          *,
          template_context: dict[str, Any] | None = None,
          **overrides: Any
      ) -> str
      

      Create a copy of an agent configuration.

      Parameters:

      Name Type Description Default
      name str

      Name of agent to clone

      required
      new_name str | None

      Optional new name (auto-generated if None)

      None
      template_context dict[str, Any] | None

      Variables for template rendering

      None
      **overrides Any

      Configuration overrides for the clone

      {}

      Returns:

      Type Description
      str

      Name of the new agent

      Raises:

      Type Description
      KeyError

      If original agent not found

      ValueError

      If new name already exists or if overrides invalid
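
      A usage sketch (assuming manifest is a loaded AgentsManifest; the agent id "assistant" and the model override are made up, and overrides must be existing AgentConfig fields):

          new_name = manifest.clone_agent_config(
              "assistant",                 # hypothetical existing agent id
              "assistant_fast",
              model="openai:gpt-4o-mini",  # hypothetical override value
          )
          print(new_name)  # "assistant_fast"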

      Source code in src/llmling_agent/models/manifest.py
      def clone_agent_config(
          self,
          name: str,
          new_name: str | None = None,
          *,
          template_context: dict[str, Any] | None = None,
          **overrides: Any,
      ) -> str:
          """Create a copy of an agent configuration.
      
          Args:
              name: Name of agent to clone
              new_name: Optional new name (auto-generated if None)
              template_context: Variables for template rendering
              **overrides: Configuration overrides for the clone
      
          Returns:
              Name of the new agent
      
          Raises:
              KeyError: If original agent not found
              ValueError: If new name already exists or if overrides invalid
          """
          if name not in self.agents:
              msg = f"Agent {name} not found"
              raise KeyError(msg)
      
          actual_name = new_name or f"{name}_copy_{len(self.agents)}"
          if actual_name in self.agents:
              msg = f"Agent {actual_name} already exists"
              raise ValueError(msg)
      
          config = self.agents[name].model_copy(deep=True)
          for key, value in overrides.items():
              if not hasattr(config, key):
                  msg = f"Invalid override: {key}"
                  raise ValueError(msg)
              setattr(config, key, value)
      
          # Handle template rendering if context provided
          if template_context and "name" in template_context and "name" not in overrides:
              config = config.model_copy(update={"name": template_context["name"]})
      
          # Note: system_prompts will be rendered during agent creation, not here
          # config.system_prompts remains as PromptConfig objects
          self.agents[actual_name] = config
          return actual_name
      

      from_file classmethod

      from_file(path: JoinablePathLike) -> Self
      

      Load agent configuration from YAML file.

      Parameters:

      Name Type Description Default
      path JoinablePathLike

      Path to the configuration file

      required

      Returns:

      Type Description
      Self

      Loaded agent definition

      Raises:

      Type Description
      ValueError

      If loading fails
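
      For example (the path is a placeholder; any loading or validation error is re-raised as ValueError):

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest.from_file("agents.yml")
          print(manifest.node_names)  # names of all agents and teams defined in the file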

      Source code in src/llmling_agent/models/manifest.py
      @classmethod
      def from_file(cls, path: JoinablePathLike) -> Self:
          """Load agent configuration from YAML file.
      
          Args:
              path: Path to the configuration file
      
          Returns:
              Loaded agent definition
      
          Raises:
              ValueError: If loading fails
          """
          import yamling
      
          try:
              data = yamling.load_yaml_file(path, resolve_inherit=True)
              agent_def = cls.model_validate(data)
              # Update all agents with the config file path and ensure names
              agents = {
                  name: config.model_copy(update={"config_file_path": str(path)})
                  for name, config in agent_def.agents.items()
              }
              return agent_def.model_copy(update={"agents": agents})
          except Exception as exc:
              msg = f"Failed to load agent config from {path}"
              raise ValueError(msg) from exc
      

      get_command_configs

      get_command_configs() -> dict[str, CommandConfig]
      

      Get processed command configurations.

      Converts string entries to StaticCommandConfig instances.

      Returns:

      Type Description
      dict[str, CommandConfig]

      Dict mapping command names to CommandConfig instances
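
      A sketch of the string shorthand (the command name is made up, and it assumes the remaining manifest fields can stay at their defaults):

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest(commands={"check_disk": "df -h"})
          cmd = manifest.get_command_configs()["check_disk"]
          print(type(cmd).__name__, cmd.name, cmd.content)  # StaticCommandConfig check_disk df -h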

      Source code in src/llmling_agent/models/manifest.py
      def get_command_configs(self) -> dict[str, CommandConfig]:
          """Get processed command configurations.
      
          Converts string entries to StaticCommandConfig instances.
      
          Returns:
              Dict mapping command names to CommandConfig instances
          """
          result: dict[str, CommandConfig] = {}
          for name, config in self.commands.items():
              if isinstance(config, str):
                  result[name] = StaticCommandConfig(name=name, content=config)
              else:
                  # Set name if not provided
                  if config.name is None:
                      config.name = name
                  result[name] = config
          return result
      

      get_mcp_servers

      get_mcp_servers() -> list[MCPServerConfig]
      

      Get processed MCP server configurations.

      Converts string entries to appropriate MCP server configs based on heuristics:

      - URLs ending with "/sse" -> SSE server
      - URLs starting with http(s):// -> HTTP server
      - Everything else -> stdio command

      Returns:

      Type Description
      list[MCPServerConfig]

      List of MCPServerConfig instances

      Raises:

      Type Description
      ValueError

      If string entry is empty
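
      A sketch of these heuristics (the server addresses are placeholders, and it assumes the remaining manifest fields can stay at their defaults):

          from llmling_agent.models.manifest import AgentsManifest

          manifest = AgentsManifest(
              mcp_servers=[
                  "uvx some-server",              # plain command -> stdio server config
                  "https://mcp.example.com/sse",  # URL ending in /sse -> SSE server config
                  "https://mcp.example.com",      # other http(s) URL -> HTTP server config
              ]
          )
          for server in manifest.get_mcp_servers():
              print(type(server).__name__)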

      Source code in src/llmling_agent/models/manifest.py
      def get_mcp_servers(self) -> list[MCPServerConfig]:
          """Get processed MCP server configurations.
      
          Converts string entries to appropriate MCP server configs based on heuristics:
          - URLs ending with "/sse" -> SSE server
          - URLs starting with http(s):// -> HTTP server
          - Everything else -> stdio command
      
          Returns:
              List of MCPServerConfig instances
      
          Raises:
              ValueError: If string entry is empty
          """
          return [
              BaseMCPServerConfig.from_string(cfg) if isinstance(cfg, str) else cfg
              for cfg in self.mcp_servers
          ]
      

      get_output_type

      get_output_type(agent_name: str) -> type[Any] | None
      

      Get the resolved result type for an agent.

      Returns None if no result type is configured.
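
      For example (assuming manifest is a loaded AgentsManifest and "assistant" is one of its agents):

          schema = manifest.get_output_type("assistant")  # hypothetical agent id
          if schema is None:
              print("no structured output configured")    # the agent falls back to plain str output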

      Source code in src/llmling_agent/models/manifest.py
      def get_output_type(self, agent_name: str) -> type[Any] | None:
          """Get the resolved result type for an agent.
      
          Returns None if no result type is configured.
          """
          agent_config = self.agents[agent_name]
          if not agent_config.output_type:
              return None
          logger.debug("Building response model", type=agent_config.output_type)
          if isinstance(agent_config.output_type, str):
              response_def = self.responses[agent_config.output_type]
              return response_def.response_schema.get_schema()
          return agent_config.output_type.response_schema.get_schema()
      

      normalize_workers classmethod

      normalize_workers(data: dict[str, Any]) -> dict[str, Any]
      

      Convert string workers to appropriate WorkerConfig for all agents.

      Source code in src/llmling_agent/models/manifest.py
      @model_validator(mode="before")
      @classmethod
      def normalize_workers(cls, data: dict[str, Any]) -> dict[str, Any]:
          """Convert string workers to appropriate WorkerConfig for all agents."""
          teams = data.get("teams", {})
          agents = data.get("agents", {})
      
          # Process workers for all agents that have them
          for agent_name, agent_config in agents.items():
              if isinstance(agent_config, dict):
                  workers = agent_config.get("workers", [])
              else:
                  workers = agent_config.workers
      
              if workers:
                  normalized: list[BaseWorkerConfig] = []
      
                  for worker in workers:
                      match worker:
                          case str() as name if name in teams:
                              # Determine type based on presence in teams/agents
                              normalized.append(TeamWorkerConfig(name=name))
                          case str() as name if name in agents:
                              normalized.append(AgentWorkerConfig(name=name))
                          case str() as name:  # Default to agent if type can't be determined
                              normalized.append(AgentWorkerConfig(name=name))
      
                          case dict() as config:
                              # If type is explicitly specified, use it
                              if worker_type := config.get("type"):
                                  match worker_type:
                                      case "team":
                                          normalized.append(TeamWorkerConfig(**config))
                                      case "agent":
                                          normalized.append(AgentWorkerConfig(**config))
                                      case _:
                                          msg = f"Invalid worker type: {worker_type}"
                                          raise ValueError(msg)
                              else:
                                  # Determine type based on worker name
                                  worker_name = config.get("name")
                                  if not worker_name:
                                      msg = "Worker config missing name"
                                      raise ValueError(msg)
      
                                  if worker_name in teams:
                                      normalized.append(TeamWorkerConfig(**config))
                                  else:
                                      normalized.append(AgentWorkerConfig(**config))
      
                          case BaseWorkerConfig():  # Already normalized
                              normalized.append(worker)
      
                          case _:
                              msg = f"Invalid worker configuration: {worker}"
                              raise ValueError(msg)
      
                  if isinstance(agent_config, dict):
                      agent_config["workers"] = normalized
                  else:
                      # Need to create a new dict with updated workers
                      agent_dict = agent_config.model_dump()
                      agent_dict["workers"] = normalized
                      agents[agent_name] = agent_dict
      
          return data
      

      resolve_inheritance classmethod

      resolve_inheritance(data: dict[str, Any]) -> dict[str, Any]
      

      Resolve agent inheritance chains.

      Source code in src/llmling_agent/models/manifest.py
      @model_validator(mode="before")
      @classmethod
      def resolve_inheritance(cls, data: dict[str, Any]) -> dict[str, Any]:
          """Resolve agent inheritance chains."""
          nodes = data.get("agents", {})
          resolved: dict[str, dict[str, Any]] = {}
          seen: set[str] = set()
      
          def resolve_node(name: str) -> dict[str, Any]:
              if name in resolved:
                  return resolved[name]
      
              if name in seen:
                  msg = f"Circular inheritance detected: {name}"
                  raise ValueError(msg)
      
              seen.add(name)
              config = (
                  nodes[name].model_copy()
                  if hasattr(nodes[name], "model_copy")
                  else nodes[name].copy()
              )
              inherit = config.get("inherits") if isinstance(config, dict) else config.inherits
              if inherit:
                  if inherit not in nodes:
                      msg = f"Parent agent {inherit} not found"
                      raise ValueError(msg)
      
                  # Get resolved parent config
                  parent = resolve_node(inherit)
                  # Merge parent with child (child overrides parent)
                  merged = parent.copy()
                  merged.update(config)
                  config = merged
      
              seen.remove(name)
              resolved[name] = config
              return config  # type: ignore[no-any-return]
      
          # Resolve all nodes
          for name in nodes:
              resolved[name] = resolve_node(name)
      
          # Update nodes with resolved configs
          data["agents"] = resolved
          return data