
stdlib_provider

Class info

Classes

Name                 Module                            Description
InputProvider        llmling_agent.ui.base             Base class for handling all UI interactions.
StdlibInputProvider  llmling_agent.ui.stdlib_provider  Input provider using only Python stdlib functionality.
ToolError            llmling_agent.tools.exceptions    Tool-related errors.

      🛈 DocStrings

      Stdlib input provider.

      StdlibInputProvider

      Bases: InputProvider

      Input provider using only Python stdlib functionality.

      Source code in src/llmling_agent/ui/stdlib_provider.py
      class StdlibInputProvider(InputProvider):
          """Input provider using only Python stdlib functionality."""
      
          async def get_text_input(
              self,
              context: AgentContext,
              prompt: str,
              message_history: list[ChatMessage] | None = None,
          ) -> str:
              return input(f"{prompt}\n> ")
      
          async def get_structured_input(
              self,
              context: AgentContext,
              prompt: str,
              output_type: type[BaseModel],
              message_history: list[ChatMessage] | None = None,
          ) -> BaseModel:
              """Get structured input, with promptantic and fallback handling."""
              if result := await _get_promptantic_result(output_type):
                  return result
      
              # Fallback: Get raw input and validate
              prompt = f"{prompt}\n(Please provide response as {output_type.__name__})"
              raw_input = await self.get_input(context, prompt, message_history=message_history)
              try:
                  return output_type.model_validate_json(raw_input)
              except Exception as e:
                  msg = f"Invalid response format: {e}"
                  raise ToolError(msg) from e
      
          async def get_tool_confirmation(
              self,
              context: AgentContext,
              tool: Tool,
              args: dict[str, Any],
              message_history: list[ChatMessage] | None = None,
          ) -> ConfirmationResult:
              import anyenv
      
              agent_name = context.node_name
              prompt = dedent(f"""
                  Tool Execution Confirmation
                  -------------------------
                  Tool: {tool.name}
                  Description: {tool.description or "No description"}
                  Agent: {agent_name}
      
                  Arguments:
                  {anyenv.dump_json(args, indent=True)}
      
                  Options:
                  - y: allow execution
                  - n/skip: skip this tool
                  - abort: abort current run
                  - quit: abort entire chain
                  """).strip()
      
              response = input(f"{prompt}\nChoice [y/n/abort/quit]: ").lower()
              match response:
                  case "y" | "yes":
                      return "allow"
                  case "abort":
                      return "abort_run"
                  case "quit":
                      return "abort_chain"
                  case _:
                      return "skip"
      
          async def get_elicitation(
              self,
              context: AgentContext,
              params: types.ElicitRequestParams,
              message_history: list[ChatMessage] | None = None,
          ) -> types.ElicitResult | types.ErrorData:
              """Get user response to elicitation request using stdlib input."""
              try:
                  print(f"\n{params.message}")
      
                  # Handle structured input with schema
                  print("Please provide response as JSON:")
                  if params.requestedSchema:
                      import anyenv
      
                      schema_json = anyenv.dump_json(params.requestedSchema, indent=True)
                      print(f"Expected schema:\n{schema_json}")
                  response = input("> ")
                  try:
                      import anyenv
      
                      content = anyenv.load_json(response, return_type=dict)
                      return types.ElicitResult(action="accept", content=content)
                  except anyenv.JsonLoadError as e:
                      return types.ErrorData(
                          code=types.INVALID_REQUEST, message=f"Invalid JSON: {e}"
                      )
      
              except KeyboardInterrupt:
                  return types.ElicitResult(action="cancel")
              except Exception as e:  # noqa: BLE001
                  return types.ErrorData(
                      code=types.INVALID_REQUEST, message=f"Elicitation failed: {e}"
                  )
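
      All methods above are coroutines, so they must be awaited. A minimal sketch of
      calling the provider directly, assuming StdlibInputProvider needs no constructor
      arguments and that an AgentContext (ctx below) is obtained elsewhere; neither is
      spelled out on this page.

          # Minimal sketch: drive the provider by hand. Assumes StdlibInputProvider()
          # takes no constructor arguments and that `ctx` is a real AgentContext
          # obtained elsewhere; neither detail is shown on this page.
          from llmling_agent.ui.stdlib_provider import StdlibInputProvider


          async def demo(ctx) -> None:
              provider = StdlibInputProvider()

              # Prints the prompt, then blocks on stdin and returns the typed line.
              answer = await provider.get_text_input(ctx, "Describe the task:")
              print(f"user said: {answer}")

          # run with: asyncio.run(demo(ctx)) once a real AgentContext is at hand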
      

      get_elicitation async

      get_elicitation(
          context: AgentContext,
          params: ElicitRequestParams,
          message_history: list[ChatMessage] | None = None,
      ) -> ElicitResult | ErrorData
      

      Get user response to elicitation request using stdlib input.

      Source code in src/llmling_agent/ui/stdlib_provider.py
      async def get_elicitation(
          self,
          context: AgentContext,
          params: types.ElicitRequestParams,
          message_history: list[ChatMessage] | None = None,
      ) -> types.ElicitResult | types.ErrorData:
          """Get user response to elicitation request using stdlib input."""
          try:
              print(f"\n{params.message}")
      
              # Handle structured input with schema
              print("Please provide response as JSON:")
              if params.requestedSchema:
                  import anyenv
      
                  schema_json = anyenv.dump_json(params.requestedSchema, indent=True)
                  print(f"Expected schema:\n{schema_json}")
              response = input("> ")
              try:
                  import anyenv
      
                  content = anyenv.load_json(response, return_type=dict)
                  return types.ElicitResult(action="accept", content=content)
              except anyenv.JsonLoadError as e:
                  return types.ErrorData(
                      code=types.INVALID_REQUEST, message=f"Invalid JSON: {e}"
                  )
      
          except KeyboardInterrupt:
              return types.ElicitResult(action="cancel")
          except Exception as e:  # noqa: BLE001
              return types.ErrorData(
                  code=types.INVALID_REQUEST, message=f"Elicitation failed: {e}"
              )
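
      The elicitation flow expects a JSON object on stdin and turns parse failures into
      ErrorData instead of raising. A rough illustration of that accept-vs-error split,
      using the stdlib json module purely for demonstration (the real code goes through
      anyenv.load_json as shown above):

          # Illustration of the accept-vs-error decision made on the raw stdin text.
          # Uses stdlib json for demonstration; the actual method uses anyenv.load_json.
          import json


          def classify_elicitation_response(raw: str) -> tuple[str, object]:
              try:
                  content = json.loads(raw)
              except json.JSONDecodeError as exc:
                  return ("error", f"Invalid JSON: {exc}")
              if not isinstance(content, dict):
                  # the real code requests a dict via return_type=dict
                  return ("error", "Expected a JSON object")
              return ("accept", content)


          print(classify_elicitation_response('{"name": "Ada", "confirm": true}'))
          # ('accept', {'name': 'Ada', 'confirm': True})
          print(classify_elicitation_response("not json"))
          # ('error', 'Invalid JSON: ...')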
      

      get_structured_input async

      get_structured_input(
          context: AgentContext,
          prompt: str,
          output_type: type[BaseModel],
          message_history: list[ChatMessage] | None = None,
      ) -> BaseModel
      

      Get structured input, with promptantic and fallback handling.

      Source code in src/llmling_agent/ui/stdlib_provider.py
      async def get_structured_input(
          self,
          context: AgentContext,
          prompt: str,
          output_type: type[BaseModel],
          message_history: list[ChatMessage] | None = None,
      ) -> BaseModel:
          """Get structured input, with promptantic and fallback handling."""
          if result := await _get_promptantic_result(output_type):
              return result
      
          # Fallback: Get raw input and validate
          prompt = f"{prompt}\n(Please provide response as {output_type.__name__})"
          raw_input = await self.get_input(context, prompt, message_history=message_history)
          try:
              return output_type.model_validate_json(raw_input)
          except Exception as e:
              msg = f"Invalid response format: {e}"
              raise ToolError(msg) from e
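
      When the promptantic path yields nothing, the fallback treats the raw user input
      as JSON for the requested model. A small sketch of that validation step with a
      hypothetical model, using plain pydantic outside the provider:

          # Sketch of the fallback validation step with a made-up model; the provider
          # does the same thing with the user's raw stdin text.
          from pydantic import BaseModel, ValidationError


          class Booking(BaseModel):  # hypothetical output_type for illustration
              name: str
              seats: int


          raw = '{"name": "Ada", "seats": 2}'
          print(Booking.model_validate_json(raw))  # name='Ada' seats=2

          try:
              Booking.model_validate_json('{"name": "Ada"}')  # missing "seats"
          except ValidationError as exc:
              # the provider wraps this in ToolError("Invalid response format: ...")
              print(f"Invalid response format: {exc}")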