Skip to content

messages

Class info

Classes

Name Children Inherits
AgentResponse
llmling_agent.messaging.messages
Result from an agent's execution.
    ChatMessage
    llmling_agent.messaging.messages
    Common message format for all UI types.
      TeamResponse
      llmling_agent.messaging.messages
      Results from a team execution.
        TokenCost
        llmling_agent.messaging.messages
        Combined token and cost tracking.

          🛈 DocStrings

          Message and token usage models.

          AgentResponse dataclass

          Result from an agent's execution.

          Source code in src/llmling_agent/messaging/messages.py
          419
          420
          421
          422
          423
          424
          425
          426
          427
          428
          429
          430
          431
          432
          433
          434
          435
          436
          437
          438
          439
          440
          441
          442
          443
          @dataclass
          class AgentResponse[TResult]:
              """Result from an agent's execution."""
          
              agent_name: str
              """Name of the agent that produced this result"""
          
              message: ChatMessage[TResult] | None
              """The actual message with content and metadata"""
          
              timing: float | None = None
              """Time taken by this agent in seconds"""
          
              error: str | None = None
              """Error message if agent failed"""
          
              @property
              def success(self) -> bool:
                  """Whether the agent completed successfully."""
                  return self.error is None
          
              @property
              def response(self) -> TResult | None:
                  """Convenient access to message content."""
                  return self.message.content if self.message else None
          

          agent_name instance-attribute

          agent_name: str
          

          Name of the agent that produced this result

          error class-attribute instance-attribute

          error: str | None = None
          

          Error message if agent failed

          message instance-attribute

          message: ChatMessage[TResult] | None
          

          The actual message with content and metadata

          response property

          response: TResult | None
          

          Convenient access to message content.

          success property

          success: bool
          

          Whether the agent completed successfully.

          timing class-attribute instance-attribute

          timing: float | None = None
          

          Time taken by this agent in seconds

          ChatMessage dataclass

          Common message format for all UI types.

          Generically typed with: ChatMessage[Type of Content] The type can either be str or a BaseModel subclass.

          Source code in src/llmling_agent/messaging/messages.py
          151
          152
          153
          154
          155
          156
          157
          158
          159
          160
          161
          162
          163
          164
          165
          166
          167
          168
          169
          170
          171
          172
          173
          174
          175
          176
          177
          178
          179
          180
          181
          182
          183
          184
          185
          186
          187
          188
          189
          190
          191
          192
          193
          194
          195
          196
          197
          198
          199
          200
          201
          202
          203
          204
          205
          206
          207
          208
          209
          210
          211
          212
          213
          214
          215
          216
          217
          218
          219
          220
          221
          222
          223
          224
          225
          226
          227
          228
          229
          230
          231
          232
          233
          234
          235
          236
          237
          238
          239
          240
          241
          242
          243
          244
          245
          246
          247
          248
          249
          250
          251
          252
          253
          254
          255
          256
          257
          258
          259
          260
          261
          262
          263
          264
          265
          266
          267
          268
          269
          270
          271
          272
          273
          274
          275
          276
          277
          278
          279
          280
          281
          282
          283
          284
          285
          286
          287
          288
          289
          290
          291
          292
          293
          294
          295
          296
          297
          298
          299
          300
          301
          302
          303
          304
          305
          306
          307
          308
          309
          310
          311
          312
          313
          314
          315
          316
          317
          318
          319
          320
          321
          322
          323
          324
          325
          326
          327
          328
          329
          330
          331
          332
          333
          334
          335
          336
          337
          338
          339
          340
          341
          342
          343
          344
          345
          346
          347
          348
          349
          350
          351
          352
          353
          354
          355
          356
          357
          358
          359
          360
          361
          362
          363
          364
          365
          366
          367
          368
          369
          370
          371
          372
          373
          374
          375
          376
          377
          378
          379
          380
          381
          382
          383
          384
          385
          386
          387
          388
          389
          390
          391
          392
          393
          394
          395
          396
          397
          398
          399
          400
          401
          402
          403
          404
          405
          406
          407
          408
          409
          410
          411
          412
          413
          414
          415
          416
          @dataclass
          class ChatMessage[TContent]:
              """Common message format for all UI types.
          
              Generically typed with: ChatMessage[Type of Content]
              The type can either be str or a BaseModel subclass.
              """
          
              content: TContent
              """Message content, typed as TContent (either str or BaseModel)."""
          
              role: MessageRole
              """Role of the message sender (user/assistant)."""
          
              metadata: SimpleJsonType = field(default_factory=dict)
              """Additional metadata about the message."""
          
              timestamp: datetime = field(default_factory=get_now)
              """When this message was created."""
          
              cost_info: TokenCost | None = None
              """Token usage and costs for this specific message if available."""
          
              message_id: str = field(default_factory=lambda: str(uuid4()))
              """Unique identifier for this message."""
          
              conversation_id: str | None = None
              """ID of the conversation this message belongs to."""
          
              response_time: float | None = None
              """Time it took the LLM to respond."""
          
              tool_calls: list[ToolCallInfo] = field(default_factory=list)
              """List of tool calls made during message generation."""
          
              associated_messages: list[ChatMessage[Any]] = field(default_factory=list)
              """List of messages which were generated during the the creation of this messsage."""
          
              name: str | None = None
              """Display name for the message sender in UI."""
          
              forwarded_from: list[str] = field(default_factory=list)
              """List of agent names (the chain) that forwarded this message to the sender."""
          
              provider_details: dict[str, Any] = field(default_factory=dict)
              """Provider specific metadata / extra information."""
          
              parts: Sequence[ModelResponsePart | ModelRequestPart] = field(default_factory=list)
              """The parts of the model message."""
          
              usage: RequestUsage = field(default_factory=RequestUsage)
              """Usage information for the request.
          
              This has a default to make tests easier,
              and to support loading old messages where usage will be missing.
              """
          
              model_name: str | None = None
              """The name of the model that generated the response."""
          
              provider_name: str | None = None
              """The name of the LLM provider that generated the response."""
          
              provider_response_id: str | None = None
              """request ID as specified by the model provider.
          
              This can be used to track the specific request to the model."""
          
              finish_reason: FinishReason | None = None
              """Reason the model finished generating the response.
          
              Normalized to OpenTelemetry values."""
          
              @property
              def kind(self) -> Literal["request", "response"]:
                  """Role of the message."""
                  match self.role:
                      case "assistant":
                          return "response"
                      case "user":
                          return "request"
          
              def to_pydantic_ai(self) -> ModelRequest | ModelResponse:
                  """Convert this message to a Pydantic model."""
                  match self.kind:
                      case "request":
                          return ModelRequest(parts=self.parts, instructions=None)  # type: ignore
                      case "response":
                          return ModelResponse(
                              parts=self.parts,  # type: ignore
                              usage=self.usage,
                              model_name=self.model_name,
                              timestamp=self.timestamp,
                              provider_name=self.provider_name,
                              provider_details=self.provider_details,
                              finish_reason=self.finish_reason,
                              provider_response_id=self.provider_response_id,
                          )
          
              @classmethod
              def from_pydantic_ai[TContentType](
                  cls,
                  content: TContentType,
                  message: ModelRequest | ModelResponse,
                  conversation_id: str | None = None,
                  name: str | None = None,
                  message_id: str | None = None,
                  forwarded_from: list[str] | None = None,
              ) -> ChatMessage[TContentType]:
                  """Convert a Pydantic model to a ChatMessage."""
                  match message:
                      case ModelRequest(parts=parts, instructions=_instructions):
                          return ChatMessage(
                              parts=parts,
                              content=content,
                              role="user" if message.kind == "request" else "assistant",
                              message_id=message_id or str(uuid.uuid4()),
                              # instructions=instructions,
                              forwarded_from=forwarded_from or [],
                              name=name,
                          )
                      case ModelResponse(
                          parts=parts,
                          usage=usage,
                          model_name=model_name,
                          timestamp=timestamp,
                          provider_name=provider_name,
                          provider_details=provider_details,
                          finish_reason=finish_reason,
                          provider_response_id=provider_response_id,
                      ):
                          return ChatMessage(
                              role="user" if message.kind == "request" else "assistant",
                              content=content,
                              parts=parts,
                              usage=usage,
                              message_id=message_id or str(uuid.uuid4()),
                              conversation_id=conversation_id,
                              model_name=model_name,
                              timestamp=timestamp,
                              provider_name=provider_name,
                              provider_details=provider_details or {},
                              finish_reason=finish_reason,
                              provider_response_id=provider_response_id,
                              name=name,
                              forwarded_from=forwarded_from or [],
                          )
                      case _:
                          msg = f"Unknown message kind: {message.kind}"
                          raise ValueError(msg)
          
              def forwarded(self, previous_message: ChatMessage[Any]) -> Self:
                  """Create new message showing it was forwarded from another message.
          
                  Args:
                      previous_message: The message that led to this one's creation
          
                  Returns:
                      New message with updated chain showing the path through previous message
                  """
                  from_ = [*previous_message.forwarded_from, previous_message.name or "unknown"]
                  return replace(self, forwarded_from=from_)
          
              def to_text_message(self) -> ChatMessage[str]:
                  """Convert this message to a text-only version."""
                  return dataclasses.replace(self, content=str(self.content))  # type: ignore
          
              def to_request(self) -> Self:
                  """Convert this message to a request message.
          
                  If the message is already a request (user role), this is a no-op.
                  If it's a response (assistant role), converts response parts to user content.
          
                  Returns:
                      New ChatMessage with role='user' and converted parts
                  """
                  if self.role == "user":
                      # Already a request, return as-is
                      return self
          
                  # Convert response parts to user content
                  converted_parts: list[Any] = []
                  user_content: list[UserContent] = []
          
                  for part in self.parts:
                      match part:
                          case TextPart(content=text_content):
                              # Text parts become user content strings
                              user_content.append(text_content)
                          case FilePart(content=binary_content):
                              # File parts (images, etc.) become user content directly
                              user_content.append(binary_content)
                          case _:
                              # Other parts (tool calls, etc.) are kept as-is for now
                              # Could be extended to handle more conversion cases
                              pass
          
                  # Create new UserPromptPart with converted content
                  if user_content:
                      if len(user_content) == 1 and isinstance(user_content[0], str):
                          # Single string content
                          converted_parts = [UserPromptPart(content=user_content[0])]
                      else:
                          # Multi-modal content
                          converted_parts = [UserPromptPart(content=user_content)]
                  else:
                      # Fallback to text representation if no convertible parts
                      converted_parts = [UserPromptPart(content=str(self.content))]
          
                  return replace(self, role="user", parts=converted_parts, cost_info=None)
          
              @property
              def data(self) -> TContent:
                  """Get content as typed data. Provides compat to AgentRunResult."""
                  return self.content
          
              def format(
                  self,
                  style: FormatStyle = "simple",
                  *,
                  template: str | None = None,
                  variables: dict[str, Any] | None = None,
                  show_metadata: bool = False,
                  show_costs: bool = False,
              ) -> str:
                  """Format message with configurable style.
          
                  Args:
                      style: Predefined style or "custom" for custom template
                      template: Custom Jinja template (required if style="custom")
                      variables: Additional variables for template rendering
                      show_metadata: Whether to include metadata
                      show_costs: Whether to include cost information
          
                  Raises:
                      ValueError: If style is "custom" but no template provided
                              or if style is invalid
                  """
                  from jinjarope import Environment
                  import yamling
          
                  env = Environment(trim_blocks=True, lstrip_blocks=True)
                  env.filters["to_yaml"] = yamling.dump_yaml
          
                  match style:
                      case "custom":
                          if not template:
                              msg = "Custom style requires a template"
                              raise ValueError(msg)
                          template_str = template
                      case _ if style in MESSAGE_TEMPLATES:
                          template_str = MESSAGE_TEMPLATES[style]
                      case _:
                          msg = f"Invalid style: {style}"
                          raise ValueError(msg)
                  template_obj = env.from_string(template_str)
                  vars_ = {
                      **(self.__dict__),
                      "show_metadata": show_metadata,
                      "show_costs": show_costs,
                  }
                  print(vars_)
                  if variables:
                      vars_.update(variables)
          
                  return template_obj.render(**vars_)
          

          associated_messages class-attribute instance-attribute

          associated_messages: list[ChatMessage[Any]] = field(default_factory=list)
          

          List of messages which were generated during the creation of this message.

          content instance-attribute

          content: TContent
          

          Message content, typed as TContent (either str or BaseModel).

          conversation_id class-attribute instance-attribute

          conversation_id: str | None = None
          

          ID of the conversation this message belongs to.

          cost_info class-attribute instance-attribute

          cost_info: TokenCost | None = None
          

          Token usage and costs for this specific message if available.

          data property

          data: TContent
          

          Get content as typed data. Provides compat to AgentRunResult.

          finish_reason class-attribute instance-attribute

          finish_reason: FinishReason | None = None
          

          Reason the model finished generating the response.

          Normalized to OpenTelemetry values.

          forwarded_from class-attribute instance-attribute

          forwarded_from: list[str] = field(default_factory=list)
          

          List of agent names (the chain) that forwarded this message to the sender.

          kind property

          kind: Literal['request', 'response']
          

          Role of the message.

          message_id class-attribute instance-attribute

          message_id: str = field(default_factory=lambda: str(uuid4()))
          

          Unique identifier for this message.

          metadata class-attribute instance-attribute

          metadata: SimpleJsonType = field(default_factory=dict)
          

          Additional metadata about the message.

          model_name class-attribute instance-attribute

          model_name: str | None = None
          

          The name of the model that generated the response.

          name class-attribute instance-attribute

          name: str | None = None
          

          Display name for the message sender in UI.

          parts class-attribute instance-attribute

          parts: Sequence[ModelResponsePart | ModelRequestPart] = field(default_factory=list)
          

          The parts of the model message.

          provider_details class-attribute instance-attribute

          provider_details: dict[str, Any] = field(default_factory=dict)
          

          Provider specific metadata / extra information.

          provider_name class-attribute instance-attribute

          provider_name: str | None = None
          

          The name of the LLM provider that generated the response.

          provider_response_id class-attribute instance-attribute

          provider_response_id: str | None = None
          

          Request ID as specified by the model provider.

          This can be used to track the specific request to the model.

          response_time class-attribute instance-attribute

          response_time: float | None = None
          

          Time it took the LLM to respond.

          role instance-attribute

          role: MessageRole
          

          Role of the message sender (user/assistant).

          timestamp class-attribute instance-attribute

          timestamp: datetime = field(default_factory=get_now)
          

          When this message was created.

          tool_calls class-attribute instance-attribute

          tool_calls: list[ToolCallInfo] = field(default_factory=list)
          

          List of tool calls made during message generation.

          usage class-attribute instance-attribute

          usage: RequestUsage = field(default_factory=RequestUsage)
          

          Usage information for the request.

          This has a default to make tests easier, and to support loading old messages where usage will be missing.

          format

          format(
              style: FormatStyle = "simple",
              *,
              template: str | None = None,
              variables: dict[str, Any] | None = None,
              show_metadata: bool = False,
              show_costs: bool = False,
          ) -> str
          

          Format message with configurable style.

          Parameters:

          Name Type Description Default
          style FormatStyle

          Predefined style or "custom" for custom template

          'simple'
          template str | None

          Custom Jinja template (required if style="custom")

          None
          variables dict[str, Any] | None

          Additional variables for template rendering

          None
          show_metadata bool

          Whether to include metadata

          False
          show_costs bool

          Whether to include cost information

          False

          Raises:

          Type Description
          ValueError

          If style is "custom" but no template provided or if style is invalid

          Source code in src/llmling_agent/messaging/messages.py
          367
          368
          369
          370
          371
          372
          373
          374
          375
          376
          377
          378
          379
          380
          381
          382
          383
          384
          385
          386
          387
          388
          389
          390
          391
          392
          393
          394
          395
          396
          397
          398
          399
          400
          401
          402
          403
          404
          405
          406
          407
          408
          409
          410
          411
          412
          413
          414
          415
          416
          def format(
              self,
              style: FormatStyle = "simple",
              *,
              template: str | None = None,
              variables: dict[str, Any] | None = None,
              show_metadata: bool = False,
              show_costs: bool = False,
          ) -> str:
              """Format message with configurable style.
          
              Args:
                  style: Predefined style or "custom" for custom template
                  template: Custom Jinja template (required if style="custom")
                  variables: Additional variables for template rendering
                  show_metadata: Whether to include metadata
                  show_costs: Whether to include cost information
          
              Raises:
                  ValueError: If style is "custom" but no template provided
                          or if style is invalid
              """
              from jinjarope import Environment
              import yamling
          
              env = Environment(trim_blocks=True, lstrip_blocks=True)
              env.filters["to_yaml"] = yamling.dump_yaml
          
              match style:
                  case "custom":
                      if not template:
                          msg = "Custom style requires a template"
                          raise ValueError(msg)
                      template_str = template
                  case _ if style in MESSAGE_TEMPLATES:
                      template_str = MESSAGE_TEMPLATES[style]
                  case _:
                      msg = f"Invalid style: {style}"
                      raise ValueError(msg)
              template_obj = env.from_string(template_str)
              vars_ = {
                  **(self.__dict__),
                  "show_metadata": show_metadata,
                  "show_costs": show_costs,
              }
              print(vars_)
              if variables:
                  vars_.update(variables)
          
              return template_obj.render(**vars_)
          

          forwarded

          forwarded(previous_message: ChatMessage[Any]) -> Self
          

          Create new message showing it was forwarded from another message.

          Parameters:

          Name Type Description Default
          previous_message ChatMessage[Any]

          The message that led to this one's creation

          required

          Returns:

          Type Description
          Self

          New message with updated chain showing the path through previous message

          Source code in src/llmling_agent/messaging/messages.py
          302
          303
          304
          305
          306
          307
          308
          309
          310
          311
          312
          def forwarded(self, previous_message: ChatMessage[Any]) -> Self:
              """Return a copy of this message extended with the forwarding chain.

              Args:
                  previous_message: The message that led to this one's creation

              Returns:
                  New message whose forwarded_from chain includes the previous sender
              """
              sender = previous_message.name or "unknown"
              chain = list(previous_message.forwarded_from)
              chain.append(sender)
              return replace(self, forwarded_from=chain)
          

          from_pydantic_ai classmethod

          from_pydantic_ai(
              content: TContentType,
              message: ModelRequest | ModelResponse,
              conversation_id: str | None = None,
              name: str | None = None,
              message_id: str | None = None,
              forwarded_from: list[str] | None = None,
          ) -> ChatMessage[TContentType]
          

          Convert a Pydantic model to a ChatMessage.

          Source code in src/llmling_agent/messaging/messages.py
          250
          251
          252
          253
          254
          255
          256
          257
          258
          259
          260
          261
          262
          263
          264
          265
          266
          267
          268
          269
          270
          271
          272
          273
          274
          275
          276
          277
          278
          279
          280
          281
          282
          283
          284
          285
          286
          287
          288
          289
          290
          291
          292
          293
          294
          295
          296
          297
          298
          299
          300
          @classmethod
          def from_pydantic_ai[TContentType](
              cls,
              content: TContentType,
              message: ModelRequest | ModelResponse,
              conversation_id: str | None = None,
              name: str | None = None,
              message_id: str | None = None,
              forwarded_from: list[str] | None = None,
          ) -> ChatMessage[TContentType]:
              """Convert a pydantic-ai model message to a ChatMessage.

              Args:
                  content: Typed content to attach to the resulting message
                  message: The pydantic-ai request/response to convert
                  conversation_id: Optional conversation this message belongs to
                      (only applied for responses)
                  name: Optional display name for the sender
                  message_id: Optional explicit id; a fresh uuid4 is generated otherwise
                  forwarded_from: Optional forwarding chain to carry over

              Raises:
                  ValueError: If the message is neither a ModelRequest nor a ModelResponse
              """
              # Structural match: destructure the pydantic-ai message by its concrete type.
              match message:
                  case ModelRequest(parts=parts, instructions=_instructions):
                      # Requests carry only parts; usage/model metadata do not apply.
                      return ChatMessage(
                          parts=parts,
                          content=content,
                          role="user" if message.kind == "request" else "assistant",
                          message_id=message_id or str(uuid.uuid4()),
                          # instructions=instructions,
                          forwarded_from=forwarded_from or [],
                          name=name,
                      )
                  case ModelResponse(
                      parts=parts,
                      usage=usage,
                      model_name=model_name,
                      timestamp=timestamp,
                      provider_name=provider_name,
                      provider_details=provider_details,
                      finish_reason=finish_reason,
                      provider_response_id=provider_response_id,
                  ):
                      # Responses preserve usage and provider metadata from the model run.
                      return ChatMessage(
                          role="user" if message.kind == "request" else "assistant",
                          content=content,
                          parts=parts,
                          usage=usage,
                          message_id=message_id or str(uuid.uuid4()),
                          conversation_id=conversation_id,
                          model_name=model_name,
                          timestamp=timestamp,
                          provider_name=provider_name,
                          # provider_details may be None on the source object; normalize to {}
                          provider_details=provider_details or {},
                          finish_reason=finish_reason,
                          provider_response_id=provider_response_id,
                          name=name,
                          forwarded_from=forwarded_from or [],
                      )
                  case _:
                      msg = f"Unknown message kind: {message.kind}"
                      raise ValueError(msg)
          

          to_pydantic_ai

          to_pydantic_ai() -> ModelRequest | ModelResponse
          

          Convert this message to a pydantic-ai message (ModelRequest or ModelResponse).

          Source code in src/llmling_agent/messaging/messages.py
          233
          234
          235
          236
          237
          238
          239
          240
          241
          242
          243
          244
          245
          246
          247
          248
          def to_pydantic_ai(self) -> ModelRequest | ModelResponse:
              """Convert this message to a Pydantic model."""
              match self.kind:
                  case "request":
                      return ModelRequest(parts=self.parts, instructions=None)  # type: ignore
                  case "response":
                      return ModelResponse(
                          parts=self.parts,  # type: ignore
                          usage=self.usage,
                          model_name=self.model_name,
                          timestamp=self.timestamp,
                          provider_name=self.provider_name,
                          provider_details=self.provider_details,
                          finish_reason=self.finish_reason,
                          provider_response_id=self.provider_response_id,
                      )
          

          to_request

          to_request() -> Self
          

          Convert this message to a request message.

          If the message is already a request (user role), this is a no-op. If it's a response (assistant role), converts response parts to user content.

          Returns:

          Type Description
          Self

          New ChatMessage with role='user' and converted parts

          Source code in src/llmling_agent/messaging/messages.py
          318
          319
          320
          321
          322
          323
          324
          325
          326
          327
          328
          329
          330
          331
          332
          333
          334
          335
          336
          337
          338
          339
          340
          341
          342
          343
          344
          345
          346
          347
          348
          349
          350
          351
          352
          353
          354
          355
          356
          357
          358
          359
          360
          def to_request(self) -> Self:
              """Return a user-role version of this message.

              A message that already has role 'user' is returned unchanged.
              Otherwise, text and file parts are repackaged into a single
              UserPromptPart; other part kinds (tool calls, etc.) are dropped.

              Returns:
                  New ChatMessage with role='user' and converted parts
              """
              if self.role == "user":
                  return self

              # Gather convertible response parts into user content.
              collected: list[UserContent] = []
              for item in self.parts:
                  if isinstance(item, TextPart):
                      # Text parts become plain strings.
                      collected.append(item.content)
                  elif isinstance(item, FilePart):
                      # File parts (images, etc.) are carried over directly.
                      collected.append(item.content)
                  # Everything else (tool calls, ...) is intentionally skipped;
                  # further conversion cases could be added here.

              if not collected:
                  # Nothing convertible: fall back to the text form of the content.
                  new_parts: list[Any] = [UserPromptPart(content=str(self.content))]
              elif len(collected) == 1 and isinstance(collected[0], str):
                  # Single plain-string prompt.
                  new_parts = [UserPromptPart(content=collected[0])]
              else:
                  # Multi-modal prompt (mixed text / binary content).
                  new_parts = [UserPromptPart(content=collected)]

              return replace(self, role="user", parts=new_parts, cost_info=None)
          

          to_text_message

          to_text_message() -> ChatMessage[str]
          

          Convert this message to a text-only version.

          Source code in src/llmling_agent/messaging/messages.py
          314
          315
          316
          def to_text_message(self) -> ChatMessage[str]:
              """Return a copy of this message whose content is its string form."""
              text_content = str(self.content)
              return dataclasses.replace(self, content=text_content)  # type: ignore
          

          TeamResponse

          Bases: list[AgentResponse[Any]]

          Results from a team execution.

          Source code in src/llmling_agent/messaging/messages.py
          446
          447
          448
          449
          450
          451
          452
          453
          454
          455
          456
          457
          458
          459
          460
          461
          462
          463
          464
          465
          466
          467
          468
          469
          470
          471
          472
          473
          474
          475
          476
          477
          478
          479
          480
          481
          482
          483
          484
          485
          486
          487
          488
          489
          490
          491
          492
          493
          494
          495
          496
          497
          498
          499
          class TeamResponse[TMessageContent](list[AgentResponse[Any]]):
              """Results from a team execution."""
          
              def __init__(
                  self,
                  responses: list[AgentResponse[TMessageContent]],
                  start_time: datetime | None = None,
                  errors: dict[str, Exception] | None = None,
              ):
                  super().__init__(responses)
                  self.start_time = start_time or get_now()
                  self.end_time = get_now()
                  self.errors = errors or {}
          
              @property
              def duration(self) -> float:
                  """Get execution duration in seconds."""
                  return (self.end_time - self.start_time).total_seconds()
          
              @property
              def success(self) -> bool:
                  """Whether all agents completed successfully."""
                  return not bool(self.errors)
          
              @property
              def failed_agents(self) -> list[str]:
                  """Names of agents that failed."""
                  return list(self.errors.keys())
          
              def by_agent(self, name: str) -> AgentResponse[TMessageContent] | None:
                  """Get response from specific agent."""
                  return next((r for r in self if r.agent_name == name), None)
          
              def format_durations(self) -> str:
                  """Format execution times."""
                  parts = [f"{r.agent_name}: {r.timing:.2f}s" for r in self if r.timing is not None]
                  return f"Individual times: {', '.join(parts)}\nTotal time: {self.duration:.2f}s"
          
              # TODO: could keep TResultContent for len(messages) == 1
              def to_chat_message(self) -> ChatMessage[str]:
                  """Convert team response to a single chat message."""
                  # Combine all responses into one structured message
                  content = "\n\n".join(
                      f"[{response.agent_name}]: {response.message.content}"
                      for response in self
                      if response.message
                  )
                  meta = {
                      "type": "team_response",
                      "agents": [r.agent_name for r in self],
                      "duration": self.duration,
                      "success_count": len(self),
                  }
                  return ChatMessage(content=content, role="assistant", metadata=meta)  # type: ignore
          

          duration property

          duration: float
          

          Get execution duration in seconds.

          failed_agents property

          failed_agents: list[str]
          

          Names of agents that failed.

          success property

          success: bool
          

          Whether all agents completed successfully.

          by_agent

          by_agent(name: str) -> AgentResponse[TMessageContent] | None
          

          Get response from specific agent.

          Source code in src/llmling_agent/messaging/messages.py
          475
          476
          477
          def by_agent(self, name: str) -> AgentResponse[TMessageContent] | None:
              """Get response from specific agent."""
              return next((r for r in self if r.agent_name == name), None)
          

          format_durations

          format_durations() -> str
          

          Format execution times.

          Source code in src/llmling_agent/messaging/messages.py
          479
          480
          481
          482
          def format_durations(self) -> str:
              """Format execution times."""
              parts = [f"{r.agent_name}: {r.timing:.2f}s" for r in self if r.timing is not None]
              return f"Individual times: {', '.join(parts)}\nTotal time: {self.duration:.2f}s"
          

          to_chat_message

          to_chat_message() -> ChatMessage[str]
          

          Convert team response to a single chat message.

          Source code in src/llmling_agent/messaging/messages.py
          485
          486
          487
          488
          489
          490
          491
          492
          493
          494
          495
          496
          497
          498
          499
          def to_chat_message(self) -> ChatMessage[str]:
              """Convert team response to a single chat message."""
              # Combine all responses into one structured message
              content = "\n\n".join(
                  f"[{response.agent_name}]: {response.message.content}"
                  for response in self
                  if response.message
              )
              meta = {
                  "type": "team_response",
                  "agents": [r.agent_name for r in self],
                  "duration": self.duration,
                  "success_count": len(self),
              }
              return ChatMessage(content=content, role="assistant", metadata=meta)  # type: ignore
          

          TokenCost dataclass

          Combined token and cost tracking.

          Source code in src/llmling_agent/messaging/messages.py
          100
          101
          102
          103
          104
          105
          106
          107
          108
          109
          110
          111
          112
          113
          114
          115
          116
          117
          118
          119
          120
          121
          122
          123
          124
          125
          126
          127
          128
          129
          130
          131
          132
          133
          134
          135
          136
          137
          138
          139
          140
          141
          142
          143
          144
          145
          146
          147
          148
          @dataclass(frozen=True)
          class TokenCost:
              """Combined token and cost tracking."""

              token_usage: RunUsage
              """Token counts for prompt and completion"""
              total_cost: Decimal
              """Total cost in USD"""

              @classmethod
              async def from_usage(cls, usage: RunUsage | None, model: str) -> TokenCost | None:
                  """Create result from usage data.

                  Args:
                      usage: Token counts from model response
                      model: Name of the model used

                  Returns:
                      TokenCost if usage data available, None otherwise
                  """
                  if not (
                      usage and usage.input_tokens is not None and usage.output_tokens is not None
                  ):
                      logger.debug("Missing token counts in Usage object")
                      return None
                  logger.debug("Token usage", usage=usage)

                  # NOTE(review): "None"/"test" appear to be placeholder model names
                  # that should never be billed — confirm against callers.
                  if model in {"None", "test"}:
                      price = Decimal(0)
                  else:
                      # Model strings may be "provider:model" or a bare model name;
                      # a bare name defaults to the "openai" provider.
                      parts = model.split(":", 1)
                      try:
                          price_data = calc_price(
                              usage,
                              model_ref=parts[1] if len(parts) > 1 else parts[0],
                              provider_id=parts[0] if len(parts) > 1 else "openai",
                          )
                          price = price_data.total_price
                      except Exception:  # noqa: BLE001
                          # Best-effort fallback when calc_price has no pricing data
                          # for this model; an unknown model prices as 0.
                          cost = await tokonomics.calculate_token_cost(
                              model,
                              usage.input_tokens,
                              usage.output_tokens,
                          )
                          price = Decimal(cost.total_cost if cost else 0)

                  return cls(token_usage=usage, total_cost=price)
          

          token_usage instance-attribute

          token_usage: RunUsage
          

          Token counts for prompt and completion

          total_cost instance-attribute

          total_cost: Decimal
          

          Total cost in USD

          from_usage async classmethod

          from_usage(usage: RunUsage | None, model: str) -> TokenCost | None
          

          Create result from usage data.

          Parameters:

          Name Type Description Default
          usage RunUsage | None

          Token counts from model response

          required
          model str

          Name of the model used

          required

          Returns:

          Type Description
          TokenCost | None

          TokenCost if usage data available, None otherwise

          Source code in src/llmling_agent/messaging/messages.py
          109
          110
          111
          112
          113
          114
          115
          116
          117
          118
          119
          120
          121
          122
          123
          124
          125
          126
          127
          128
          129
          130
          131
          132
          133
          134
          135
          136
          137
          138
          139
          140
          141
          142
          143
          144
          145
          146
          147
          148
          @classmethod
          async def from_usage(cls, usage: RunUsage | None, model: str) -> TokenCost | None:
              """Create result from usage data.

              Args:
                  usage: Token counts from model response
                  model: Name of the model used

              Returns:
                  TokenCost if usage data available, None otherwise
              """
              if not (
                  usage and usage.input_tokens is not None and usage.output_tokens is not None
              ):
                  logger.debug("Missing token counts in Usage object")
                  return None
              logger.debug("Token usage", usage=usage)

              # NOTE(review): "None"/"test" appear to be placeholder model names
              # that should never be billed — confirm against callers.
              if model in {"None", "test"}:
                  price = Decimal(0)
              else:
                  # Model strings may be "provider:model" or a bare model name;
                  # a bare name defaults to the "openai" provider.
                  parts = model.split(":", 1)
                  try:
                      price_data = calc_price(
                          usage,
                          model_ref=parts[1] if len(parts) > 1 else parts[0],
                          provider_id=parts[0] if len(parts) > 1 else "openai",
                      )
                      price = price_data.total_price
                  except Exception:  # noqa: BLE001
                      # Best-effort fallback when calc_price has no pricing data
                      # for this model; an unknown model prices as 0.
                      cost = await tokonomics.calculate_token_cost(
                          model,
                          usage.input_tokens,
                          usage.output_tokens,
                      )
                      price = Decimal(cost.total_cost if cost else 0)

              return cls(token_usage=usage, total_cost=price)