Skip to content

message_container

Class info

Classes

Name Children Inherits
ChatMessage
llmling_agent.messaging.messages
Common message format for all UI types.
    ChatMessageContainer
    llmling_agent.messaging.message_container
    Container for tracking and managing chat messages.

      🛈 DocStrings

      Message container with statistics and formatting capabilities.

      ChatMessageContainer

      Bases: EventedList[ChatMessage[Any]]

      Container for tracking and managing chat messages.

      Extends EventedList to provide:

      - Message statistics (tokens, costs)
      - History formatting
      - Token-aware context window management
      - Role-based filtering

      Source code in src/llmling_agent/messaging/message_container.py
       28
       29
       30
       31
       32
       33
       34
       35
       36
       37
       38
       39
       40
       41
       42
       43
       44
       45
       46
       47
       48
       49
       50
       51
       52
       53
       54
       55
       56
       57
       58
       59
       60
       61
       62
       63
       64
       65
       66
       67
       68
       69
       70
       71
       72
       73
       74
       75
       76
       77
       78
       79
       80
       81
       82
       83
       84
       85
       86
       87
       88
       89
       90
       91
       92
       93
       94
       95
       96
       97
       98
       99
      100
      101
      102
      103
      104
      105
      106
      107
      108
      109
      110
      111
      112
      113
      114
      115
      116
      117
      118
      119
      120
      121
      122
      123
      124
      125
      126
      127
      128
      129
      130
      131
      132
      133
      134
      135
      136
      137
      138
      139
      140
      141
      142
      143
      144
      145
      146
      147
      148
      149
      150
      151
      152
      153
      154
      155
      156
      157
      158
      159
      160
      161
      162
      163
      164
      165
      166
      167
      168
      169
      170
      171
      172
      173
      174
      175
      176
      177
      178
      179
      180
      181
      182
      183
      184
      185
      186
      187
      188
      189
      190
      191
      192
      193
      194
      195
      196
      197
      198
      199
      200
      201
      202
      203
      204
      205
      206
      207
      208
      209
      210
      211
      212
      213
      214
      215
      216
      217
      218
      219
      220
      221
      222
      223
      224
      225
      226
      227
      228
      229
      230
      231
      232
      233
      234
      235
      236
      237
      238
      239
      240
      241
      242
      243
      244
      245
      246
      247
      248
      249
      250
      251
      252
      253
      254
      255
      256
      257
      258
      259
      260
      261
      262
      263
      264
      265
      266
      class ChatMessageContainer(EventedList[ChatMessage[Any]]):
          """Container for tracking and managing chat messages.

          Extends EventedList to provide:
          - Message statistics (tokens, costs)
          - History formatting
          - Token-aware context window management
          - Role-based filtering
          """

          def get_message_tokens(self, message: ChatMessage[Any]) -> int:
              """Get token count for a single message.

              Uses cost_info if available, falls back to tiktoken estimation.

              Args:
                  message: Message to count tokens for

              Returns:
                  Token count for the message
              """
              if message.cost_info:
                  return message.cost_info.token_usage["total"]
              return count_tokens(str(message.content), message.model)

          def get_history_tokens(self, fallback_model: str | None = None) -> int:
              """Get total token count for all messages.

              Uses cost_info when available, falls back to tiktoken estimation
              for messages without usage information.

              Args:
                  fallback_model: Model name to use for estimating messages
                      without cost_info. When not given, the first model found
                      in the history is used, defaulting to "gpt-3.5-turbo".

              Returns:
                  Total token count across all messages
              """
              # Messages with usage info contribute their exact recorded totals
              total = sum(msg.cost_info.token_usage["total"] for msg in self if msg.cost_info)

              # For messages without cost_info, estimate using tiktoken (batched)
              if msgs := [msg for msg in self if not msg.cost_info]:
                  if fallback_model:
                      model_name = fallback_model
                  else:
                      model_name = next((m.model for m in self if m.model), "gpt-3.5-turbo")
                  contents = [str(msg.content) for msg in msgs]
                  total += sum(batch_count_tokens(contents, model_name))

              return total

          def get_total_cost(self) -> float:
              """Calculate total cost in USD across all messages.

              Only includes messages with cost information.

              Returns:
                  Total cost in USD
              """
              return sum(float(msg.cost_info.total_cost) for msg in self if msg.cost_info)

          @property
          def last_message(self) -> ChatMessage[Any] | None:
              """Get most recent message or None if empty."""
              return self[-1] if self else None

          def format(
              self,
              *,
              style: FormatStyle = "simple",
              **kwargs: Any,
          ) -> str:
              """Format conversation history with configurable style.

              Args:
                  style: Formatting style to use
                  **kwargs: Additional formatting options passed to message.format()

              Returns:
                  Formatted conversation history as string
              """
              return "\n".join(msg.format(style=style, **kwargs) for msg in self)

          def filter_by_role(
              self,
              role: MessageRole,
              *,
              max_messages: int | None = None,
          ) -> list[ChatMessage[Any]]:
              """Get messages with specific role.

              Args:
                  role: Role to filter by (user/assistant/system)
                  max_messages: Optional limit on number of messages to return
                      (keeps the most recent ones)

              Returns:
                  List of messages with matching role
              """
              messages = [msg for msg in self if msg.role == role]
              if max_messages:
                  messages = messages[-max_messages:]
              return messages

          def get_context_window(
              self,
              *,
              max_tokens: int | None = None,
              max_messages: int | None = None,
              include_system: bool = True,
          ) -> list[ChatMessage[Any]]:
              """Get messages respecting token and message limits.

              Args:
                  max_tokens: Optional token limit for window
                  max_messages: Optional message count limit
                  include_system: Whether to include system messages

              Returns:
                  List of messages fitting within constraints
              """
              # Filter system messages if needed
              history: Sequence[ChatMessage[Any]] = self
              if not include_system:
                  history = [msg for msg in self if msg.role != "system"]

              # Apply message limit if specified (keep most recent)
              if max_messages:
                  history = history[-max_messages:]

              # Apply token limit if specified
              if max_tokens:
                  token_count = 0
                  # Precise element type instead of Any for better tooling support
                  filtered: list[ChatMessage[Any]] = []

                  # Work backwards from most recent so the newest messages fit first
                  for msg in reversed(history):
                      msg_tokens = self.get_message_tokens(msg)
                      if token_count + msg_tokens > max_tokens:
                          break
                      token_count += msg_tokens
                      filtered.insert(0, msg)
                  history = filtered

              return list(history)

          def get_between(
              self,
              *,
              start_time: datetime | None = None,
              end_time: datetime | None = None,
          ) -> list[ChatMessage[Any]]:
              """Get messages within a time range.

              Args:
                  start_time: Optional start of range (inclusive)
                  end_time: Optional end of range (inclusive)

              Returns:
                  List of messages within the time range
              """
              messages = list(self)
              if start_time:
                  messages = [msg for msg in messages if msg.timestamp >= start_time]
              if end_time:
                  messages = [msg for msg in messages if msg.timestamp <= end_time]
              return messages

          def _build_flow_dag(self, message: ChatMessage[Any]) -> DAGNode | None:
              """Build DAG from message flow.

              Args:
                  message: Message to build flow DAG for

              Returns:
                  Root DAGNode of the graph, or None when the conversation
                  has no forwarding information
              """
              from bigtree import DAGNode

              # Get messages from this conversation
              conv_messages = [
                  msg for msg in self if msg.conversation_id == message.conversation_id
              ]

              # First create all nodes (one per unique agent name)
              nodes: dict[str, DAGNode] = {}

              for msg in conv_messages:
                  if msg.forwarded_from:
                      chain = [*msg.forwarded_from, msg.name or "unknown"]
                      for name in chain:
                          if name not in nodes:
                              nodes[name] = DAGNode(name)

              # Then set up parent relationships
              for msg in conv_messages:
                  if msg.forwarded_from:
                      chain = [*msg.forwarded_from, msg.name or "unknown"]
                      # Connect consecutive nodes, avoiding duplicate edges
                      for parent_name, child_name in itertools.pairwise(chain):
                          parent = nodes[parent_name]
                          child = nodes[child_name]
                          if parent not in child.parents:
                              child.parents = [*child.parents, parent]

              # Find root nodes (those without parents)
              roots = [node for node in nodes.values() if not node.parents]
              if not roots:
                  return None
              return roots[0]  # Return first root for now

          def to_mermaid_graph(
              self,
              message: ChatMessage[Any],
              *,
              title: str = "",
              theme: str | None = None,
              rankdir: Literal["TB", "BT", "LR", "RL"] = "LR",
          ) -> str:
              """Convert message flow to mermaid graph.

              Args:
                  message: Message whose conversation flow should be rendered
                  title: Optional graph title
                  theme: Optional mermaid theme name
                  rankdir: Direction of the flowchart layout

              Returns:
                  Mermaid flowchart wrapped in a fenced code block, or an
                  empty string when the message has no flow information
              """
              from bigtree import dag_to_list

              dag = self._build_flow_dag(message)
              if not dag:
                  return ""

              # Get list of (parent, child) connections
              connections = dag_to_list(dag)

              # Convert to mermaid
              lines = ["```mermaid"]
              if title:
                  lines.extend(["---", f"title: {title}", "---"])
              if theme:
                  lines.append(f'%%{{ init: {{ "theme": "{theme}" }} }}%%')
              lines.append(f"flowchart {rankdir}")

              # One edge per connection (extend over manual append loop)
              lines.extend(f"    {parent}-->{child}" for parent, child in connections)

              lines.append("```")
              return "\n".join(lines)
      

      last_message property

      last_message: ChatMessage[Any] | None
      

      Get most recent message or None if empty.

      _build_flow_dag

      _build_flow_dag(message: ChatMessage[Any]) -> DAGNode | None
      

      Build DAG from message flow.

      Parameters:

      Name Type Description Default
      message ChatMessage[Any]

      Message to build flow DAG for

      required

      Returns:

      Type Description
      DAGNode | None

      Root DAGNode of the graph

      Source code in src/llmling_agent/messaging/message_container.py
      192
      193
      194
      195
      196
      197
      198
      199
      200
      201
      202
      203
      204
      205
      206
      207
      208
      209
      210
      211
      212
      213
      214
      215
      216
      217
      218
      219
      220
      221
      222
      223
      224
      225
      226
      227
      228
      229
      230
      231
      232
      233
      def _build_flow_dag(self, message: ChatMessage[Any]) -> DAGNode | None:
          """Build DAG from message flow.

          Args:
              message: Message to build flow DAG for

          Returns:
              Root DAGNode of the graph, or None when no flow exists
          """
          from bigtree import DAGNode

          # Restrict to messages belonging to the same conversation
          conv_id = message.conversation_id
          relevant = [m for m in self if m.conversation_id == conv_id]

          # Each chain is the forwarding path plus the final recipient name
          chains = [
              [*m.forwarded_from, m.name or "unknown"]
              for m in relevant
              if m.forwarded_from
          ]

          # Pass 1: create exactly one node per unique agent name
          nodes: dict[str, DAGNode] = {}
          for chain in chains:
              for name in chain:
                  nodes.setdefault(name, DAGNode(name))

          # Pass 2: link consecutive names in each chain, skipping duplicate edges
          for chain in chains:
              for parent_name, child_name in itertools.pairwise(chain):
                  parent = nodes[parent_name]
                  child = nodes[child_name]
                  if parent not in child.parents:
                      child.parents = [*child.parents, parent]

          # A node without parents is a root; return the first one found
          roots = [node for node in nodes.values() if not node.parents]
          return roots[0] if roots else None
      

      filter_by_role

      filter_by_role(
          role: MessageRole, *, max_messages: int | None = None
      ) -> list[ChatMessage[Any]]
      

      Get messages with specific role.

      Parameters:

      Name Type Description Default
      role MessageRole

      Role to filter by (user/assistant/system)

      required
      max_messages int | None

      Optional limit on number of messages to return

      None

      Returns:

      Type Description
      list[ChatMessage[Any]]

      List of messages with matching role

      Source code in src/llmling_agent/messaging/message_container.py
      108
      109
      110
      111
      112
      113
      114
      115
      116
      117
      118
      119
      120
      121
      122
      123
      124
      125
      126
      def filter_by_role(
          self,
          role: MessageRole,
          *,
          max_messages: int | None = None,
      ) -> list[ChatMessage[Any]]:
          """Get messages with specific role.

          Args:
              role: Role to filter by (user/assistant/system)
              max_messages: Optional limit on number of messages to return

          Returns:
              List of messages with matching role
          """
          matching = [m for m in self if m.role == role]
          # Keep only the most recent entries when a limit is given
          return matching[-max_messages:] if max_messages else matching
      

      format

      format(*, style: FormatStyle = 'simple', **kwargs: Any) -> str
      

      Format conversation history with configurable style.

      Parameters:

      Name Type Description Default
      style FormatStyle

      Formatting style to use

      'simple'
      **kwargs Any

      Additional formatting options passed to message.format()

      {}

      Returns:

      Type Description
      str

      Formatted conversation history as string

      Source code in src/llmling_agent/messaging/message_container.py
       91
       92
       93
       94
       95
       96
       97
       98
       99
      100
      101
      102
      103
      104
      105
      106
      def format(
          self,
          *,
          style: FormatStyle = "simple",
          **kwargs: Any,
      ) -> str:
          """Format conversation history with configurable style.

          Args:
              style: Formatting style to use
              **kwargs: Additional formatting options passed to message.format()

          Returns:
              Formatted conversation history as string
          """
          formatted = (message.format(style=style, **kwargs) for message in self)
          return "\n".join(formatted)
      

      get_between

      get_between(
          *, start_time: datetime | None = None, end_time: datetime | None = None
      ) -> list[ChatMessage[Any]]
      

      Get messages within a time range.

      Parameters:

      Name Type Description Default
      start_time datetime | None

      Optional start of range

      None
      end_time datetime | None

      Optional end of range

      None

      Returns:

      Type Description
      list[ChatMessage[Any]]

      List of messages within the time range

      Source code in src/llmling_agent/messaging/message_container.py
      170
      171
      172
      173
      174
      175
      176
      177
      178
      179
      180
      181
      182
      183
      184
      185
      186
      187
      188
      189
      190
      def get_between(
          self,
          *,
          start_time: datetime | None = None,
          end_time: datetime | None = None,
      ) -> list[ChatMessage[Any]]:
          """Get messages within a time range.

          Args:
              start_time: Optional start of range
              end_time: Optional end of range

          Returns:
              List of messages within the time range
          """
          def in_range(msg: ChatMessage[Any]) -> bool:
              # Both bounds are inclusive; a missing bound is unbounded
              if start_time and msg.timestamp < start_time:
                  return False
              return not (end_time and msg.timestamp > end_time)

          return [msg for msg in self if in_range(msg)]
      

      get_context_window

      get_context_window(
          *,
          max_tokens: int | None = None,
          max_messages: int | None = None,
          include_system: bool = True,
      ) -> list[ChatMessage[Any]]
      

      Get messages respecting token and message limits.

      Parameters:

      Name Type Description Default
      max_tokens int | None

      Optional token limit for window

      None
      max_messages int | None

      Optional message count limit

      None
      include_system bool

      Whether to include system messages

      True

      Returns:

      Type Description
      list[ChatMessage[Any]]

      List of messages fitting within constraints

      Source code in src/llmling_agent/messaging/message_container.py
      128
      129
      130
      131
      132
      133
      134
      135
      136
      137
      138
      139
      140
      141
      142
      143
      144
      145
      146
      147
      148
      149
      150
      151
      152
      153
      154
      155
      156
      157
      158
      159
      160
      161
      162
      163
      164
      165
      166
      167
      168
      def get_context_window(
          self,
          *,
          max_tokens: int | None = None,
          max_messages: int | None = None,
          include_system: bool = True,
      ) -> list[ChatMessage[Any]]:
          """Get messages respecting token and message limits.

          Args:
              max_tokens: Optional token limit for window
              max_messages: Optional message count limit
              include_system: Whether to include system messages

          Returns:
              List of messages fitting within constraints
          """
          # Start from the full history, optionally dropping system messages
          window: Sequence[ChatMessage[Any]]
          if include_system:
              window = self
          else:
              window = [m for m in self if m.role != "system"]

          # Keep only the most recent N messages when a count limit is set
          if max_messages:
              window = window[-max_messages:]

          # Trim from the oldest end until the newest messages fit the budget
          if max_tokens:
              kept: list[Any] = []
              used = 0
              for msg in reversed(window):
                  cost = self.get_message_tokens(msg)
                  if used + cost > max_tokens:
                      break
                  used += cost
                  kept.insert(0, msg)
              window = kept

          return list(window)
      

      get_history_tokens

      get_history_tokens(fallback_model: str | None = None) -> int
      

      Get total token count for all messages.

      Uses cost_info when available, falls back to tiktoken estimation for messages without usage information.

      Returns:

      Type Description
      int

      Total token count across all messages

      Source code in src/llmling_agent/messaging/message_container.py
      53
      54
      55
      56
      57
      58
      59
      60
      61
      62
      63
      64
      65
      66
      67
      68
      69
      70
      71
      72
      73
      74
      def get_history_tokens(self, fallback_model: str | None = None) -> int:
          """Get total token count for all messages.

          Uses cost_info when available, falls back to tiktoken estimation
          for messages without usage information.

          Args:
              fallback_model: Model name to use for estimating messages
                  without cost_info. When not given, the first model found
                  in the history is used, defaulting to "gpt-3.5-turbo".

          Returns:
              Total token count across all messages
          """
          # Messages with usage info contribute their exact recorded totals
          total = sum(msg.cost_info.token_usage["total"] for msg in self if msg.cost_info)

          # For messages without cost_info, estimate using tiktoken (batched)
          if msgs := [msg for msg in self if not msg.cost_info]:
              if fallback_model:
                  model_name = fallback_model
              else:
                  model_name = next((m.model for m in self if m.model), "gpt-3.5-turbo")
              contents = [str(msg.content) for msg in msgs]
              total += sum(batch_count_tokens(contents, model_name))

          return total
      

      get_message_tokens

      get_message_tokens(message: ChatMessage[Any]) -> int
      

      Get token count for a single message.

      Uses cost_info if available, falls back to tiktoken estimation.

      Parameters:

      Name Type Description Default
      message ChatMessage[Any]

      Message to count tokens for

      required

      Returns:

      Type Description
      int

      Token count for the message

      Source code in src/llmling_agent/messaging/message_container.py
      38
      39
      40
      41
      42
      43
      44
      45
      46
      47
      48
      49
      50
      51
      def get_message_tokens(self, message: ChatMessage[Any]) -> int:
          """Get token count for a single message.

          Prefers the exact count recorded in cost_info; otherwise
          estimates with tiktoken.

          Args:
              message: Message to count tokens for

          Returns:
              Token count for the message
          """
          info = message.cost_info
          if info:
              return info.token_usage["total"]
          return count_tokens(str(message.content), message.model)
      

      get_total_cost

      get_total_cost() -> float
      

      Calculate total cost in USD across all messages.

      Only includes messages with cost information.

      Returns:

      Type Description
      float

      Total cost in USD

      Source code in src/llmling_agent/messaging/message_container.py
      76
      77
      78
      79
      80
      81
      82
      83
      84
      def get_total_cost(self) -> float:
          """Calculate total cost in USD across all messages.
      
          Only includes messages with cost information.
      
          Returns:
              Total cost in USD
          """
          return sum(float(msg.cost_info.total_cost) for msg in self if msg.cost_info)
      

      to_mermaid_graph

      to_mermaid_graph(
          message: ChatMessage[Any],
          *,
          title: str = "",
          theme: str | None = None,
          rankdir: Literal["TB", "BT", "LR", "RL"] = "LR",
      ) -> str
      

      Convert message flow to mermaid graph.

      Source code in src/llmling_agent/messaging/message_container.py
      235
      236
      237
      238
      239
      240
      241
      242
      243
      244
      245
      246
      247
      248
      249
      250
      251
      252
      253
      254
      255
      256
      257
      258
      259
      260
      261
      262
      263
      264
      265
      266
      def to_mermaid_graph(
          self,
          message: ChatMessage[Any],
          *,
          title: str = "",
          theme: str | None = None,
          rankdir: Literal["TB", "BT", "LR", "RL"] = "LR",
      ) -> str:
          """Convert message flow to mermaid graph.

          Args:
              message: Message whose conversation flow should be rendered
              title: Optional graph title
              theme: Optional mermaid theme name
              rankdir: Direction of the flowchart layout

          Returns:
              Mermaid flowchart wrapped in a fenced code block, or an
              empty string when the message has no flow information
          """
          from bigtree import dag_to_list

          dag = self._build_flow_dag(message)
          if not dag:
              return ""

          # Get list of (parent, child) connections
          connections = dag_to_list(dag)

          # Convert to mermaid
          lines = ["```mermaid"]
          if title:
              lines.extend(["---", f"title: {title}", "---"])
          if theme:
              lines.append(f'%%{{ init: {{ "theme": "{theme}" }} }}%%')
          lines.append(f"flowchart {rankdir}")

          # One edge per connection (extend over manual append loop)
          lines.extend(f"    {parent}-->{child}" for parent, child in connections)

          lines.append("```")
          return "\n".join(lines)