Skip to content

ChatMessage

Base classes

Name | Children | Inherits
--- | --- | ---
ChatMessage | — | typing.Generic (abstract base class for generic types)

⋔ Inheritance diagram

graph TD
  94350421322800["messages.ChatMessage"]
  94350360566400["typing.Generic"]
  140709601677504["builtins.object"]
  94350360566400 --> 94350421322800
  140709601677504 --> 94350360566400

🛈 DocStrings

Common message format for all UI types.

Generically typed with: ChatMessage[Type of Content] The type can either be str or a BaseModel subclass.

Source code in src/llmling_agent/models/messages.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
@dataclass
class ChatMessage[TContent]:
    """Common message format for all UI types.

    Generically typed with: ChatMessage[Type of Content]
    The type can either be str or a BaseModel subclass.
    """

    content: TContent
    """Message content, typed as TContent (either str or BaseModel)."""

    role: MessageRole
    """Role of the message sender (user/assistant/system)."""

    model: str | None = None
    """Name of the model that generated this message."""

    metadata: JsonObject = field(default_factory=dict)
    """Additional metadata about the message."""

    timestamp: datetime = field(default_factory=datetime.now)
    """When this message was created."""

    cost_info: TokenCost | None = None
    """Token usage and costs for this specific message if available."""

    message_id: str = field(default_factory=lambda: str(uuid4()))
    """Unique identifier for this message."""

    response_time: float | None = None
    """Time it took the LLM to respond."""

    tool_calls: list[ToolCallInfo] = field(default_factory=list)
    """List of tool calls made during message generation."""

    name: str | None = None
    """Display name for the message sender in UI."""

    forwarded_from: list[str] = field(default_factory=list)
    """List of agent names (the chain) that forwarded this message to the sender."""

    def to_text_message(self) -> ChatMessage[str]:
        """Convert this message to a text-only version."""
        return dataclasses.replace(self, content=str(self.content))  # type: ignore

    def _get_content_str(self) -> str:
        """Get string representation of content."""
        match self.content:
            case str():
                return self.content
            case BaseModel():
                return self.content.model_dump_json(indent=2)
            case _:
                msg = f"Unexpected content type: {type(self.content)}"
                raise ValueError(msg)

    def to_gradio_format(self) -> tuple[str | None, str | None]:
        """Convert to Gradio chatbot format."""
        content_str = self._get_content_str()
        match self.role:
            case "user":
                return (content_str, None)
            case "assistant":
                return (None, content_str)
            case "system":
                return (None, f"System: {content_str}")

    @property
    def data(self) -> TContent:
        """Get content as typed data. Provides compat to RunResult."""
        return self.content

    def format(
        self,
        style: Literal["simple", "detailed", "markdown"] = "simple",
        *,
        show_metadata: bool = False,
        show_costs: bool = False,
    ) -> str:
        """Format message with configurable style."""
        match style:
            case "simple":
                return self._format_simple()
            case "detailed":
                return self._format_detailed(show_metadata, show_costs)
            case "markdown":
                return self._format_markdown(show_metadata, show_costs)
            case _:
                msg = f"Invalid style: {style}"
                raise ValueError(msg)

    def _format_simple(self) -> str:
        """Basic format: sender and message."""
        sender = self.name or self.role.title()
        return f"{sender}: {self.content}"

    def _format_detailed(self, show_metadata: bool, show_costs: bool) -> str:
        """Detailed format with optional metadata and costs."""
        ts = self.timestamp.strftime("%Y-%m-%d %H:%M:%S")
        name = self.name or self.role.title()
        parts = [f"From: {name}", f"Time: {ts}", "-" * 40, f"{self.content}", "-" * 40]

        if show_costs and self.cost_info:
            parts.extend([
                f"Tokens: {self.cost_info.token_usage['total']:,}",
                f"Cost: ${self.cost_info.total_cost:.4f}",
            ])
            if self.response_time:
                parts.append(f"Response time: {self.response_time:.2f}s")

        if show_metadata and self.metadata:
            parts.append("Metadata:")
            parts.extend(f"  {k}: {v}" for k, v in self.metadata.items())
        if self.forwarded_from:
            forwarded_from = " -> ".join(self.forwarded_from)
            parts.append(f"Forwarded via: {forwarded_from}")

        return "\n".join(parts)

    def _format_markdown(self, show_metadata: bool, show_costs: bool) -> str:
        """Markdown format for rich display."""
        name = self.name or self.role.title()
        timestamp = self.timestamp.strftime("%Y-%m-%d %H:%M:%S")
        parts = [f"## {name}", f"*{timestamp}*", "", str(self.content), ""]

        if show_costs and self.cost_info:
            parts.extend([
                "---",
                "**Stats:**",
                f"- Tokens: {self.cost_info.token_usage['total']:,}",
                f"- Cost: ${self.cost_info.total_cost:.4f}",
            ])
            if self.response_time:
                parts.append(f"- Response time: {self.response_time:.2f}s")

        if show_metadata and self.metadata:
            meta = yamling.dump_yaml(self.metadata)
            parts.extend(["", "**Metadata:**", "```", meta, "```"])

        if self.forwarded_from:
            parts.append(f"\n*Forwarded via: {' → '.join(self.forwarded_from)}*")

        return "\n".join(parts)

content instance-attribute

content: TContent

Message content, typed as TContent (either str or BaseModel).

cost_info class-attribute instance-attribute

cost_info: TokenCost | None = None

Token usage and costs for this specific message if available.

data property

data: TContent

Get content as typed data. Provides compat to RunResult.

forwarded_from class-attribute instance-attribute

forwarded_from: list[str] = field(default_factory=list)

List of agent names (the chain) that forwarded this message to the sender.

message_id class-attribute instance-attribute

message_id: str = field(default_factory=lambda: str(uuid4()))

Unique identifier for this message.

metadata class-attribute instance-attribute

metadata: JsonObject = field(default_factory=dict)

Additional metadata about the message.

model class-attribute instance-attribute

model: str | None = None

Name of the model that generated this message.

name class-attribute instance-attribute

name: str | None = None

Display name for the message sender in UI.

response_time class-attribute instance-attribute

response_time: float | None = None

Time it took the LLM to respond.

role instance-attribute

role: MessageRole

Role of the message sender (user/assistant/system).

timestamp class-attribute instance-attribute

timestamp: datetime = field(default_factory=datetime.now)

When this message was created.

tool_calls class-attribute instance-attribute

tool_calls: list[ToolCallInfo] = field(default_factory=list)

List of tool calls made during message generation.

format

format(
    style: Literal["simple", "detailed", "markdown"] = "simple",
    *,
    show_metadata: bool = False,
    show_costs: bool = False,
) -> str

Format message with configurable style.

Source code in src/llmling_agent/models/messages.py
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def format(
    self,
    style: Literal["simple", "detailed", "markdown"] = "simple",
    *,
    show_metadata: bool = False,
    show_costs: bool = False,
) -> str:
    """Format message with configurable style.

    Args:
        style: One of "simple", "detailed", or "markdown".
        show_metadata: Include message metadata in the output.
        show_costs: Include token/cost stats when cost info is available.

    Raises:
        ValueError: If style is not one of the accepted literals.
    """
    match style:
        case "simple":
            return self._format_simple()
        case "detailed":
            return self._format_detailed(show_metadata, show_costs)
        case "markdown":
            return self._format_markdown(show_metadata, show_costs)
        case _:
            msg = f"Invalid style: {style}"
            raise ValueError(msg)

to_gradio_format

to_gradio_format() -> tuple[str | None, str | None]

Convert to Gradio chatbot format.

Source code in src/llmling_agent/models/messages.py
147
148
149
150
151
152
153
154
155
156
def to_gradio_format(self) -> tuple[str | None, str | None]:
    """Convert to Gradio chatbot format.

    Returns:
        A (user_text, assistant_text) tuple. System messages are shown
        on the assistant side with a "System: " prefix.
    """
    content_str = self._get_content_str()
    match self.role:
        case "user":
            return (content_str, None)
        case "assistant":
            return (None, content_str)
        case "system":
            return (None, f"System: {content_str}")
    # NOTE(review): an unmatched role falls through and returns None
    # implicitly, contradicting the annotated return type — confirm
    # MessageRole is limited to the three literals handled above.

to_text_message

to_text_message() -> ChatMessage[str]

Convert this message to a text-only version.

Source code in src/llmling_agent/models/messages.py
132
133
134
def to_text_message(self) -> ChatMessage[str]:
    """Convert this message to a text-only version.

    All other fields are copied unchanged; content is flattened via str().
    """
    # replace() returns a new dataclass instance; the ignore silences the
    # TContent -> str narrowing the type checker cannot express here.
    return dataclasses.replace(self, content=str(self.content))  # type: ignore

Show source on GitHub