Skip to content

conversation

Class info

Classes

Name Children Inherits
ChatMessage
llmling_agent.messaging.messages
Common message format for all UI types.
    ChatMessageContainer
    llmling_agent.messaging.message_container
    Container for tracking and managing chat messages.
      ConversationManager
      llmling_agent.agent.conversation
      Manages conversation state and system prompts.
        MemoryConfig
        llmling_agent.models.session
        Configuration for agent memory and history handling.
          SessionQuery
          llmling_agent.models.session
          Query configuration for session recovery.

            🛈 DocStrings

            Conversation management for LLMling agent.

            ConversationManager

            Manages conversation state and system prompts.

            Source code in src/llmling_agent/agent/conversation.py
             51
             52
             53
             54
             55
             56
             57
             58
             59
             60
             61
             62
             63
             64
             65
             66
             67
             68
             69
             70
             71
             72
             73
             74
             75
             76
             77
             78
             79
             80
             81
             82
             83
             84
             85
             86
             87
             88
             89
             90
             91
             92
             93
             94
             95
             96
             97
             98
             99
            100
            101
            102
            103
            104
            105
            106
            107
            108
            109
            110
            111
            112
            113
            114
            115
            116
            117
            118
            119
            120
            121
            122
            123
            124
            125
            126
            127
            128
            129
            130
            131
            132
            133
            134
            135
            136
            137
            138
            139
            140
            141
            142
            143
            144
            145
            146
            147
            148
            149
            150
            151
            152
            153
            154
            155
            156
            157
            158
            159
            160
            161
            162
            163
            164
            165
            166
            167
            168
            169
            170
            171
            172
            173
            174
            175
            176
            177
            178
            179
            180
            181
            182
            183
            184
            185
            186
            187
            188
            189
            190
            191
            192
            193
            194
            195
            196
            197
            198
            199
            200
            201
            202
            203
            204
            205
            206
            207
            208
            209
            210
            211
            212
            213
            214
            215
            216
            217
            218
            219
            220
            221
            222
            223
            224
            225
            226
            227
            228
            229
            230
            231
            232
            233
            234
            235
            236
            237
            238
            239
            240
            241
            242
            243
            244
            245
            246
            247
            248
            249
            250
            251
            252
            253
            254
            255
            256
            257
            258
            259
            260
            261
            262
            263
            264
            265
            266
            267
            268
            269
            270
            271
            272
            273
            274
            275
            276
            277
            278
            279
            280
            281
            282
            283
            284
            285
            286
            287
            288
            289
            290
            291
            292
            293
            294
            295
            296
            297
            298
            299
            300
            301
            302
            303
            304
            305
            306
            307
            308
            309
            310
            311
            312
            313
            314
            315
            316
            317
            318
            319
            320
            321
            322
            323
            324
            325
            326
            327
            328
            329
            330
            331
            332
            333
            334
            335
            336
            337
            338
            339
            340
            341
            342
            343
            344
            345
            346
            347
            348
            349
            350
            351
            352
            353
            354
            355
            356
            357
            358
            359
            360
            361
            362
            363
            364
            365
            366
            367
            368
            369
            370
            371
            372
            373
            374
            375
            376
            377
            378
            379
            380
            381
            382
            383
            384
            385
            386
            387
            388
            389
            390
            391
            392
            393
            394
            395
            396
            397
            398
            399
            400
            401
            402
            403
            404
            405
            406
            407
            408
            409
            410
            411
            412
            413
            414
            415
            416
            417
            418
            419
            420
            421
            422
            423
            424
            425
            426
            427
            428
            429
            430
            431
            432
            433
            434
            435
            436
            437
            438
            439
            440
            441
            442
            443
            444
            445
            446
            447
            448
            449
            450
            451
            452
            453
            454
            455
            456
            457
            458
            459
            460
            461
            462
            463
            464
            465
            466
            467
            468
            469
            470
            471
            472
            473
            474
            475
            476
            477
            478
            479
            480
            481
            482
            483
            484
            485
            486
            487
            488
            489
            490
            491
            492
            493
            494
            495
            496
            497
            498
            499
            500
            501
            502
            503
            504
            505
            506
            507
            508
            509
            510
            511
            512
            513
            514
            515
            516
            517
            518
            519
            520
            521
            522
            523
            524
            525
            class ConversationManager:
                """Manages conversation state and system prompts.

                Holds the agent's chat history (``chat_messages``), a queue of
                pending context messages that will be attached to the next
                interaction, and the session id used for storage queries.
                """

                @dataclass(frozen=True)
                class HistoryCleared:
                    """Emitted when chat history is cleared."""

                    # Id of the session whose history was cleared.
                    session_id: str
                    # When the clear happened (defaults to event-creation time).
                    timestamp: datetime = field(default_factory=datetime.now)

                # Fired by clear(); payload is a HistoryCleared event.
                history_cleared = Signal(HistoryCleared)
            
                def __init__(
                    self,
                    agent: Agent[Any],
                    session_config: MemoryConfig | None = None,
                    *,
                    resources: Sequence[Resource | PromptType | str] = (),
                ):
                    """Initialize conversation manager.

                    Args:
                        agent: Agent instance whose conversation is managed.
                        session_config: Optional memory / session configuration.
                        resources: Optional context sources loaded during async init.
                    """
                    self._agent = agent
                    self._config = session_config
                    # Kept around for deferred loading during async initialization.
                    self._resources = list(resources)
                    self.chat_messages = ChatMessageContainer()
                    self._last_messages: list[ChatMessage] = []
                    self._pending_messages: deque[ChatMessage] = deque()
                    # Fresh id unless a named session is recovered below.
                    self.id = str(uuid4())

                    has_session = session_config is not None and session_config.session is not None
                    if has_session:
                        storage = self._agent.context.storage
                        self._current_history = storage.filter_messages_sync(session_config.session)
                        if session_config.session.name:
                            self.id = session_config.session.name

                    # max_messages / max_tokens limits are applied in get_history()
                    # so the rolling window is maintained during the conversation.
            
                def get_initialization_tasks(self) -> list[Coroutine[Any, Any, Any]]:
                    """Get all initialization coroutines.

                    Returns one loader coroutine per stored resource and clears
                    the stored list so resources are not loaded a second time.
                    """
                    # BUG FIX: snapshot the list *before* clearing it. The old code
                    # cleared self._resources first, so the comprehension iterated
                    # an empty list and no resource was ever loaded.
                    sources = self._resources
                    self._resources = []  # Clear so we don't load again on async init
                    return [self.load_context_source(source) for source in sources]
            
                async def __aenter__(self) -> Self:
                    """Initialize when used standalone."""
                    tasks = self.get_initialization_tasks()
                    if tasks:
                        # Load all context sources concurrently.
                        await asyncio.gather(*tasks)
                    return self
            
                async def __aexit__(
                    self,
                    exc_type: type[BaseException] | None,
                    exc_val: BaseException | None,
                    exc_tb: TracebackType | None,
                ):
                    """Discard any queued context messages on exit."""
                    # Pending messages only make sense for the session that queued them.
                    self._pending_messages.clear()
            
                def __bool__(self) -> bool:
                    """True if any pending or stored messages exist."""
                    if self._pending_messages:
                        return True
                    return bool(self.chat_messages)
            
                def __repr__(self) -> str:
                    """Debug representation showing the session id."""
                    ident = self.id
                    return f"ConversationManager(id={ident!r})"
            
                def __prompt__(self) -> str:
                    """Summarize the most recent exchange for prompt embedding."""
                    if not self.chat_messages:
                        return "No conversation history"

                    lines = ["Recent conversation:"]
                    # Only the last two messages are included in the summary.
                    for msg in self.chat_messages[-2:]:
                        lines.append(msg.format())
                    return "\n".join(lines)
            
                @overload
                def __getitem__(self, key: int) -> ChatMessage[Any]: ...

                @overload
                def __getitem__(self, key: slice | str) -> list[ChatMessage[Any]]: ...

                def __getitem__(
                    self, key: int | slice | str
                ) -> ChatMessage[Any] | list[ChatMessage[Any]]:
                    """Access conversation history.

                    Args:
                        key: Either:
                            - Integer index for single message
                            - Slice for message range
                            - Agent name for conversation history with that agent
                    """
                    if isinstance(key, int):
                        return self.chat_messages[key]
                    if isinstance(key, slice):
                        return list(self.chat_messages[key])
                    if isinstance(key, str):
                        # Agent name: query stored messages for that conversation.
                        query = SessionQuery(name=key)
                        return self._agent.context.storage.filter_messages_sync(query=query)
            
                def __contains__(self, item: Any) -> bool:
                    """Check whether *item* appears in the stored history."""
                    found = item in self.chat_messages
                    return found
            
                def __len__(self) -> int:
                    """Number of messages currently held in history."""
                    count = len(self.chat_messages)
                    return count
            
                def get_message_tokens(self, message: ChatMessage) -> int:
                    """Get token count for a single message.

                    Args:
                        message: Message whose formatted text is counted.

                    Returns:
                        Token count of the formatted text for the agent's model.
                    """
                    # BUG FIX: message.format() returns a single string (see
                    # get_pending_tokens, which joins one string per message).
                    # "\n".join(<str>) interleaves a newline between every
                    # character, wildly inflating the token count.
                    content = message.format()
                    return count_tokens(content, self._agent.model_name)
            
                async def format_history(
                    self,
                    *,
                    max_tokens: int | None = None,
                    include_system: bool = False,
                    format_template: str | None = None,
                    num_messages: int | None = None,
                ) -> str:
                    """Format conversation history as a single context message.

                    Args:
                        max_tokens: Optional limit to include only last N tokens
                        include_system: Whether to include system messages
                        format_template: Optional custom format (defaults to agent/message pairs)
                        num_messages: Optional limit to include only last N messages

                    Returns:
                        Newline-joined formatted messages in chronological order.
                    """
                    template = format_template or "Agent {agent}: {content}\n"
                    messages: list[str] = []
                    token_count = 0

                    # Get messages, optionally limited to the most recent N
                    history: Sequence[ChatMessage[Any]] = self.chat_messages
                    if num_messages:
                        history = history[-num_messages:]

                    if max_tokens:
                        history = list(reversed(history))  # Start from newest when token limited

                    for msg in history:
                        # Check role directly from ChatMessage
                        if not include_system and msg.role == "system":
                            continue
                        name = msg.name or msg.role.title()
                        formatted = template.format(agent=name, content=str(msg.content))

                        if max_tokens:
                            # Count tokens in this message
                            if msg.cost_info:
                                # Prefer the exact usage recorded with the message.
                                msg_tokens = msg.cost_info.token_usage["total"]
                            else:
                                # Fallback to tiktoken if no cost info
                                msg_tokens = self.get_message_tokens(msg)

                            if token_count + msg_tokens > max_tokens:
                                break
                            token_count += msg_tokens
                            # Add to front since we're going backwards
                            messages.insert(0, formatted)
                        else:
                            messages.append(formatted)

                    return "\n".join(messages)
            
                async def load_context_source(self, source: Resource | PromptType | str):
                    """Load context from a single source, logging failures instead of raising."""
                    try:
                        if isinstance(source, str):
                            await self.add_context_from_path(source)
                        elif isinstance(source, BaseResource):
                            await self.add_context_from_resource(source)
                        elif isinstance(source, BasePrompt):
                            await self.add_context_from_prompt(source)
                    except Exception:
                        # Best-effort loading: record the failure and keep going.
                        msg = "Failed to load context from %s"
                        logger.exception(msg, "file" if isinstance(source, str) else source.type)
            
                def load_history_from_database(
                    self,
                    session: SessionIdType | SessionQuery = None,
                    *,
                    since: datetime | None = None,
                    until: datetime | None = None,
                    roles: set[MessageRole] | None = None,
                    limit: int | None = None,
                ):
                    """Load conversation history from database.

                    Replaces the in-memory history with stored messages matching
                    the resolved query, and may adopt the query's session name as
                    this manager's id.

                    Args:
                        session: Session ID or query config
                        since: Only include messages after this time (override)
                        until: Only include messages before this time (override)
                        roles: Only include messages with these roles (override)
                        limit: Maximum number of messages to return (override)

                    Raises:
                        ValueError: If session is not a query, an id-like value, or None.
                    """
                    storage = self._agent.context.storage
                    match session:
                        case SessionQuery() as query:
                            # Override query params if provided
                            # NOTE(review): when any override is given, unspecified
                            # ones reset the query's existing since/until/roles/limit
                            # to None — confirm this clobbering is intended.
                            if since is not None or until is not None or roles or limit:
                                update = {
                                    "since": since.isoformat() if since else None,
                                    "until": until.isoformat() if until else None,
                                    "roles": roles,
                                    "limit": limit,
                                }
                                query = query.model_copy(update=update)
                            if query.name:
                                self.id = query.name
                        case str() | UUID():
                            # Adopt the given id as the current session id.
                            self.id = str(session)
                            query = SessionQuery(
                                name=self.id,
                                since=since.isoformat() if since else None,
                                until=until.isoformat() if until else None,
                                roles=roles,
                                limit=limit,
                            )
                        case None:
                            # Use current session ID
                            query = SessionQuery(
                                name=self.id,
                                since=since.isoformat() if since else None,
                                until=until.isoformat() if until else None,
                                roles=roles,
                                limit=limit,
                            )
                        case _:
                            msg = f"Invalid type for session: {type(session)}"
                            raise ValueError(msg)
                    self.chat_messages.clear()
                    self.chat_messages.extend(storage.filter_messages_sync(query))
            
                def get_history(
                    self,
                    include_pending: bool = True,
                    do_filter: bool = True,
                ) -> list[ChatMessage]:
                    """Get conversation history.

                    Note: when ``include_pending`` is True, queued pending messages
                    are *moved* into the permanent history as a side effect.

                    Args:
                        include_pending: Whether to include pending messages
                        do_filter: Whether to apply memory config limits (max_tokens, max_messages)

                    Returns:
                        Filtered list of messages in chronological order
                    """
                    # Promote pending context messages into the history first.
                    if include_pending and self._pending_messages:
                        self.chat_messages.extend(self._pending_messages)
                        self._pending_messages.clear()

                    # Start with the full (possibly just-extended) history.
                    history: Sequence[ChatMessage[Any]] = self.chat_messages

                    # Apply memory-config limits only when requested and configured.
                    if do_filter and self._config:
                        # First filter by message count (simple slice)
                        if self._config.max_messages:
                            history = history[-self._config.max_messages :]

                        # Then filter by tokens if needed
                        if self._config.max_tokens:
                            token_count = 0
                            filtered = []
                            # Collect messages from newest to oldest until we hit the limit
                            for msg in reversed(history):
                                msg_tokens = self.get_message_tokens(msg)
                                if token_count + msg_tokens > self._config.max_tokens:
                                    break
                                token_count += msg_tokens
                                filtered.append(msg)
                            history = list(reversed(filtered))

                    return list(history)
            
                def get_pending_messages(self) -> list[ChatMessage]:
                    """Get messages that will be included in next interaction."""
                    # Return a shallow copy so callers can't mutate the queue.
                    return [*self._pending_messages]
            
                def clear_pending(self):
                    """Clear pending messages without adding them to history."""
                    # Only the queued context is dropped; chat_messages is untouched.
                    self._pending_messages.clear()
            
                def set_history(self, history: list[ChatMessage]):
                    """Update conversation history after run.

                    Replaces the stored messages in place — the container object
                    itself is kept, so external references remain valid.
                    """
                    self.chat_messages.clear()
                    self.chat_messages.extend(history)
            
                def clear(self):
                    """Clear conversation history and prompts."""
                    # Replace the container rather than mutate it.
                    self.chat_messages = ChatMessageContainer()
                    self._last_messages = []
                    # Notify listeners that this session's history is gone.
                    cleared = self.HistoryCleared(session_id=str(self.id))
                    self.history_cleared.emit(cleared)
            
                @asynccontextmanager
                async def temporary_state(
                    self,
                    history: list[AnyPromptType] | SessionQuery | None = None,
                    *,
                    replace_history: bool = False,
                ) -> AsyncIterator[Self]:
                    """Temporarily set conversation history.

                    The previous history is restored when the context exits,
                    even if an exception is raised inside it.

                    Args:
                        history: Optional list of prompts to use as temporary history.
                                Can be strings, BasePrompts, or other prompt types.
                        replace_history: If True, only use provided history. If False, append
                                to existing history.
                    """
                    from toprompt import to_prompt

                    # NOTE(review): assumes .copy() returns an independent snapshot —
                    # confirm ChatMessageContainer.copy semantics.
                    old_history = self.chat_messages.copy()

                    try:
                        messages: Sequence[ChatMessage[Any]] = ChatMessageContainer()
                        if history is not None:
                            if isinstance(history, SessionQuery):
                                messages = await self._agent.context.storage.filter_messages(history)
                            else:
                                # Render each prompt-like item to text as a user message.
                                messages = [
                                    ChatMessage(content=await to_prompt(p), role="user")
                                    for p in history
                                ]

                        if replace_history:
                            self.chat_messages = ChatMessageContainer(messages)
                        else:
                            self.chat_messages.extend(messages)

                        yield self

                    finally:
                        # Always restore the pre-context history.
                        self.chat_messages = old_history
            
                def add_chat_messages(self, messages: Sequence[ChatMessage]):
                    """Append messages to history and remember them as the last run."""
                    new_msgs = list(messages)
                    self._last_messages = new_msgs
                    self.chat_messages.extend(new_msgs)
            
                @property
                def last_run_messages(self) -> list[ChatMessage]:
                    """Get messages from the last run converted to our format.

                    Returns the internal list; callers should treat it as read-only.
                    """
                    return self._last_messages
            
                def add_context_message(
                    self,
                    content: str,
                    source: str | None = None,
                    **metadata: Any,
                ):
                    """Add a context message.

                    Args:
                        content: Text content to add
                        source: Description of content source
                        **metadata: Additional metadata to include with the message
                    """
                    if metadata:
                        pairs = "\n".join(f"{k}: {v}" for k, v in metadata.items())
                        meta_str = f"\nMetadata:\n{pairs}\n"
                    else:
                        meta_str = ""

                    header = f"Content from {source}:" if source else "Additional context:"
                    formatted = f"{header}{meta_str}\n{content}\n"

                    chat_message = ChatMessage[str](
                        content=formatted,
                        role="user",
                        name="user",
                        model=self._agent.model_name,
                        metadata=metadata,
                    )
                    self._pending_messages.append(chat_message)
                    # Emit as user message - will trigger logging through existing flow
                    self._agent.message_received.emit(chat_message)
            
                async def add_context_from_path(
                    self,
                    path: StrPath,
                    *,
                    convert_to_md: bool = False,
                    **metadata: Any,
                ):
                    """Add file or URL content as context message.

                    Args:
                        path: Any UPath-supported path
                        convert_to_md: Whether to convert content to markdown
                        **metadata: Additional metadata to include with the message

                    Raises:
                        ValueError: If content cannot be loaded or converted
                    """
                    from upath import UPath

                    path_obj = UPath(path)
                    if convert_to_md:
                        # Route through the agent's converter for markdown output.
                        content = await self._agent.context.converter.convert_file(path)
                        label = f"markdown:{path_obj.name}"
                    else:
                        content = await read_path(path)
                        label = f"{path_obj.protocol}:{path_obj.name}"
                    self.add_context_message(content, source=label, **metadata)
            
                async def add_context_from_resource(self, resource: Resource | str):
                    """Add content from a LLMling resource."""
                    runtime = self._agent.runtime
                    if not runtime:
                        msg = "No runtime available"
                        raise RuntimeError(msg)

                    if isinstance(resource, str):
                        loaded = await runtime.load_resource(resource)
                        self.add_context_message(
                            str(loaded.content),
                            source=f"Resource {resource}",
                            mime_type=loaded.metadata.mime_type,
                            **loaded.metadata.extra,
                        )
                        return

                    # NOTE(review): reaches into the runtime's private loader registry.
                    loader = runtime._loader_registry.get_loader(resource)
                    async for item in loader.load(resource):
                        self.add_context_message(
                            str(item.content),
                            source=f"{resource.type}:{resource.uri}",
                            mime_type=item.metadata.mime_type,
                            **item.metadata.extra,
                        )
            
                async def add_context_from_prompt(
                    self,
                    prompt: PromptType,
                    metadata: dict[str, Any] | None = None,
                    **kwargs: Any,
                ):
                    """Add rendered prompt content as context message.

                    Args:
                        prompt: LLMling prompt (static, dynamic, or file-based)
                        metadata: Additional metadata to include with the message
                        kwargs: Optional kwargs for prompt formatting

                    Raises:
                        ValueError: If the prompt cannot be formatted.
                    """
                    try:
                        # Format the prompt using LLMling's prompt system
                        # NOTE(review): kwargs is passed as a single mapping —
                        # confirm prompt.format expects a dict rather than **kwargs.
                        messages = await prompt.format(kwargs)
                        # Extract text content from all messages
                        content = "\n\n".join(msg.get_text_content() for msg in messages)

                        self.add_context_message(
                            content,
                            source=f"prompt:{prompt.name or prompt.type}",
                            prompt_args=kwargs,
                            **(metadata or {}),
                        )
                    except Exception as e:
                        msg = f"Failed to format prompt: {e}"
                        raise ValueError(msg) from e
            
                def get_history_tokens(self) -> int:
                    """Get token count for current history.

                    Delegates to the message container, which prefers recorded
                    cost info over re-tokenizing.
                    """
                    # Use cost_info if available
                    return self.chat_messages.get_history_tokens(self._agent.model_name)
            
                def get_pending_tokens(self) -> int:
                    """Get token count for pending messages."""
                    formatted = [msg.format() for msg in self._pending_messages]
                    return count_tokens("\n".join(formatted), self._agent.model_name)
            

            last_run_messages property

            last_run_messages: list[ChatMessage]
            

            Get messages from the last run converted to our format.

            HistoryCleared dataclass

            Emitted when chat history is cleared.

            Source code in src/llmling_agent/agent/conversation.py
            54
            55
            56
            57
            58
            59
            @dataclass(frozen=True)
            class HistoryCleared:
                """Emitted when chat history is cleared."""

                # ID of the conversation whose history was cleared.
                session_id: str
                # When the clear happened; defaults to instantiation time.
                # NOTE(review): naive local time (datetime.now) — confirm UTC isn't expected.
                timestamp: datetime = field(default_factory=datetime.now)
            

            __aenter__ async

            __aenter__() -> Self
            

            Initialize when used standalone.

            Source code in src/llmling_agent/agent/conversation.py
            100
            101
            102
            103
            104
            async def __aenter__(self) -> Self:
                """Initialize when used standalone."""
                if tasks := self.get_initialization_tasks():
                    await asyncio.gather(*tasks)
                return self
            

            __aexit__ async

            __aexit__(
                exc_type: type[BaseException] | None,
                exc_val: BaseException | None,
                exc_tb: TracebackType | None,
            )
            

            Clean up any pending messages.

            Source code in src/llmling_agent/agent/conversation.py
            106
            107
            108
            109
            110
            111
            112
            113
            async def __aexit__(
                self,
                exc_type: type[BaseException] | None,
                exc_val: BaseException | None,
                exc_tb: TracebackType | None,
            ):
                """Clean up any pending messages."""
                self._pending_messages.clear()
            

            __contains__

            __contains__(item: Any) -> bool
            

            Check if item is in history.

            Source code in src/llmling_agent/agent/conversation.py
            156
            157
            158
            def __contains__(self, item: Any) -> bool:
                """Check if item is in history."""
                return item in self.chat_messages
            

            __getitem__

            __getitem__(key: int) -> ChatMessage[Any]
            
            __getitem__(key: slice | str) -> list[ChatMessage[Any]]
            
            __getitem__(key: int | slice | str) -> ChatMessage[Any] | list[ChatMessage[Any]]
            

            Access conversation history.

            Parameters:

            Name Type Description Default
            key int | slice | str

            Either: - Integer index for single message - Slice for message range - Agent name for conversation history with that agent

            required
            Source code in src/llmling_agent/agent/conversation.py
            136
            137
            138
            139
            140
            141
            142
            143
            144
            145
            146
            147
            148
            149
            150
            151
            152
            153
            154
            def __getitem__(
                self, key: int | slice | str
            ) -> ChatMessage[Any] | list[ChatMessage[Any]]:
                """Access conversation history.
            
                Args:
                    key: Either:
                        - Integer index for single message
                        - Slice for message range
                        - Agent name for conversation history with that agent
                """
                match key:
                    case int():
                        return self.chat_messages[key]
                    case slice():
                        return list(self.chat_messages[key])
                    case str():
                        query = SessionQuery(name=key)
                        return self._agent.context.storage.filter_messages_sync(query=query)
            

            __init__

            __init__(
                agent: Agent[Any],
                session_config: MemoryConfig | None = None,
                *,
                resources: Sequence[Resource | PromptType | str] = (),
            )
            

            Initialize conversation manager.

            Parameters:

            Name Type Description Default
            agent Agent[Any]

            instance to manage

            required
            session_config MemoryConfig | None

            Optional MemoryConfig

            None
            resources Sequence[Resource | PromptType | str]

            Optional paths to load as context

            ()
            Source code in src/llmling_agent/agent/conversation.py
            63
            64
            65
            66
            67
            68
            69
            70
            71
            72
            73
            74
            75
            76
            77
            78
            79
            80
            81
            82
            83
            84
            85
            86
            87
            88
            89
            90
            def __init__(
                self,
                agent: Agent[Any],
                session_config: MemoryConfig | None = None,
                *,
                resources: Sequence[Resource | PromptType | str] = (),
            ):
                """Initialize conversation manager.

                Args:
                    agent: instance to manage
                    session_config: Optional MemoryConfig
                    resources: Optional paths to load as context
                """
                self._agent = agent
                # Full conversation history (chronological).
                self.chat_messages = ChatMessageContainer()
                # Messages produced by the most recent run.
                self._last_messages: list[ChatMessage] = []
                # Context messages queued for inclusion in the next interaction.
                self._pending_messages: deque[ChatMessage] = deque()
                self._config = session_config
                self._resources = list(resources)  # Store for async loading
                # Generate new ID if none provided
                self.id = str(uuid4())

                # Recover a previous session synchronously when configured.
                # NOTE(review): _current_history is only assigned on this path —
                # confirm readers tolerate the attribute being absent otherwise.
                if session_config is not None and session_config.session is not None:
                    storage = self._agent.context.storage
                    self._current_history = storage.filter_messages_sync(session_config.session)
                    if session_config.session.name:
                        self.id = session_config.session.name
            

            __len__

            __len__() -> int
            

            Get length of history.

            Source code in src/llmling_agent/agent/conversation.py
            160
            161
            162
            def __len__(self) -> int:
                """Get length of history."""
                return len(self.chat_messages)
            

            add_chat_messages

            add_chat_messages(messages: Sequence[ChatMessage])
            

            Add new messages to history and update last_messages.

            Source code in src/llmling_agent/agent/conversation.py
            394
            395
            396
            397
            def add_chat_messages(self, messages: Sequence[ChatMessage]):
                """Append *messages* to history and record them as the last run."""
                snapshot = list(messages)
                self._last_messages = snapshot
                self.chat_messages.extend(snapshot)
            

            add_context_from_path async

            add_context_from_path(path: StrPath, *, convert_to_md: bool = False, **metadata: Any)
            

            Add file or URL content as context message.

            Parameters:

            Name Type Description Default
            path StrPath

            Any UPath-supported path

            required
            convert_to_md bool

            Whether to convert content to markdown

            False
            **metadata Any

            Additional metadata to include with the message

            {}

            Raises:

            Type Description
            ValueError

            If content cannot be loaded or converted

            Source code in src/llmling_agent/agent/conversation.py
            436
            437
            438
            439
            440
            441
            442
            443
            444
            445
            446
            447
            448
            449
            450
            451
            452
            453
            454
            455
            456
            457
            458
            459
            460
            461
            462
            async def add_context_from_path(
                self,
                path: StrPath,
                *,
                convert_to_md: bool = False,
                **metadata: Any,
            ):
                """Add file or URL content as context message.

                Args:
                    path: Any UPath-supported path
                    convert_to_md: Whether to convert content to markdown
                    **metadata: Additional metadata to include with the message

                Raises:
                    ValueError: If content cannot be loaded or converted
                """
                from upath import UPath

                path_obj = UPath(path)
                if convert_to_md:
                    # Render the file to markdown via the agent's converter.
                    content = await self._agent.context.converter.convert_file(path)
                    source = f"markdown:{path_obj.name}"
                else:
                    # Raw read; the source tag records the protocol (file, http, ...).
                    content = await read_path(path)
                    source = f"{path_obj.protocol}:{path_obj.name}"
                self.add_context_message(content, source=source, **metadata)
            

            add_context_from_prompt async

            add_context_from_prompt(
                prompt: PromptType, metadata: dict[str, Any] | None = None, **kwargs: Any
            )
            

            Add rendered prompt content as context message.

            Parameters:

            Name Type Description Default
            prompt PromptType

            LLMling prompt (static, dynamic, or file-based)

            required
            metadata dict[str, Any] | None

            Additional metadata to include with the message

            None
            kwargs Any

            Optional kwargs for prompt formatting

            {}
            Source code in src/llmling_agent/agent/conversation.py
            488
            489
            490
            491
            492
            493
            494
            495
            496
            497
            498
            499
            500
            501
            502
            503
            504
            505
            506
            507
            508
            509
            510
            511
            512
            513
            514
            515
            async def add_context_from_prompt(
                self,
                prompt: PromptType,
                metadata: dict[str, Any] | None = None,
                **kwargs: Any,
            ):
                """Add rendered prompt content as context message.

                Args:
                    prompt: LLMling prompt (static, dynamic, or file-based)
                    metadata: Additional metadata to include with the message
                    kwargs: Optional kwargs for prompt formatting

                Raises:
                    ValueError: If the prompt cannot be formatted.
                """
                try:
                    # Format the prompt using LLMling's prompt system.
                    # NOTE(review): kwargs is passed as a single positional dict —
                    # confirm prompt.format expects a mapping rather than **kwargs.
                    messages = await prompt.format(kwargs)
                    # Extract text content from all messages
                    content = "\n\n".join(msg.get_text_content() for msg in messages)

                    self.add_context_message(
                        content,
                        source=f"prompt:{prompt.name or prompt.type}",
                        prompt_args=kwargs,
                        **(metadata or {}),
                    )
                except Exception as e:
                    # Broad catch by design: any formatting failure surfaces as ValueError.
                    msg = f"Failed to format prompt: {e}"
                    raise ValueError(msg) from e
            

            add_context_from_resource async

            add_context_from_resource(resource: Resource | str)
            

            Add content from an LLMling resource.

            Source code in src/llmling_agent/agent/conversation.py
            464
            465
            466
            467
            468
            469
            470
            471
            472
            473
            474
            475
            476
            477
            478
            479
            480
            481
            482
            483
            484
            485
            486
            async def add_context_from_resource(self, resource: Resource | str):
                """Add content from an LLMling resource.

                Args:
                    resource: Resource object, or the name of a registered resource.

                Raises:
                    RuntimeError: If the agent has no runtime to load resources with.
                """
                if not self._agent.runtime:
                    msg = "No runtime available"
                    raise RuntimeError(msg)

                if isinstance(resource, str):
                    # Named resource: let the runtime resolve and load it.
                    content = await self._agent.runtime.load_resource(resource)
                    self.add_context_message(
                        str(content.content),
                        source=f"Resource {resource}",
                        mime_type=content.metadata.mime_type,
                        **content.metadata.extra,
                    )
                else:
                    # Resource object: pick a loader directly.
                    # NOTE(review): reaches into the private _loader_registry —
                    # consider a public accessor on the runtime.
                    loader = self._agent.runtime._loader_registry.get_loader(resource)
                    # A loader may yield multiple content chunks; add each one.
                    async for content in loader.load(resource):
                        self.add_context_message(
                            str(content.content),
                            source=f"{resource.type}:{resource.uri}",
                            mime_type=content.metadata.mime_type,
                            **content.metadata.extra,
                        )
            

            add_context_message

            add_context_message(content: str, source: str | None = None, **metadata: Any)
            

            Add a context message.

            Parameters:

            Name Type Description Default
            content str

            Text content to add

            required
            source str | None

            Description of content source

            None
            **metadata Any

            Additional metadata to include with the message

            {}
            Source code in src/llmling_agent/agent/conversation.py
            404
            405
            406
            407
            408
            409
            410
            411
            412
            413
            414
            415
            416
            417
            418
            419
            420
            421
            422
            423
            424
            425
            426
            427
            428
            429
            430
            431
            432
            433
            434
            def add_context_message(
                self,
                content: str,
                source: str | None = None,
                **metadata: Any,
            ):
                """Queue a context message for the next interaction.

                Args:
                    content: Text content to add
                    source: Description of content source
                    **metadata: Additional metadata to include with the message
                """
                # Render metadata as a key/value section when any was supplied.
                if metadata:
                    pairs = [f"{k}: {v}" for k, v in metadata.items()]
                    meta_str = "\nMetadata:\n" + "\n".join(pairs) + "\n"
                else:
                    meta_str = ""

                header = f"Content from {source}:" if source else "Additional context:"
                formatted = f"{header}{meta_str}\n{content}\n"

                chat_message = ChatMessage[str](
                    content=formatted,
                    role="user",
                    name="user",
                    model=self._agent.model_name,
                    metadata=metadata,
                )
                self._pending_messages.append(chat_message)
                # Emit as user message - will trigger logging through existing flow
                self._agent.message_received.emit(chat_message)
            

            clear

            clear()
            

            Clear conversation history and prompts.

            Source code in src/llmling_agent/agent/conversation.py
            347
            348
            349
            350
            351
            352
            def clear(self):
                """Reset the conversation history and notify listeners."""
                # Build the event first; it only needs the (unchanged) session id.
                event = self.HistoryCleared(session_id=str(self.id))
                self.chat_messages = ChatMessageContainer()
                self._last_messages = []
                self.history_cleared.emit(event)
            

            clear_pending

            clear_pending()
            

            Clear pending messages without adding them to history.

            Source code in src/llmling_agent/agent/conversation.py
            338
            339
            340
            def clear_pending(self):
                """Drop all queued messages without committing them to history."""
                while self._pending_messages:
                    self._pending_messages.pop()
            

            format_history async

            format_history(
                *,
                max_tokens: int | None = None,
                include_system: bool = False,
                format_template: str | None = None,
                num_messages: int | None = None,
            ) -> str
            

            Format conversation history as a single context message.

            Parameters:

            Name Type Description Default
            max_tokens int | None

            Optional limit to include only last N tokens

            None
            include_system bool

            Whether to include system messages

            False
            format_template str | None

            Optional custom format (defaults to agent/message pairs)

            None
            num_messages int | None

            Optional limit to include only last N messages

            None
            Source code in src/llmling_agent/agent/conversation.py
            169
            170
            171
            172
            173
            174
            175
            176
            177
            178
            179
            180
            181
            182
            183
            184
            185
            186
            187
            188
            189
            190
            191
            192
            193
            194
            195
            196
            197
            198
            199
            200
            201
            202
            203
            204
            205
            206
            207
            208
            209
            210
            211
            212
            213
            214
            215
            216
            217
            218
            219
            220
            async def format_history(
                self,
                *,
                max_tokens: int | None = None,
                include_system: bool = False,
                format_template: str | None = None,
                num_messages: int | None = None,
            ) -> str:
                """Format conversation history as a single context message.

                Args:
                    max_tokens: Optional limit to include only last N tokens
                    include_system: Whether to include system messages
                    format_template: Optional custom format (defaults to agent/message pairs)
                    num_messages: Optional limit to include only last N messages
                """
                template = format_template or "Agent {agent}: {content}\n"
                messages: list[str] = []
                token_count = 0

                # Get messages, optionally limited
                history: Sequence[ChatMessage[Any]] = self.chat_messages
                if num_messages:
                    history = history[-num_messages:]

                if max_tokens:
                    history = list(reversed(history))  # Start from newest when token limited

                for msg in history:
                    # Check role directly from ChatMessage
                    if not include_system and msg.role == "system":
                        continue
                    name = msg.name or msg.role.title()
                    formatted = template.format(agent=name, content=str(msg.content))

                    if max_tokens:
                        # Count tokens in this message
                        if msg.cost_info:
                            # Prefer the recorded usage from the model response.
                            msg_tokens = msg.cost_info.token_usage["total"]
                        else:
                            # Fallback to tiktoken if no cost info
                            msg_tokens = self.get_message_tokens(msg)

                        # Stop once adding this message would exceed the budget.
                        if token_count + msg_tokens > max_tokens:
                            break
                        token_count += msg_tokens
                        # Add to front since we're going backwards
                        messages.insert(0, formatted)
                    else:
                        messages.append(formatted)

                return "\n".join(messages)
            

            get_history

            get_history(include_pending: bool = True, do_filter: bool = True) -> list[ChatMessage]
            

            Get conversation history.

            Parameters:

            Name Type Description Default
            include_pending bool

            Whether to include pending messages

            True
            do_filter bool

            Whether to apply memory config limits (max_tokens, max_messages)

            True

            Returns:

            Type Description
            list[ChatMessage]

            Filtered list of messages in chronological order

            Source code in src/llmling_agent/agent/conversation.py
            292
            293
            294
            295
            296
            297
            298
            299
            300
            301
            302
            303
            304
            305
            306
            307
            308
            309
            310
            311
            312
            313
            314
            315
            316
            317
            318
            319
            320
            321
            322
            323
            324
            325
            326
            327
            328
            329
            330
            331
            332
            def get_history(
                self,
                include_pending: bool = True,
                do_filter: bool = True,
            ) -> list[ChatMessage]:
                """Get conversation history.

                Note:
                    When ``include_pending`` is true, pending messages are *moved*
                    into the permanent history — the pending queue is drained.

                Args:
                    include_pending: Whether to include pending messages
                    do_filter: Whether to apply memory config limits (max_tokens, max_messages)

                Returns:
                    Filtered list of messages in chronological order
                """
                # 1. Flush pending messages into history (mutating side effect)
                if include_pending and self._pending_messages:
                    self.chat_messages.extend(self._pending_messages)
                    self._pending_messages.clear()

                # 2. Start with original history
                history: Sequence[ChatMessage[Any]] = self.chat_messages

                # 3. Only filter if needed
                if do_filter and self._config:
                    # First filter by message count (simple slice)
                    if self._config.max_messages:
                        history = history[-self._config.max_messages :]

                    # Then filter by tokens if needed
                    if self._config.max_tokens:
                        token_count = 0
                        filtered = []
                        # Collect messages from newest to oldest until we hit the limit
                        for msg in reversed(history):
                            msg_tokens = self.get_message_tokens(msg)
                            if token_count + msg_tokens > self._config.max_tokens:
                                break
                            token_count += msg_tokens
                            filtered.append(msg)
                        history = list(reversed(filtered))

                return list(history)
            

            get_history_tokens

            get_history_tokens() -> int
            

            Get token count for current history.

            Source code in src/llmling_agent/agent/conversation.py
            517
            518
            519
            520
            def get_history_tokens(self) -> int:
                """Return the token count for the current conversation history."""
                # The container prefers recorded cost info when available.
                model = self._agent.model_name
                return self.chat_messages.get_history_tokens(model)
            

            get_initialization_tasks

            get_initialization_tasks() -> list[Coroutine[Any, Any, Any]]
            

            Get all initialization coroutines.

            Source code in src/llmling_agent/agent/conversation.py
            95
            96
            97
            98
            def get_initialization_tasks(self) -> list[Coroutine[Any, Any, Any]]:
                """Get all initialization coroutines.

                Returns one loader coroutine per configured resource, and clears
                the stored resource list so a later init doesn't load them twice.
                """
                # BUG FIX: the list was cleared *before* the comprehension iterated
                # it, so this always returned [] and resources were never loaded.
                # Snapshot the sources first, then clear.
                sources = self._resources
                self._resources = []  # Clear so we don't load again on async init
                return [self.load_context_source(source) for source in sources]
            

            get_message_tokens

            get_message_tokens(message: ChatMessage) -> int
            

            Get token count for a single message.

            Source code in src/llmling_agent/agent/conversation.py
            164
            165
            166
            167
            def get_message_tokens(self, message: ChatMessage) -> int:
                """Get token count for a single message.

                Args:
                    message: Message whose formatted text is counted with the
                        agent's current model.
                """
                # BUG FIX: message.format() returns a string (get_pending_tokens
                # joins per-message format() results directly), so the previous
                # "\n".join(message.format()) interleaved a newline between every
                # character and inflated the count. Count the text directly.
                return count_tokens(message.format(), self._agent.model_name)
            

            get_pending_messages

            get_pending_messages() -> list[ChatMessage]
            

            Get messages that will be included in next interaction.

            Source code in src/llmling_agent/agent/conversation.py
            334
            335
            336
            def get_pending_messages(self) -> list[ChatMessage]:
                """Return a snapshot of messages queued for the next interaction."""
                return [*self._pending_messages]
            

            get_pending_tokens

            get_pending_tokens() -> int
            

            Get token count for pending messages.

            Source code in src/llmling_agent/agent/conversation.py
            522
            523
            524
            525
            def get_pending_tokens(self) -> int:
                """Return the token count of all messages awaiting the next run."""
                parts = [msg.format() for msg in self._pending_messages]
                return count_tokens("\n".join(parts), self._agent.model_name)
            

            load_context_source async

            load_context_source(source: Resource | PromptType | str)
            

            Load context from a single source.

            Source code in src/llmling_agent/agent/conversation.py
            222
            223
            224
            225
            226
            227
            228
            229
            230
            231
            232
            233
            234
            async def load_context_source(self, source: Resource | PromptType | str):
                """Load context from a single source.

                Dispatches on the source type: strings are treated as paths;
                resources and prompts go through their dedicated loaders.
                Failures are logged and swallowed (best-effort loading).
                """
                try:
                    match source:
                        case str():
                            await self.add_context_from_path(source)
                        case BaseResource():
                            await self.add_context_from_resource(source)
                        case BasePrompt():
                            await self.add_context_from_prompt(source)
                except Exception:
                    # Deliberate best-effort: a bad context source must not
                    # break agent startup, so log and continue.
                    msg = "Failed to load context from %s"
                    logger.exception(msg, "file" if isinstance(source, str) else source.type)
            

            load_history_from_database

            load_history_from_database(
                session: SessionIdType | SessionQuery = None,
                *,
                since: datetime | None = None,
                until: datetime | None = None,
                roles: set[MessageRole] | None = None,
                limit: int | None = None,
            )
            

            Load conversation history from database.

            Parameters:

            Name Type Description Default
            session SessionIdType | SessionQuery

            Session ID or query config

            None
            since datetime | None

            Only include messages after this time (override)

            None
            until datetime | None

            Only include messages before this time (override)

            None
            roles set[MessageRole] | None

            Only include messages with these roles (override)

            None
            limit int | None

            Maximum number of messages to return (override)

            None
            Source code in src/llmling_agent/agent/conversation.py
            236
            237
            238
            239
            240
            241
            242
            243
            244
            245
            246
            247
            248
            249
            250
            251
            252
            253
            254
            255
            256
            257
            258
            259
            260
            261
            262
            263
            264
            265
            266
            267
            268
            269
            270
            271
            272
            273
            274
            275
            276
            277
            278
            279
            280
            281
            282
            283
            284
            285
            286
            287
            288
            289
            290
            def load_history_from_database(
                self,
                session: SessionIdType | SessionQuery = None,
                *,
                since: datetime | None = None,
                until: datetime | None = None,
                roles: set[MessageRole] | None = None,
                limit: int | None = None,
            ):
                """Load conversation history from database.
            
                Args:
                    session: Session ID or query config
                    since: Only include messages after this time (override)
                    until: Only include messages before this time (override)
                    roles: Only include messages with these roles (override)
                    limit: Maximum number of messages to return (override)
                """
                storage = self._agent.context.storage
                match session:
                    case SessionQuery() as query:
                        # Override query params if provided
                        if since is not None or until is not None or roles or limit:
                            update = {
                                "since": since.isoformat() if since else None,
                                "until": until.isoformat() if until else None,
                                "roles": roles,
                                "limit": limit,
                            }
                            query = query.model_copy(update=update)
                        if query.name:
                            self.id = query.name
                    case str() | UUID():
                        self.id = str(session)
                        query = SessionQuery(
                            name=self.id,
                            since=since.isoformat() if since else None,
                            until=until.isoformat() if until else None,
                            roles=roles,
                            limit=limit,
                        )
                    case None:
                        # Use current session ID
                        query = SessionQuery(
                            name=self.id,
                            since=since.isoformat() if since else None,
                            until=until.isoformat() if until else None,
                            roles=roles,
                            limit=limit,
                        )
                    case _:
                        msg = f"Invalid type for session: {type(session)}"
                        raise ValueError(msg)
                self.chat_messages.clear()
                self.chat_messages.extend(storage.filter_messages_sync(query))
            

            set_history

            set_history(history: list[ChatMessage])
            

            Update conversation history after run.

            Source code in src/llmling_agent/agent/conversation.py
            342
            343
            344
            345
            def set_history(self, history: list[ChatMessage]):
                """Update conversation history after run.

                Drops every currently stored message and adopts *history* as the
                new conversation content.
                """
                self.chat_messages.clear()
                for message in history:
                    self.chat_messages.append(message)
            

            temporary_state async

            temporary_state(
                history: list[AnyPromptType] | SessionQuery | None = None,
                *,
                replace_history: bool = False,
            ) -> AsyncIterator[Self]
            

            Temporarily set conversation history.

            Parameters:

            Name Type Description Default
            history list[AnyPromptType] | SessionQuery | None

            Optional list of prompts to use as temporary history. Can be strings, BasePrompts, or other prompt types.

            None
            replace_history bool

            If True, only use provided history. If False, append to existing history.

            False
            Source code in src/llmling_agent/agent/conversation.py
            354
            355
            356
            357
            358
            359
            360
            361
            362
            363
            364
            365
            366
            367
            368
            369
            370
            371
            372
            373
            374
            375
            376
            377
            378
            379
            380
            381
            382
            383
            384
            385
            386
            387
            388
            389
            390
            391
            392
            @asynccontextmanager
            async def temporary_state(
                self,
                history: list[AnyPromptType] | SessionQuery | None = None,
                *,
                replace_history: bool = False,
            ) -> AsyncIterator[Self]:
                """Temporarily set conversation history.

                Args:
                    history: Optional list of prompts to use as temporary history.
                            Can be strings, BasePrompts, or other prompt types.
                    replace_history: If True, only use provided history. If False, append
                            to existing history.
                """
                from toprompt import to_prompt

                # Snapshot the current messages so they can be restored on exit.
                saved_messages = self.chat_messages.copy()

                try:
                    temp: Sequence[ChatMessage[Any]] = ChatMessageContainer()
                    if history is not None:
                        if isinstance(history, SessionQuery):
                            # Pull the temporary history from storage.
                            storage = self._agent.context.storage
                            temp = await storage.filter_messages(history)
                        else:
                            # Convert arbitrary prompt inputs to user messages.
                            rendered = [await to_prompt(item) for item in history]
                            temp = [
                                ChatMessage(content=text, role="user")
                                for text in rendered
                            ]

                    if replace_history:
                        self.chat_messages = ChatMessageContainer(temp)
                    else:
                        self.chat_messages.extend(temp)

                    yield self

                finally:
                    # Always restore the pre-context snapshot, even on error.
                    self.chat_messages = saved_messages
            

            _to_base_prompt

            _to_base_prompt(prompt: PromptInput) -> BasePrompt
            

            Convert input to BasePrompt instance.

            Source code in src/llmling_agent/agent/conversation.py
            41
            42
            43
            44
            45
            46
            47
            48
            def _to_base_prompt(prompt: PromptInput) -> BasePrompt:
                """Convert input to BasePrompt instance.

                Plain strings are wrapped in a single-message system
                ``StaticPrompt``; anything else is returned unchanged.
                """
                if not isinstance(prompt, str):
                    return prompt
                system_msg = PromptMessage(role="system", content=prompt)
                return StaticPrompt(
                    name="System prompt",
                    description="System prompt",
                    messages=[system_msg],
                )