feat(server): add conversation mtime tracking for real-time updates
Add conversation_mtime_ns field to session state that tracks the actual
modification time of conversation files. This enables more responsive
dashboard updates by detecting changes that occur between hook events
(e.g., during streaming tool execution).
Changes:
- state.py: Add _get_conversation_mtime() to stat conversation files
and include mtime_ns in session payloads when available
- conversation.py: Add stable message IDs (claude-{session}-{n} format)
for React key stability and message deduplication
- control.py: Fix FIFO eviction for dismissed_codex_ids - set.pop()
removes an arbitrary element; now uses a dict with insertion-order iteration
- context.py: Update dismissed_codex_ids type from set to dict
The mtime approach complements the existing last_event_at tracking:
- last_event_at: Changes on hook events (session boundaries)
- conversation_mtime_ns: Changes on every file write (real-time)
Dashboard can now detect mid-session conversation updates without
waiting for the next hook event.
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -62,7 +62,8 @@ _codex_transcript_cache = {}
|
|||||||
_CODEX_CACHE_MAX = 200
|
_CODEX_CACHE_MAX = 200
|
||||||
|
|
||||||
# Codex sessions dismissed during this server lifetime (prevents re-discovery)
|
# Codex sessions dismissed during this server lifetime (prevents re-discovery)
|
||||||
_dismissed_codex_ids = set()
|
# Uses dict (not set) for O(1) lookup + FIFO eviction via insertion order (Python 3.7+)
|
||||||
|
_dismissed_codex_ids = {}
|
||||||
_DISMISSED_MAX = 500
|
_DISMISSED_MAX = 500
|
||||||
|
|
||||||
# Serialize state collection because it mutates session files/caches.
|
# Serialize state collection because it mutates session files/caches.
|
||||||
|
|||||||
@@ -16,10 +16,11 @@ class SessionControlMixin:
|
|||||||
safe_id = os.path.basename(session_id)
|
safe_id = os.path.basename(session_id)
|
||||||
session_file = SESSIONS_DIR / f"{safe_id}.json"
|
session_file = SESSIONS_DIR / f"{safe_id}.json"
|
||||||
# Track dismissed Codex sessions to prevent re-discovery
|
# Track dismissed Codex sessions to prevent re-discovery
|
||||||
# Evict oldest entries if set is full (prevents unbounded growth)
|
# Evict oldest entries via FIFO (dict maintains insertion order in Python 3.7+)
|
||||||
while len(_dismissed_codex_ids) >= _DISMISSED_MAX:
|
while len(_dismissed_codex_ids) >= _DISMISSED_MAX:
|
||||||
_dismissed_codex_ids.pop()
|
oldest_key = next(iter(_dismissed_codex_ids))
|
||||||
_dismissed_codex_ids.add(safe_id)
|
del _dismissed_codex_ids[oldest_key]
|
||||||
|
_dismissed_codex_ids[safe_id] = True
|
||||||
session_file.unlink(missing_ok=True)
|
session_file.unlink(missing_ok=True)
|
||||||
self._send_json(200, {"ok": True})
|
self._send_json(200, {"ok": True})
|
||||||
|
|
||||||
|
|||||||
@@ -39,6 +39,7 @@ class ConversationMixin:
|
|||||||
def _parse_claude_conversation(self, session_id, project_dir):
|
def _parse_claude_conversation(self, session_id, project_dir):
|
||||||
"""Parse Claude Code JSONL conversation format."""
|
"""Parse Claude Code JSONL conversation format."""
|
||||||
messages = []
|
messages = []
|
||||||
|
msg_id = 0
|
||||||
|
|
||||||
conv_file = self._get_claude_conversation_file(session_id, project_dir)
|
conv_file = self._get_claude_conversation_file(session_id, project_dir)
|
||||||
|
|
||||||
@@ -58,10 +59,12 @@ class ConversationMixin:
|
|||||||
# Only include actual human messages (strings), not tool results (arrays)
|
# Only include actual human messages (strings), not tool results (arrays)
|
||||||
if content and isinstance(content, str):
|
if content and isinstance(content, str):
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"claude-{session_id[:8]}-{msg_id}",
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": content,
|
"content": content,
|
||||||
"timestamp": entry.get("timestamp", ""),
|
"timestamp": entry.get("timestamp", ""),
|
||||||
})
|
})
|
||||||
|
msg_id += 1
|
||||||
|
|
||||||
elif msg_type == "assistant":
|
elif msg_type == "assistant":
|
||||||
# Assistant messages have structured content
|
# Assistant messages have structured content
|
||||||
@@ -90,6 +93,7 @@ class ConversationMixin:
|
|||||||
text_parts.append(part)
|
text_parts.append(part)
|
||||||
if text_parts or tool_calls or thinking_parts:
|
if text_parts or tool_calls or thinking_parts:
|
||||||
msg = {
|
msg = {
|
||||||
|
"id": f"claude-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "\n".join(text_parts) if text_parts else "",
|
"content": "\n".join(text_parts) if text_parts else "",
|
||||||
"timestamp": entry.get("timestamp", ""),
|
"timestamp": entry.get("timestamp", ""),
|
||||||
@@ -99,6 +103,7 @@ class ConversationMixin:
|
|||||||
if thinking_parts:
|
if thinking_parts:
|
||||||
msg["thinking"] = "\n\n".join(thinking_parts)
|
msg["thinking"] = "\n\n".join(thinking_parts)
|
||||||
messages.append(msg)
|
messages.append(msg)
|
||||||
|
msg_id += 1
|
||||||
|
|
||||||
except json.JSONDecodeError:
|
except json.JSONDecodeError:
|
||||||
continue
|
continue
|
||||||
@@ -117,6 +122,7 @@ class ConversationMixin:
|
|||||||
"""
|
"""
|
||||||
messages = []
|
messages = []
|
||||||
pending_tool_calls = [] # Accumulate tool calls to attach to next assistant message
|
pending_tool_calls = [] # Accumulate tool calls to attach to next assistant message
|
||||||
|
msg_id = 0
|
||||||
|
|
||||||
conv_file = self._find_codex_transcript_file(session_id)
|
conv_file = self._find_codex_transcript_file(session_id)
|
||||||
|
|
||||||
@@ -161,19 +167,23 @@ class ConversationMixin:
|
|||||||
# Flush any pending tool calls first
|
# Flush any pending tool calls first
|
||||||
if pending_tool_calls:
|
if pending_tool_calls:
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "",
|
"content": "",
|
||||||
"tool_calls": pending_tool_calls,
|
"tool_calls": pending_tool_calls,
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
})
|
})
|
||||||
|
msg_id += 1
|
||||||
pending_tool_calls = []
|
pending_tool_calls = []
|
||||||
# Add thinking as assistant message
|
# Add thinking as assistant message
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "",
|
"content": "",
|
||||||
"thinking": "\n".join(thinking_text),
|
"thinking": "\n".join(thinking_text),
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
})
|
})
|
||||||
|
msg_id += 1
|
||||||
continue
|
continue
|
||||||
|
|
||||||
# Handle message (user/assistant text)
|
# Handle message (user/assistant text)
|
||||||
@@ -208,19 +218,24 @@ class ConversationMixin:
|
|||||||
# Flush any pending tool calls before user message
|
# Flush any pending tool calls before user message
|
||||||
if pending_tool_calls:
|
if pending_tool_calls:
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "",
|
"content": "",
|
||||||
"tool_calls": pending_tool_calls,
|
"tool_calls": pending_tool_calls,
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
})
|
})
|
||||||
|
msg_id += 1
|
||||||
pending_tool_calls = []
|
pending_tool_calls = []
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "user",
|
"role": "user",
|
||||||
"content": "\n".join(text_parts),
|
"content": "\n".join(text_parts),
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
})
|
})
|
||||||
|
msg_id += 1
|
||||||
elif role == "assistant":
|
elif role == "assistant":
|
||||||
msg = {
|
msg = {
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "\n".join(text_parts) if text_parts else "",
|
"content": "\n".join(text_parts) if text_parts else "",
|
||||||
"timestamp": timestamp,
|
"timestamp": timestamp,
|
||||||
@@ -231,6 +246,7 @@ class ConversationMixin:
|
|||||||
pending_tool_calls = []
|
pending_tool_calls = []
|
||||||
if text_parts or msg.get("tool_calls"):
|
if text_parts or msg.get("tool_calls"):
|
||||||
messages.append(msg)
|
messages.append(msg)
|
||||||
|
msg_id += 1
|
||||||
|
|
||||||
except json.JSONDecodeError:
|
except json.JSONDecodeError:
|
||||||
continue
|
continue
|
||||||
@@ -238,6 +254,7 @@ class ConversationMixin:
|
|||||||
# Flush any remaining pending tool calls
|
# Flush any remaining pending tool calls
|
||||||
if pending_tool_calls:
|
if pending_tool_calls:
|
||||||
messages.append({
|
messages.append({
|
||||||
|
"id": f"codex-{session_id[:8]}-{msg_id}",
|
||||||
"role": "assistant",
|
"role": "assistant",
|
||||||
"content": "",
|
"content": "",
|
||||||
"tool_calls": pending_tool_calls,
|
"tool_calls": pending_tool_calls,
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ import json
|
|||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
from amc_server.context import (
|
from amc_server.context import (
|
||||||
EVENTS_DIR,
|
EVENTS_DIR,
|
||||||
@@ -119,6 +120,11 @@ class StateMixin:
|
|||||||
if context_usage:
|
if context_usage:
|
||||||
data["context_usage"] = context_usage
|
data["context_usage"] = context_usage
|
||||||
|
|
||||||
|
# Track conversation file mtime for real-time update detection
|
||||||
|
conv_mtime = self._get_conversation_mtime(data)
|
||||||
|
if conv_mtime:
|
||||||
|
data["conversation_mtime_ns"] = conv_mtime
|
||||||
|
|
||||||
sessions.append(data)
|
sessions.append(data)
|
||||||
except (json.JSONDecodeError, OSError):
|
except (json.JSONDecodeError, OSError):
|
||||||
continue
|
continue
|
||||||
@@ -166,6 +172,38 @@ class StateMixin:
|
|||||||
|
|
||||||
return None # Return None on error (don't clean up if we can't verify)
|
return None # Return None on error (don't clean up if we can't verify)
|
||||||
|
|
||||||
|
def _get_conversation_mtime(self, session_data):
|
||||||
|
"""Get the conversation file's mtime for real-time change detection."""
|
||||||
|
agent = session_data.get("agent")
|
||||||
|
|
||||||
|
if agent == "claude":
|
||||||
|
conv_file = self._get_claude_conversation_file(
|
||||||
|
session_data.get("session_id", ""),
|
||||||
|
session_data.get("project_dir", ""),
|
||||||
|
)
|
||||||
|
if conv_file:
|
||||||
|
try:
|
||||||
|
return conv_file.stat().st_mtime_ns
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
elif agent == "codex":
|
||||||
|
transcript_path = session_data.get("transcript_path", "")
|
||||||
|
if transcript_path:
|
||||||
|
try:
|
||||||
|
return Path(transcript_path).stat().st_mtime_ns
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
# Fallback to discovery
|
||||||
|
transcript_file = self._find_codex_transcript_file(session_data.get("session_id", ""))
|
||||||
|
if transcript_file:
|
||||||
|
try:
|
||||||
|
return transcript_file.stat().st_mtime_ns
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
def _cleanup_stale(self, sessions):
|
def _cleanup_stale(self, sessions):
|
||||||
"""Remove orphan event logs >24h and stale 'starting' sessions >1h."""
|
"""Remove orphan event logs >24h and stale 'starting' sessions >1h."""
|
||||||
active_ids = {s.get("session_id") for s in sessions if s.get("session_id")}
|
active_ids = {s.get("session_id") for s in sessions if s.get("session_id")}
|
||||||
|
|||||||
Reference in New Issue
Block a user