fix(codex): parse tool calls and reasoning in conversation API

The Codex conversation parser was only handling "message" payload types,
missing tool calls entirely. Codex uses separate response_items:

  - function_call: tool invocations with name, arguments, call_id
  - reasoning: thinking summaries (encrypted content, visible summary)
  - message: user/assistant text (previously the only type handled)

Changes:
- Parse function_call payloads and accumulate as tool_calls array
- Attach tool_calls to the next assistant message, or flush them as a standalone assistant message when none follows
- Parse reasoning payloads and extract summary text as thinking
- Add _parse_codex_arguments() helper to handle JSON string arguments

This fixes the dashboard not showing Codex tool calls like exec_command,
read_file, etc.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
teernisse
2026-02-25 15:20:08 -05:00
parent be2dd6a4fb
commit 7cf51427b7

View File

@@ -108,8 +108,15 @@ class ConversationMixin:
return messages return messages
def _parse_codex_conversation(self, session_id): def _parse_codex_conversation(self, session_id):
"""Parse Codex JSONL conversation format.""" """Parse Codex JSONL conversation format.
Codex uses separate response_items for different content types:
- message: user/assistant text messages
- function_call: tool invocations (name, arguments, call_id)
- reasoning: thinking summaries (encrypted content, visible summary)
"""
messages = [] messages = []
pending_tool_calls = [] # Accumulate tool calls to attach to next assistant message
conv_file = self._find_codex_transcript_file(session_id) conv_file = self._find_codex_transcript_file(session_id)
@@ -123,16 +130,54 @@ class ConversationMixin:
if not isinstance(entry, dict): if not isinstance(entry, dict):
continue continue
# Codex format: type="response_item", payload.type="message"
if entry.get("type") != "response_item": if entry.get("type") != "response_item":
continue continue
payload = entry.get("payload", {}) payload = entry.get("payload", {})
if not isinstance(payload, dict): if not isinstance(payload, dict):
continue continue
if payload.get("type") != "message":
payload_type = payload.get("type")
timestamp = entry.get("timestamp", "")
# Handle function_call (tool invocations)
if payload_type == "function_call":
tool_call = {
"name": payload.get("name", "unknown"),
"input": self._parse_codex_arguments(payload.get("arguments", "{}")),
}
pending_tool_calls.append(tool_call)
continue continue
# Handle reasoning (thinking summaries)
if payload_type == "reasoning":
summary_parts = payload.get("summary", [])
if summary_parts:
thinking_text = []
for part in summary_parts:
if isinstance(part, dict) and part.get("type") == "summary_text":
thinking_text.append(part.get("text", ""))
if thinking_text:
# Flush any pending tool calls first
if pending_tool_calls:
messages.append({
"role": "assistant",
"content": "",
"tool_calls": pending_tool_calls,
"timestamp": timestamp,
})
pending_tool_calls = []
# Add thinking as assistant message
messages.append({
"role": "assistant",
"content": "",
"thinking": "\n".join(thinking_text),
"timestamp": timestamp,
})
continue
# Handle message (user/assistant text)
if payload_type == "message":
role = payload.get("role", "") role = payload.get("role", "")
content_parts = payload.get("content", []) content_parts = payload.get("content", [])
if not isinstance(content_parts, list): if not isinstance(content_parts, list):
@@ -146,7 +191,6 @@ class ConversationMixin:
text_parts = [] text_parts = []
for part in content_parts: for part in content_parts:
if isinstance(part, dict): if isinstance(part, dict):
# Codex uses "input_text" for user, "output_text" for assistant
text = part.get("text", "") text = part.get("text", "")
if text: if text:
# Skip injected context (AGENTS.md, environment, permissions) # Skip injected context (AGENTS.md, environment, permissions)
@@ -160,16 +204,58 @@ class ConversationMixin:
continue continue
text_parts.append(text) text_parts.append(text)
if text_parts and role in ("user", "assistant"): if role == "user" and text_parts:
# Flush any pending tool calls before user message
if pending_tool_calls:
messages.append({ messages.append({
"role": role, "role": "assistant",
"content": "\n".join(text_parts), "content": "",
"timestamp": entry.get("timestamp", ""), "tool_calls": pending_tool_calls,
"timestamp": timestamp,
}) })
pending_tool_calls = []
messages.append({
"role": "user",
"content": "\n".join(text_parts),
"timestamp": timestamp,
})
elif role == "assistant":
msg = {
"role": "assistant",
"content": "\n".join(text_parts) if text_parts else "",
"timestamp": timestamp,
}
# Attach any pending tool calls to this assistant message
if pending_tool_calls:
msg["tool_calls"] = pending_tool_calls
pending_tool_calls = []
if text_parts or msg.get("tool_calls"):
messages.append(msg)
except json.JSONDecodeError: except json.JSONDecodeError:
continue continue
# Flush any remaining pending tool calls
if pending_tool_calls:
messages.append({
"role": "assistant",
"content": "",
"tool_calls": pending_tool_calls,
"timestamp": "",
})
except OSError: except OSError:
pass pass
return messages return messages
def _parse_codex_arguments(self, arguments_str):
"""Parse Codex function_call arguments (JSON string or dict)."""
if isinstance(arguments_str, dict):
return arguments_str
if isinstance(arguments_str, str):
try:
return json.loads(arguments_str)
except json.JSONDecodeError:
return {"raw": arguments_str}
return {}