refactor(server): extract context.py into focused modules

Split the monolithic context.py (117 lines) into five purpose-specific
modules following single-responsibility principle:

- config.py: Server-level constants (DATA_DIR, SESSIONS_DIR, PORT,
  STALE_EVENT_AGE, _state_lock)
- agents.py: Agent-specific paths and caches (CLAUDE_PROJECTS_DIR,
  CODEX_SESSIONS_DIR, discovery caches)
- auth.py: Authentication token generation/validation for spawn endpoint
- spawn_config.py: Spawn feature configuration (PENDING_SPAWNS_DIR,
  rate limiting, projects watcher thread)
- zellij.py: Zellij binary resolution and session management constants

This refactoring improves:
- Code navigation: Find relevant constants by domain, not alphabetically
- Testing: Each module can be tested in isolation
- Import clarity: Mixins import only what they need
- Future maintenance: Changes to one domain don't risk breaking others

All mixins updated to import from new module locations. Tests updated
to use new import paths.

Includes PROPOSED_CODE_FILE_REORGANIZATION_PLAN.md documenting the
rationale and mapping from old to new locations.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
teernisse
2026-02-27 11:05:39 -05:00
parent 69175f08f9
commit 1fb4a82b39
20 changed files with 683 additions and 191 deletions

View File

@@ -3,7 +3,9 @@ import os
import subprocess
import time
from amc_server.context import SESSIONS_DIR, ZELLIJ_BIN, ZELLIJ_PLUGIN, _DISMISSED_MAX, _dismissed_codex_ids
from amc_server.agents import _DISMISSED_MAX, _dismissed_codex_ids
from amc_server.config import SESSIONS_DIR
from amc_server.zellij import ZELLIJ_BIN, ZELLIJ_PLUGIN
from amc_server.logging_utils import LOGGER

View File

@@ -1,7 +1,7 @@
import json
import os
from amc_server.context import EVENTS_DIR
from amc_server.config import EVENTS_DIR
class ConversationMixin:

View File

@@ -5,18 +5,99 @@ import subprocess
import time
from datetime import datetime, timezone
from amc_server.context import (
from amc_server.agents import (
CODEX_ACTIVE_WINDOW,
CODEX_SESSIONS_DIR,
SESSIONS_DIR,
_CODEX_CACHE_MAX,
_codex_pane_cache,
_codex_transcript_cache,
_dismissed_codex_ids,
)
from amc_server.config import SESSIONS_DIR
from amc_server.spawn_config import PENDING_SPAWNS_DIR
from amc_server.logging_utils import LOGGER
def _parse_session_timestamp(session_ts):
"""Parse Codex session timestamp to Unix time. Returns None on failure."""
if not session_ts:
return None
try:
# Codex uses ISO format, possibly with Z suffix or +00:00
ts_str = session_ts.replace('Z', '+00:00')
dt = datetime.fromisoformat(ts_str)
return dt.timestamp()
except (ValueError, TypeError, AttributeError):
return None
def _match_pending_spawn(session_cwd, session_start_ts):
    """Match a Codex session to a pending spawn by CWD and timestamp.

    Args:
        session_cwd: Working directory of the Codex session.
        session_start_ts: The session's START timestamp (ISO string from
            Codex metadata). IMPORTANT: must be the start time, not file
            mtime, to avoid false matches with pre-existing sessions that
            were merely recently active.

    Returns:
        The matched spawn_id (the pending record file is deleted as a
        side effect), or None when no record matches.
    """
    if not PENDING_SPAWNS_DIR.exists():
        return None

    normalized_cwd = os.path.normpath(session_cwd) if session_cwd else ""
    if not normalized_cwd:
        return None

    # Without a parseable start time we cannot safely correlate.
    session_start_unix = _parse_session_timestamp(session_start_ts)
    if session_start_unix is None:
        return None

    try:
        for pending_file in PENDING_SPAWNS_DIR.glob('*.json'):
            try:
                record = json.loads(pending_file.read_text())
            except (json.JSONDecodeError, OSError):
                continue
            if not isinstance(record, dict):
                continue
            # Only correlate codex spawns with codex sessions.
            if record.get('agent_type') != 'codex':
                continue
            # Working directories must agree after normalization.
            if os.path.normpath(record.get('project_path', '')) != normalized_cwd:
                continue
            # The session must have STARTED after the spawn was initiated;
            # comparing start time (not mtime) prevents claiming an older
            # session that happens to be active right now.
            if session_start_unix < record.get('timestamp', 0):
                continue
            # Match found - claim the spawn_id and remove the record.
            matched_id = record.get('spawn_id')
            try:
                pending_file.unlink()
            except OSError:
                pass
            LOGGER.info(
                'Matched Codex session (cwd=%s) to pending spawn_id=%s',
                session_cwd, matched_id,
            )
            return matched_id
    except OSError:
        pass
    return None
class SessionDiscoveryMixin:
def _discover_active_codex_sessions(self):
"""Find active Codex sessions and create/update session files with Zellij pane info."""
@@ -131,6 +212,13 @@ class SessionDiscoveryMixin:
session_ts = payload.get("timestamp", "")
last_event_at = datetime.fromtimestamp(mtime, tz=timezone.utc).isoformat()
# Check for spawn_id: preserve existing, or match to pending spawn
# Use session_ts (start time) not mtime to avoid false matches
# with pre-existing sessions that were recently active
spawn_id = existing.get("spawn_id")
if not spawn_id:
spawn_id = _match_pending_spawn(cwd, session_ts)
session_data = {
"session_id": session_id,
"agent": "codex",
@@ -145,6 +233,8 @@ class SessionDiscoveryMixin:
"zellij_pane": zellij_pane or existing.get("zellij_pane", ""),
"transcript_path": str(jsonl_file),
}
if spawn_id:
session_data["spawn_id"] = spawn_id
if context_usage:
session_data["context_usage"] = context_usage
elif existing.get("context_usage"):

View File

@@ -1,8 +1,8 @@
import json
import urllib.parse
import amc_server.context as ctx
from amc_server.context import DASHBOARD_DIR
import amc_server.auth as auth
from amc_server.config import DASHBOARD_DIR
from amc_server.logging_utils import LOGGER
@@ -148,10 +148,10 @@ class HttpMixin:
content_type = content_types.get(ext, "application/octet-stream")
# Inject auth token into index.html for spawn endpoint security
if file_path == "index.html" and ctx._auth_token:
if file_path == "index.html" and auth._auth_token:
content = content.replace(
b"<!-- AMC_AUTH_TOKEN -->",
f'<script>window.AMC_AUTH_TOKEN = "{ctx._auth_token}";</script>'.encode(),
f'<script>window.AMC_AUTH_TOKEN = "{auth._auth_token}";</script>'.encode(),
)
# No caching during development

View File

@@ -2,7 +2,7 @@ import json
import os
from pathlib import Path
from amc_server.context import (
from amc_server.agents import (
CLAUDE_PROJECTS_DIR,
CODEX_SESSIONS_DIR,
_CODEX_CACHE_MAX,

View File

@@ -37,7 +37,7 @@ class SkillsMixin:
Checks SKILL.md (canonical) first, then falls back to skill.md,
prompt.md, README.md for description extraction. Parses YAML
frontmatter if present to extract the description field.
frontmatter if present to extract name and description fields.
Returns:
List of {name: str, description: str} dicts.
@@ -53,35 +53,41 @@ class SkillsMixin:
if not skill_dir.is_dir() or skill_dir.name.startswith("."):
continue
description = ""
# Check files in priority order
meta = {"name": "", "description": ""}
# Check files in priority order, accumulating metadata
# (earlier files take precedence for each field)
for md_name in ["SKILL.md", "skill.md", "prompt.md", "README.md"]:
md_file = skill_dir / md_name
if md_file.exists():
try:
content = md_file.read_text()
description = self._extract_description(content)
if description:
parsed = self._parse_frontmatter(content)
if not meta["name"] and parsed["name"]:
meta["name"] = parsed["name"]
if not meta["description"] and parsed["description"]:
meta["description"] = parsed["description"]
if meta["description"]:
break
except OSError:
pass
skills.append({
"name": skill_dir.name,
"description": description or f"Skill: {skill_dir.name}",
"name": meta["name"] or skill_dir.name,
"description": meta["description"] or f"Skill: {skill_dir.name}",
})
return skills
def _extract_description(self, content: str) -> str:
"""Extract description from markdown content.
def _parse_frontmatter(self, content: str) -> dict:
"""Extract name and description from markdown YAML frontmatter.
Handles YAML frontmatter (looks for 'description:' field) and
falls back to first meaningful line after frontmatter.
Returns:
Dict with 'name' and 'description' keys (both str, may be empty).
"""
result = {"name": "", "description": ""}
lines = content.splitlines()
if not lines:
return ""
return result
# Check for YAML frontmatter
frontmatter_end = 0
@@ -91,33 +97,37 @@ class SkillsMixin:
if stripped == "---":
frontmatter_end = i + 1
break
# Look for description field in frontmatter
if stripped.startswith("description:"):
# Extract value after colon
desc = stripped[len("description:"):].strip()
# Remove quotes if present
if desc.startswith('"') and desc.endswith('"'):
desc = desc[1:-1]
elif desc.startswith("'") and desc.endswith("'"):
desc = desc[1:-1]
# Handle YAML multi-line indicators (>- or |-)
if desc in (">-", "|-", ">", "|", ""):
# Multi-line: read the next indented line
if i + 1 < len(lines):
next_line = lines[i + 1].strip()
if next_line and not next_line.startswith("---"):
return next_line[:100]
elif desc:
return desc[:100]
# Check each known frontmatter field
for field in ("name", "description"):
if stripped.startswith(f"{field}:"):
val = stripped[len(field) + 1:].strip()
# Remove quotes if present
if val.startswith('"') and val.endswith('"'):
val = val[1:-1]
elif val.startswith("'") and val.endswith("'"):
val = val[1:-1]
# Handle YAML multi-line indicators (>- or |-)
if val in (">-", "|-", ">", "|", ""):
if i + 1 < len(lines):
next_line = lines[i + 1].strip()
if next_line and not next_line.startswith("---"):
val = next_line
else:
val = ""
else:
val = ""
if val:
result[field] = val[:100]
# Fall back to first meaningful line after frontmatter
for line in lines[frontmatter_end:]:
stripped = line.strip()
# Skip empty lines, headers, comments, and frontmatter delimiters
if stripped and not stripped.startswith("#") and not stripped.startswith("<!--") and stripped != "---":
return stripped[:100]
# Fall back to first meaningful line for description
if not result["description"]:
for line in lines[frontmatter_end:]:
stripped = line.strip()
if stripped and not stripped.startswith("#") and not stripped.startswith("<!--") and stripped != "---":
result["description"] = stripped[:100]
break
return ""
return result
def _enumerate_codex_skills(self) -> list[dict]:
"""Enumerate Codex skills from cache and user directory.
@@ -161,19 +171,19 @@ class SkillsMixin:
if not skill_dir.is_dir() or skill_dir.name.startswith("."):
continue
description = ""
# Check SKILL.md for description
meta = {"name": "", "description": ""}
# Check SKILL.md for metadata
skill_md = skill_dir / "SKILL.md"
if skill_md.exists():
try:
content = skill_md.read_text()
description = self._extract_description(content)
meta = self._parse_frontmatter(content)
except OSError:
pass
skills.append({
"name": skill_dir.name,
"description": description or f"User skill: {skill_dir.name}",
"name": meta["name"] or skill_dir.name,
"description": meta["description"] or f"User skill: {skill_dir.name}",
})
return skills

View File

@@ -5,13 +5,52 @@ import subprocess
import time
import uuid
from amc_server.context import (
PROJECTS_DIR, SESSIONS_DIR, ZELLIJ_BIN, ZELLIJ_SESSION,
from amc_server.auth import validate_auth_token
from amc_server.config import SESSIONS_DIR
from amc_server.spawn_config import (
PENDING_SPAWNS_DIR, PENDING_SPAWN_TTL,
PROJECTS_DIR,
_spawn_lock, _spawn_timestamps, SPAWN_COOLDOWN_SEC,
validate_auth_token,
)
from amc_server.zellij import ZELLIJ_BIN, ZELLIJ_SESSION
from amc_server.logging_utils import LOGGER
def _write_pending_spawn(spawn_id, project_path, agent_type):
    """Persist a pending spawn record for later correlation by discovery.

    Zellij does not propagate environment variables into pane commands,
    so Codex sessions are matched back to their originating spawn via
    this record (by CWD + timestamp) instead.
    """
    PENDING_SPAWNS_DIR.mkdir(parents=True, exist_ok=True)
    record = {
        'spawn_id': spawn_id,
        'project_path': str(project_path),
        'agent_type': agent_type,
        'timestamp': time.time(),
    }
    target = PENDING_SPAWNS_DIR / f'{spawn_id}.json'
    try:
        target.write_text(json.dumps(record))
    except OSError:
        # Best-effort: correlation simply won't happen for this spawn.
        LOGGER.warning('Failed to write pending spawn file for %s', spawn_id)
def _cleanup_stale_pending_spawns():
    """Delete pending spawn records older than PENDING_SPAWN_TTL seconds."""
    if not PENDING_SPAWNS_DIR.exists():
        return
    cutoff = time.time() - PENDING_SPAWN_TTL
    try:
        for record_file in PENDING_SPAWNS_DIR.glob('*.json'):
            try:
                # mtime strictly older than the cutoff -> stale.
                if record_file.stat().st_mtime < cutoff:
                    record_file.unlink()
            except OSError:
                # Skip records we cannot stat or remove.
                continue
    except OSError:
        pass
# Agent commands (AC-8, AC-9: full autonomous permissions)
AGENT_COMMANDS = {
'claude': ['claude', '--dangerously-skip-permissions'],
@@ -215,6 +254,16 @@ class SpawnMixin:
def _spawn_agent_in_project_tab(self, project, project_path, agent_type, spawn_id):
"""Spawn an agent in a project-named Zellij tab."""
# Clean up stale pending spawns opportunistically
_cleanup_stale_pending_spawns()
# For Codex, write pending spawn record before launching.
# Zellij doesn't propagate env vars to pane commands, so discovery
# will match the session to this record by CWD + timestamp.
# (Claude doesn't need this - amc-hook writes spawn_id directly)
if agent_type == 'codex':
_write_pending_spawn(spawn_id, project_path, agent_type)
# Check session exists
if not self._check_zellij_session_exists():
return {