Split the monolithic context.py (117 lines) into five purpose-specific modules following single-responsibility principle: - config.py: Server-level constants (DATA_DIR, SESSIONS_DIR, PORT, STALE_EVENT_AGE, _state_lock) - agents.py: Agent-specific paths and caches (CLAUDE_PROJECTS_DIR, CODEX_SESSIONS_DIR, discovery caches) - auth.py: Authentication token generation/validation for spawn endpoint - spawn_config.py: Spawn feature configuration (PENDING_SPAWNS_DIR, rate limiting, projects watcher thread) - zellij.py: Zellij binary resolution and session management constants This refactoring improves: - Code navigation: Find relevant constants by domain, not alphabetically - Testing: Each module can be tested in isolation - Import clarity: Mixins import only what they need - Future maintenance: Changes to one domain don't risk breaking others All mixins updated to import from new module locations. Tests updated to use new import paths. Includes PROPOSED_CODE_FILE_REORGANIZATION_PLAN.md documenting the rationale and mapping from old to new locations. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
190 lines
7.4 KiB
Python
190 lines
7.4 KiB
Python
"""SkillsMixin: Enumerate available skills for Claude and Codex agents.
|
|
|
|
Skills are agent-global (not session-specific), loaded from well-known
|
|
filesystem locations for each agent type.
|
|
"""
|
|
|
|
import json
|
|
from pathlib import Path
|
|
|
|
|
|
class SkillsMixin:
    """Mixin for enumerating agent skills for autocomplete.

    Expects the host class to provide ``_send_json(status, payload)``
    (e.g. an HTTP request handler) — TODO confirm against the handler class.
    All filesystem access is best-effort: unreadable or malformed skill
    files are skipped rather than raised.
    """

    def _serve_skills(self, agent: str) -> None:
        """Serve autocomplete config for an agent type.

        Args:
            agent: Agent type ('claude' or 'codex'); any other value
                falls back to 'claude'.

        Response JSON:
            {trigger: '/' or '$', skills: [{name, description}, ...]}
        """
        if agent == "codex":
            trigger = "$"
            skills = self._enumerate_codex_skills()
        else:  # Default to claude
            trigger = "/"
            skills = self._enumerate_claude_skills()

        # Sort alphabetically by name (case-insensitive) for stable UI order
        skills.sort(key=lambda s: s["name"].lower())

        self._send_json(200, {"trigger": trigger, "skills": skills})

    def _enumerate_claude_skills(self) -> list[dict]:
        """Enumerate Claude skills from ~/.claude/skills/.

        Checks SKILL.md (canonical) first, then falls back to skill.md,
        prompt.md, README.md for description extraction. Parses YAML
        frontmatter if present to extract name and description fields.

        Returns:
            List of {name: str, description: str} dicts.
            Empty list if directory doesn't exist or enumeration fails.
        """
        skills: list[dict] = []
        skills_dir = Path.home() / ".claude/skills"

        if not skills_dir.exists():
            return skills

        for skill_dir in skills_dir.iterdir():
            # Skip plain files and hidden entries (e.g. .git, .DS_Store)
            if not skill_dir.is_dir() or skill_dir.name.startswith("."):
                continue

            meta = {"name": "", "description": ""}
            # Check files in priority order, accumulating metadata
            # (earlier files take precedence for each field)
            for md_name in ["SKILL.md", "skill.md", "prompt.md", "README.md"]:
                md_file = skill_dir / md_name
                if md_file.exists():
                    try:
                        content = md_file.read_text()
                        parsed = self._parse_frontmatter(content)
                        if not meta["name"] and parsed["name"]:
                            meta["name"] = parsed["name"]
                        if not meta["description"] and parsed["description"]:
                            meta["description"] = parsed["description"]
                        if meta["description"]:
                            break
                    except (OSError, UnicodeDecodeError):
                        # Fix: previously only OSError was caught, so a
                        # non-UTF-8 skill file crashed the whole enumeration.
                        # Skip unreadable/undecodable files (best-effort).
                        pass

            skills.append({
                "name": meta["name"] or skill_dir.name,
                "description": meta["description"] or f"Skill: {skill_dir.name}",
            })

        return skills

    def _parse_frontmatter(self, content: str) -> dict:
        """Extract name and description from markdown YAML frontmatter.

        Hand-rolled, intentionally minimal YAML subset: top-of-file
        ``---`` fences, ``field: value`` lines, optional single/double
        quoting, and one-line folded/literal scalars (``>-``, ``|-``,
        ``>``, ``|``). If no description is found in frontmatter, falls
        back to the first non-heading, non-comment body line. Values are
        truncated to 100 characters.

        Returns:
            Dict with 'name' and 'description' keys (both str, may be empty).
        """
        result = {"name": "", "description": ""}
        lines = content.splitlines()
        if not lines:
            return result

        # Check for YAML frontmatter
        frontmatter_end = 0
        if lines[0].strip() == "---":
            for i, line in enumerate(lines[1:], start=1):
                stripped = line.strip()
                if stripped == "---":
                    # Closing fence: body starts on the next line
                    frontmatter_end = i + 1
                    break
                # Check each known frontmatter field
                for field in ("name", "description"):
                    if stripped.startswith(f"{field}:"):
                        val = stripped[len(field) + 1:].strip()
                        # Remove quotes if present
                        if val.startswith('"') and val.endswith('"'):
                            val = val[1:-1]
                        elif val.startswith("'") and val.endswith("'"):
                            val = val[1:-1]
                        # Handle YAML multi-line indicators (>- or |-):
                        # take only the single following line as the value
                        if val in (">-", "|-", ">", "|", ""):
                            if i + 1 < len(lines):
                                next_line = lines[i + 1].strip()
                                if next_line and not next_line.startswith("---"):
                                    val = next_line
                                else:
                                    val = ""
                            else:
                                val = ""
                        if val:
                            result[field] = val[:100]

        # Fall back to first meaningful line for description
        if not result["description"]:
            for line in lines[frontmatter_end:]:
                stripped = line.strip()
                if stripped and not stripped.startswith("#") and not stripped.startswith("<!--") and stripped != "---":
                    result["description"] = stripped[:100]
                    break

        return result

    def _enumerate_codex_skills(self) -> list[dict]:
        """Enumerate Codex skills from cache and user directory.

        Sources:
            - ~/.codex/vendor_imports/skills-curated-cache.json (curated)
            - ~/.codex/skills/*/ (user-installed)

        Note: No deduplication — if curated and user skills share a name,
        both appear in the list (per plan Known Limitations).

        Returns:
            List of {name: str, description: str} dicts.
            Empty list if no skills found.
        """
        skills: list[dict] = []

        # 1. Curated skills from cache
        cache_file = Path.home() / ".codex/vendor_imports/skills-curated-cache.json"
        if cache_file.exists():
            try:
                data = json.loads(cache_file.read_text())
                for skill in data.get("skills", []):
                    # Use 'id' preferentially, fall back to 'name'
                    name = skill.get("id") or skill.get("name", "")
                    # Use 'shortDescription' preferentially, fall back to 'description'
                    desc = skill.get("shortDescription") or skill.get("description", "")
                    if name:
                        skills.append({
                            "name": name,
                            "description": desc[:100] if desc else f"Skill: {name}",
                        })
            except (json.JSONDecodeError, OSError, UnicodeDecodeError):
                # Continue without curated skills on parse/read/decode error
                # (fix: UnicodeDecodeError was previously uncaught)
                pass

        # 2. User-installed skills
        user_skills_dir = Path.home() / ".codex/skills"
        if user_skills_dir.exists():
            for skill_dir in user_skills_dir.iterdir():
                if not skill_dir.is_dir() or skill_dir.name.startswith("."):
                    continue

                meta = {"name": "", "description": ""}
                # Check SKILL.md for metadata
                skill_md = skill_dir / "SKILL.md"
                if skill_md.exists():
                    try:
                        content = skill_md.read_text()
                        meta = self._parse_frontmatter(content)
                    except (OSError, UnicodeDecodeError):
                        # Fall back to directory-name defaults below
                        # (fix: UnicodeDecodeError was previously uncaught)
                        pass

                skills.append({
                    "name": meta["name"] or skill_dir.name,
                    "description": meta["description"] or f"User skill: {skill_dir.name}",
                })

        return skills