From dfcb4c93eb733518d35039023587b4a887fd2537 Mon Sep 17 00:00:00 2001 From: teernisse Date: Thu, 19 Feb 2026 11:51:11 -0500 Subject: [PATCH] misc --- .DS_Store | Bin 0 -> 6148 bytes .beads/issues.jsonl | 22 +++ .gitignore | 3 + IMPLEMENTATION_PLAN.md | 427 +++++++++++++++++++++++++++++++++++++++++ tests/.DS_Store | Bin 0 -> 6148 bytes 5 files changed, 452 insertions(+) create mode 100644 .DS_Store create mode 100644 IMPLEMENTATION_PLAN.md create mode 100644 tests/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0e0fbb0933ba72e6708c5f664c9166850c535ed9 GIT binary patch literal 6148 zcmeHKy-EW?5S}q-IE-jO3)3CgizyY0GsY(fSZjWWgoFzwq7aaK2(1GaV&xl%>1-^m zQd#>H&g>4!UM|5-MP^|4yW5%BZ})@Sxdi|$UDk^Lc>wUKF?uW1%`x_KOWBf)Y!afG zqYk}xvskG`oo*^x2UG!7;IApb-)(X??mNtwH;My+#fVQuU>|) zhv}I5w=vyw=bTuh82~c0jux~MCkT$BsmJ9Wzdh!*Ew|)#qA6?Io8T*i%;(X5BdE=u zS?hPM&*eNh8vDXI$#q%h`4hYlFmD$6r1b?fN%Ow5TO2>^aX`ZF@*nIDZz}hqw=;*k zhD9;XI`HF`X<(Vw(SRQH$x|CQu9IGO4TC3q-?zFu`R)qlM>zacEk?|*_PYvD&1Um+ z7PVIeQ~_0Bq5z){0X4=@Ft%up4(iMmfY761WAw~-3C;-wL&4Z0dC1VF653RhD+bzh z8V@8c6pSs}bU?X$pfID{P|&c`{DBDv3@vJ}3aA1}1v2(E&-MR!@%=xUq;IN#D)6rq zK(AaWmuO2;TT`3kS{qP1rN+i_u|<PLnlbVP{6H8A#ujOWOn(H74BDvz HKdQhNd@`X- literal 0 HcmV?d00001 diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index d533432..9d8f608 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -1,4 +1,6 @@ +{"id":"bd-10e","title":"Epic: Split sync_cmd.rs into focused modules","description":"Break the 1843-line god module (sync_cmd.rs) into a sync/ directory with focused files: types.rs, diff.rs, checkpoint.rs, throttle.rs, single.rs, batch.rs, mod.rs. Also deduplicates sync_inner/sync_one_alias_inner overlap (~150 lines). No file exceeds 350 lines.\n\n## Child Beads (execution order)\n1. **bd-1re** -- Extract types.rs and diff.rs (data types + pure functions, lowest risk)\n2. **bd-1yq** -- Extract checkpoint.rs and throttle.rs (self-contained components)\n3. 
**bd-1fe** -- Extract single.rs and batch.rs, dedup sync_inner (final extraction + dedup)\n\n## Prerequisite\nAll 3 beads depend on bd-2x7 (sync OutputMode conversion) being complete first, so the output formatting is using emit() before the split.","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-13T15:23:40.369546Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:29.285016Z","compaction_level":0,"original_size":0} {"id":"bd-132","title":"Add semantic search with embeddings","description":"## What\nAdd semantic/fuzzy search as alternative to text search. Compute embeddings for endpoint descriptions and schema names during index building. Use cosine similarity for ranking.\n\n## Acceptance Criteria\n- [ ] --semantic flag enables embedding-based search\n- [ ] Results ranked by cosine similarity\n- [ ] Fallback to text search when embeddings unavailable\n\n## Files\n- CREATE: src/core/embeddings.rs\n- MODIFY: src/core/search.rs (add semantic search mode)","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:57.268115Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:45.253877Z","compaction_level":0,"original_size":0,"labels":["future","phase3"],"dependencies":[{"issue_id":"bd-132","depends_on_id":"bd-2e4","type":"blocks","created_at":"2026-02-12T16:42:45.253860Z","created_by":"tayloreernisse"},{"issue_id":"bd-132","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:31:57.269278Z","created_by":"tayloreernisse"}]} +{"id":"bd-14o","title":"Epic: Output Sink Abstraction","description":"Replace robot:bool threading (29+ branches across 12 command handlers) with an OutputMode enum, HumanDisplay trait, and emit() function. Commands produce typed data; output layer decides presentation. Eliminates tangled output concerns from business logic and enables future output formats (YAML) without touching command handlers.\n\n## Child Beads (execution order)\n1. 
**bd-2im** -- Create OutputMode enum, HumanDisplay trait, emit() (foundation)\n2. **bd-bq8** -- Convert tags command and main.rs (proof of concept)\n3. **bd-2wp** -- Convert show, schemas, search, diff (query commands, parallel)\n4. **bd-29k** -- Convert aliases, doctor, cache_cmd (management commands, parallel with bd-2wp)\n5. **bd-1nm** -- Convert list command (most complex query command)\n6. **bd-71g** -- Convert fetch command (documented exception, signature-only)\n7. **bd-2x7** -- Convert sync command (most complex, last conversion)\n8. **bd-2x6** -- Clean up remnants, consolidate resolution logic","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-13T15:23:37.625935Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:23.459859Z","compaction_level":0,"original_size":0} {"id":"bd-161","title":"Epic: Sync and Updates","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:23.251895Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:56:19.814767Z","closed_at":"2026-02-12T20:56:19.814725Z","close_reason":"All child beads completed","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-16o","title":"Wire fetch command with local file, stdin, and remote URL support","description":"## Background\nThe fetch command is the primary entry point for getting specs into swagger-cli. It orchestrates: HTTP download (or local file/stdin read), format detection, YAML normalization, index building, pointer validation, and crash-consistent cache write. 
It supports auth headers, bearer tokens, auth profiles, --force overwrite, and robot output.\n\n## Approach\nImplement src/cli/fetch.rs with FetchArgs struct and async execute() function:\n\n**FetchArgs (clap derive):**\n- url: String (positional, required — can be URL, file path, or \"-\" for stdin)\n- alias: String (--alias, required)\n- header: Vec (--header, repeatable)\n- auth_header: Vec (--auth-header, alias for --header)\n- bearer: Option (--bearer)\n- auth_profile: Option (--auth-profile)\n- force: bool (--force)\n- timeout_ms: u64 (--timeout-ms, default 10000)\n- max_bytes: u64 (--max-bytes, default 26214400)\n- retries: u32 (--retries, default 2)\n- input_format: Option (--input-format, values: auto/json/yaml)\n- resolve_external_refs: bool (--resolve-external-refs)\n- ref_allow_host: Vec (--ref-allow-host, repeatable)\n- ref_max_depth: u32 (--ref-max-depth, default 3)\n- ref_max_bytes: u64 (--ref-max-bytes, default 10MB)\n- allow_private_host: Vec (--allow-private-host, repeatable)\n- allow_insecure_http: bool (--allow-insecure-http)\n\n**Execute flow:**\n1. Validate alias format (validate_alias)\n2. Check if alias exists — error unless --force\n3. Resolve auth (merge --auth-profile with explicit headers, --bearer)\n4. Determine source: URL (http/https), local file (file:// or path), or stdin (-)\n5. For URL: use AsyncHttpClient.fetch_spec() with SSRF/HTTPS policy\n6. For local file: canonicalize path, read bytes directly (no network policy)\n7. For stdin: read all bytes from stdin\n8. Detect format, normalize to JSON\n9. Parse as serde_json::Value\n10. Build index (build_index)\n11. Write cache (write_cache with all artifacts)\n12. Output robot JSON or human success message\n\n## Acceptance Criteria\n- [ ] `swagger-cli fetch ./petstore.json --alias pet` succeeds (local file)\n- [ ] `swagger-cli fetch https://... 
--alias pet --robot` outputs JSON with ok:true, data.alias, data.endpoint_count\n- [ ] `swagger-cli fetch - --alias stdin-api` reads from stdin\n- [ ] Alias validation rejects \"../bad\" before any network call\n- [ ] Existing alias without --force returns ALIAS_EXISTS (exit 6)\n- [ ] --force overwrites existing alias\n- [ ] --bearer TOKEN adds Authorization: Bearer TOKEN header\n- [ ] --auth-profile loads from config.toml\n- [ ] Robot output includes: alias, url, version, title, endpoint_count, schema_count, cached_at, source_format, cache_dir, files, content_hash\n- [ ] Auth header values never appear in output or error messages\n\n## Files\n- MODIFY: src/cli/fetch.rs (FetchArgs, execute, auth resolution, source routing)\n- MODIFY: src/output/robot.rs (add output_fetch function)\n- MODIFY: src/output/human.rs (add output_fetch function)\n\n## TDD Anchor\nRED: Write integration test `test_fetch_local_file` — use assert_cmd to run `swagger-cli fetch tests/fixtures/petstore.json --alias test-pet --robot` with SWAGGER_CLI_HOME set to tempdir. Assert exit 0, stdout JSON has ok:true.\nGREEN: Implement full fetch pipeline.\nVERIFY: `cargo test test_fetch_local_file`\n\nAdditional tests:\n- test_fetch_alias_exists_error\n- test_fetch_force_overwrites\n- test_fetch_stdin\n- test_fetch_bearer_auth\n- test_fetch_yaml_file (if YAML fixture exists)\n\n## Edge Cases\n- stdin (\"-\") is not a URL — don't try to HTTP-fetch it\n- Local file paths must be canonicalized (resolve symlinks, relative paths) before reading\n- file:// URLs must be converted to local paths (strip scheme)\n- If auth-profile references an EnvVar source, resolve the env var at fetch time\n- Robot output: redact --bearer and --auth-header values even in success output\n\n## Dependency Context\nUses AsyncHttpClient from bd-3b6 (async HTTP client with SSRF protection). Uses build_index, detect_format, normalize_to_json from bd-189 (spec format detection and index building). 
Uses CacheManager.write_cache and validate_alias from bd-1ie (cache write path). Uses Config for auth profiles from bd-1sb (configuration system).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:26:35.220966Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.255563Z","closed_at":"2026-02-12T19:25:35.255378Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["fetch","phase1"],"dependencies":[{"issue_id":"bd-16o","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:34:06.059316Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-1ie","type":"blocks","created_at":"2026-02-12T16:34:06.154074Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-1sb","type":"blocks","created_at":"2026-02-12T16:34:06.248426Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3b6","type":"blocks","created_at":"2026-02-12T16:34:06.005417Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:26:35.222663Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3ny","type":"parent-child","created_at":"2026-02-12T16:26:35.222248Z","created_by":"tayloreernisse"}]} {"id":"bd-189","title":"Implement spec format detection, YAML normalization, and index building","description":"## Background\nAfter downloading raw spec bytes, swagger-cli must detect the input format (JSON vs YAML), normalize YAML to JSON, parse the spec as serde_json::Value, and build a SpecIndex from it. The index building is the heart of the query performance -- it pre-extracts endpoints, schemas, and tags into a compact, sorted structure with JSON pointers back to raw.json. All pointers must be validated.\n\n## Approach\nCreate src/core/indexer.rs with:\n\n**Format detection:** `detect_format(bytes, filename_hint, content_type_hint) -> Format` where Format is Json or Yaml. 
Priority: explicit --input-format flag > Content-Type header > file extension > content sniffing (try JSON parse first, fall back to YAML).\n\n**YAML normalization:** `normalize_to_json(bytes, format) -> Result>` -- if YAML, parse with serde_yaml then serialize to serde_json. If JSON, pass through (validate it parses).\n\n**Index building:** `build_index(raw_json: &serde_json::Value, content_hash: &str, generation: u64) -> Result`:\n1. Extract info.title, info.version, openapi version\n2. Iterate paths.* -> methods -> build IndexedEndpoint with: path, method (uppercased), summary, description, operation_id, tags, deprecated, parameters (name/location/required/desc), request_body_required, request_body_content_types, security_schemes (effective: operation-level overrides root-level), security_required, operation_ptr (JSON pointer format: /paths/~1pet~1{petId}/get)\n3. Iterate components.schemas.* -> build IndexedSchema with name, schema_ptr\n4. Compute tags from endpoints + root-level tags -> IndexedTag with endpoint_count\n5. Sort deterministically: endpoints by (path ASC, method_rank ASC), schemas by (name ASC), tags by (name ASC)\n6. Validate ALL operation_ptr and schema_ptr resolve in the raw Value\n7. Set index_version to current version constant (1)\n\n**Canonical method ranking:** GET=0, POST=1, PUT=2, PATCH=3, DELETE=4, OPTIONS=5, HEAD=6, TRACE=7, unknown=99.\n\n**JSON pointer encoding:** Path segments use RFC 6901: `~` -> `~0`, `/` -> `~1`. 
So `/pet/{petId}` becomes `/paths/~1pet~1{petId}`.\n\n## Acceptance Criteria\n- [ ] JSON input detected and passed through correctly\n- [ ] YAML input detected and normalized to equivalent JSON\n- [ ] build_index extracts correct endpoint count from petstore spec (19 endpoints)\n- [ ] Endpoints sorted by path ASC, method_rank ASC (deterministic)\n- [ ] Schemas sorted by name ASC\n- [ ] Tags have correct endpoint_count\n- [ ] All operation_ptr values resolve in raw Value (validated during build_index)\n- [ ] Invalid pointer causes fetch failure (not silent corruption)\n- [ ] JSON pointer encoding handles /pet/{petId} correctly (escapes /)\n- [ ] Security inheritance: operation without security inherits root; operation with empty [] means no auth\n\n## Files\n- CREATE: src/core/indexer.rs (detect_format, normalize_to_json, build_index, method_rank, json_pointer_encode, validate_pointers)\n- MODIFY: src/core/mod.rs (pub mod indexer;)\n\n## TDD Anchor\nRED: Write `test_build_index_petstore` -- load tests/fixtures/petstore.json as Value, call build_index, assert endpoint_count == 19 and endpoints are sorted.\nGREEN: Implement full index building.\nVERIFY: `cargo test test_build_index_petstore`\n\nAdditional tests:\n- test_detect_format_json, test_detect_format_yaml\n- test_yaml_normalization_roundtrip\n- test_json_pointer_encoding\n- test_method_rank_ordering\n- test_security_inheritance\n- test_invalid_pointer_rejected\n\n## Edge Cases\n- Some specs have paths with special chars in operation IDs -- don't assume alphanumeric\n- serde_yaml may produce different JSON than direct JSON parse (number types, null handling) -- normalize consistently\n- Large specs (8MB+ GitHub) should still build index in <1s\n- OpenAPI 3.1 may use `webhooks` key -- ignore for MVP (only extract from `paths`)\n- Tags defined at root level but not used by any operation should still appear with endpoint_count=0\n\n## Dependency Context\nUses SpecIndex, IndexedEndpoint, IndexedSchema, IndexedTag, 
IndexedParam types from bd-ilo (error types + core models bead).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:26:35.194671Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:41:12.926777Z","closed_at":"2026-02-12T17:41:12.926730Z","close_reason":"Format detection, YAML normalization, index building with pointer validation","compaction_level":0,"original_size":0,"labels":["fetch","phase1"],"dependencies":[{"issue_id":"bd-189","depends_on_id":"bd-3ny","type":"parent-child","created_at":"2026-02-12T16:26:35.195659Z","created_by":"tayloreernisse"},{"issue_id":"bd-189","depends_on_id":"bd-ilo","type":"blocks","created_at":"2026-02-12T16:26:35.196142Z","created_by":"tayloreernisse"}]} @@ -8,22 +10,39 @@ {"id":"bd-1cv","title":"Implement global network policy (offline/auto/online-only)","description":"## Background\nThe --network global flag controls whether swagger-cli makes network calls. \"auto\" (default) allows network when needed. \"offline\" blocks all network calls (fetch/sync fail with OFFLINE_MODE). \"online-only\" flags when network would be needed but is not available. This is critical for CI reproducibility and agent sandboxing.\n\n## Approach\n\n### NetworkPolicy Enum (src/core/network.rs)\nCreate `NetworkPolicy` enum with three variants:\n- **Auto** (default): Allow network calls when needed, no restrictions. Standard behavior.\n- **Offline**: Block ALL outbound network calls preemptively. Any command that would make a network request (fetch, sync) returns `SwaggerCliError::OfflineMode` immediately without attempting the call. Commands that are index-only (list, search, show, tags) work normally.\n- **OnlineOnly**: Allow network calls but surface a clear, distinct error if a network call would be needed AND DNS resolution fails or the host is unreachable. 
The error is different from OfflineMode — it indicates \"you requested online-only mode but network is unavailable\" rather than \"network calls are blocked by policy.\"\n\n### Implementation\n1. Parse --network flag in CLI (already in Cli struct from skeleton bead)\n2. Create `NetworkPolicy` enum and `check_network_policy()` function in `src/core/network.rs`\n3. Check policy before any HTTP call in AsyncHttpClient — if Offline, return `SwaggerCliError::OfflineMode` without making the request\n4. For OnlineOnly: attempt the request, but if it fails due to network issues, return a specific `SwaggerCliError::NetworkUnavailable` (different exit code or error code than OfflineMode)\n5. `SWAGGER_CLI_NETWORK` env var as alternative to --network flag (flag takes precedence)\n\n### Scope boundaries\n- **Local file sources (file:// paths, stdin):** Network policy does NOT apply. These are local I/O operations. `fetch ./local-spec.yaml --network offline` should succeed because no network call is made.\n- **Stdin sources:** Same as local files — no network needed, policy irrelevant.\n- **sync --dry-run in offline mode:** ALLOWED. Dry-run compares cached state without actually fetching, so no network call is needed. 
Returns cached state comparison only.\n\n## Acceptance Criteria\n- [ ] NetworkPolicy enum with Auto, Offline, OnlineOnly variants in src/core/network.rs\n- [ ] --network offline causes fetch (remote URL) to fail with OFFLINE_MODE error (exit 15)\n- [ ] --network offline allows fetch from local file path (no network needed)\n- [ ] --network offline allows fetch from stdin (no network needed)\n- [ ] --network offline allows list/search/show/tags (index-only, no network needed)\n- [ ] --network offline allows sync --dry-run (cached comparison only, no network)\n- [ ] --network online-only surfaces clear error when network is unavailable (distinct from OFFLINE_MODE)\n- [ ] --network auto allows all commands (default behavior, no restrictions)\n- [ ] SWAGGER_CLI_NETWORK=offline env var works same as --network offline flag\n- [ ] Flag takes precedence over env var when both are set\n- [ ] Robot error for offline mode has code OFFLINE_MODE with suggestion to remove --network flag or unset env var\n- [ ] Robot error for online-only network failure has code NETWORK_UNAVAILABLE with distinct suggestion\n\n## Edge Cases\n- **sync --dry-run in offline mode:** Allowed — returns cached state comparison only, no actual fetch happens. This is a read-only operation on cached data.\n- **Local file + offline mode:** Allowed — `fetch ./spec.yaml --network offline` succeeds because it is local I/O, not a network call.\n- **OnlineOnly vs Offline distinction:** Offline blocks proactively (never attempts the call). OnlineOnly attempts the call and fails with a specific error if the network is down. 
This matters for agents that want to know \"was this blocked by policy or by actual network unavailability?\"\n- **Mixed sources:** If a future version supports specs that reference remote $ref URLs but the base spec is local, the network policy should apply to the remote $ref resolution (not the local file read).\n\n## Files\n- CREATE: src/core/network.rs (NetworkPolicy enum, check_network_policy function)\n- MODIFY: src/core/http.rs (check policy before fetch)\n- MODIFY: src/cli/fetch.rs (check policy at start, skip for local/stdin sources)\n- MODIFY: src/cli/sync.rs (check policy at start, allow --dry-run in offline)\n\n## TDD Anchor\nRED: Write `test_offline_blocks_fetch` — set SWAGGER_CLI_NETWORK=offline, run fetch with remote URL, assert exit 15 and OFFLINE_MODE error.\nRED: Write `test_offline_allows_local_file` — set offline, fetch local file, assert success.\nRED: Write `test_offline_allows_list` — set offline, run list on cached alias, assert success.\nRED: Write `test_online_only_network_unavailable` — set online-only, mock DNS failure, assert NETWORK_UNAVAILABLE error.\nGREEN: Implement policy check.\nVERIFY: `cargo test test_offline_blocks_fetch`\n\n## Dependency Context\nModifies AsyncHttpClient in bd-3b6 (async HTTP client) to check network policy before requests. Uses SwaggerCliError variants (OfflineMode) from bd-ilo (error types and core data models).","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.156478Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:36:49.525619Z","closed_at":"2026-02-12T19:36:49.525325Z","close_reason":"Implemented NetworkPolicy enum (Auto/Offline/OnlineOnly), resolve_policy with CLI flag > env var precedence, check_remote_fetch enforcement. 
Integrated into AsyncHttpClient and fetch command.","compaction_level":0,"original_size":0,"labels":["global","phase2"],"dependencies":[{"issue_id":"bd-1cv","depends_on_id":"bd-16o","type":"blocks","created_at":"2026-02-12T16:29:50.158591Z","created_by":"tayloreernisse"},{"issue_id":"bd-1cv","depends_on_id":"bd-3b6","type":"blocks","created_at":"2026-02-12T16:29:50.158232Z","created_by":"tayloreernisse"},{"issue_id":"bd-1cv","depends_on_id":"bd-3ll","type":"parent-child","created_at":"2026-02-12T16:29:50.157784Z","created_by":"tayloreernisse"}]} {"id":"bd-1d4","title":"Implement cache lifecycle command with stats, prune, and LRU eviction","description":"## Background\nThe cache command manages cache growth with stats, pruning, and LRU eviction. Separate from doctor (which validates health). Uses coalesced last_accessed timestamps for LRU ordering.\n\n## Approach\nImplement src/cli/cache.rs with CacheArgs and execute():\n\n**CacheArgs:** stats (bool, default action), prune_stale (bool), prune_threshold (u32, default 90 days), max_total_mb (Option), dry_run (bool).\n\n**Operations:**\n- Stats: list_aliases with size computation, show per-alias and total bytes\n- Prune: find aliases older than threshold, delete (or dry-run report)\n- LRU eviction: sort by last_accessed ASC, delete oldest until total < max_total_mb (or dry-run)\n\n## Acceptance Criteria\n- [ ] cache --stats shows per-alias sizes and total\n- [ ] cache --prune-stale deletes aliases >90 days old\n- [ ] cache --prune-threshold 30 overrides default\n- [ ] cache --max-total-mb 500 evicts oldest-accessed aliases\n- [ ] --dry-run shows what would be pruned without deleting\n- [ ] Robot output: aliases[], total_bytes, pruned[], evicted[]\n\n## Edge Cases\n- **No aliases cached:** cache --stats returns ok:true with total_bytes:0, empty aliases array.\n- **Concurrent prune + fetch:** If a fetch writes a new alias while prune is deleting, the new alias should not be affected. 
Prune operates on snapshot of alias list taken at start.\n- **last_accessed coalescing:** LRU ordering uses coalesced last_accessed (10-min granularity). Hot-read aliases within the same 10-min window have identical last_accessed — tie-break by fetched_at.\n- **--max-total-mb smaller than single largest alias:** Evict everything except the largest, then warn that target cannot be reached.\n- **Alias being synced during prune:** Skip locked aliases with warning.\n\n## Files\n- MODIFY: src/cli/cache.rs (CacheArgs, execute, prune, evict)\n- MODIFY: src/output/robot.rs (add output_cache)\n- MODIFY: src/output/human.rs (add output_cache)\n\n## TDD Anchor\nRED: Write `test_cache_prune_stale` — create alias with old fetched_at, run cache --prune-stale --robot, assert alias appears in pruned[].\nGREEN: Implement stale detection and pruning.\nVERIFY: `cargo test test_cache_prune_stale`\n\n## Dependency Context\nUses CacheManager (list_aliases, delete_alias) from bd-3ea. Uses is_stale from CacheMetadata.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.122830Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.261575Z","closed_at":"2026-02-12T19:25:35.261539Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["health","phase2"],"dependencies":[{"issue_id":"bd-1d4","depends_on_id":"bd-1y0","type":"parent-child","created_at":"2026-02-12T16:29:50.123937Z","created_by":"tayloreernisse"},{"issue_id":"bd-1d4","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:29:50.124722Z","created_by":"tayloreernisse"},{"issue_id":"bd-1d4","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:29:50.124351Z","created_by":"tayloreernisse"}]} {"id":"bd-1dj","title":"Implement show command with ref expansion and pointer navigation","description":"## Background\nThe show command displays full details for a specific endpoint. 
Unlike list/search, show MUST load raw.json to extract the complete operation subtree (parameters with schemas, request body, response schemas, etc.). It uses the operation_ptr from the index to locate the exact JSON node in raw.json, avoiding full-spec parsing. Optional --expand-refs resolves internal $ref pointers with cycle detection.\n\n## Approach\nImplement src/cli/show.rs with ShowArgs and execute():\n\n**ShowArgs:** alias (Option), path (String, positional), method (Option), format (String, default \"pretty\"), expand_refs (bool), max_depth (u32, default 3).\n\n**Execute flow:**\n1. Resolve alias, load_index\n2. Find matching endpoint(s) by path in index\n3. If multiple methods and --method not specified → USAGE_ERROR listing available methods\n4. Get operation_ptr from matched IndexedEndpoint\n5. CacheManager::load_raw(alias, &meta) — loads raw.json as Value, validates raw_hash\n6. Navigate to operation subtree using JSON pointer (operation_ptr)\n7. If --expand-refs: recursively resolve internal $ref pointers (starting with #/) with cycle detection (max_depth). External refs get annotated as {\"$external_ref\": \"...\"}. Circular refs get {\"$circular_ref\": \"...\"}. Add warnings to meta.warnings[].\n8. Output robot JSON or human formatted details\n\n**JSON Pointer navigation:** Parse `/paths/~1pet~1{petId}/get` → navigate Value tree. `~1` → `/`, `~0` → `~`.\n\n**Ref expansion:** Walk the Value tree. When encountering `{\"$ref\": \"#/components/schemas/Pet\"}`, resolve by navigating the pointer in raw Value. Track visited refs for cycle detection. 
Stop at max_depth.\n\n## Acceptance Criteria\n- [ ] `swagger-cli show petstore \"/pet/{petId}\" --robot` returns full operation details\n- [ ] Multiple methods without --method returns USAGE_ERROR with available methods\n- [ ] --method POST selects specific method\n- [ ] operation_ptr correctly navigates to the right subtree in raw.json\n- [ ] --expand-refs resolves internal refs up to max_depth\n- [ ] Circular refs produce $circular_ref annotation (not infinite loop)\n- [ ] External refs produce $external_ref annotation + warning\n- [ ] raw_hash mismatch returns CacheIntegrity error\n- [ ] Robot output includes: path, method, summary, description, tags, operation_id, parameters, request_body, responses, security\n\n## Files\n- MODIFY: src/cli/show.rs (ShowArgs, execute, pointer navigation, ref expansion)\n- CREATE: src/core/refs.rs (expand_refs, resolve_pointer, cycle detection)\n- MODIFY: src/core/mod.rs (pub mod refs;)\n- MODIFY: src/output/robot.rs (add output_show)\n- MODIFY: src/output/human.rs (add output_show)\n\n## TDD Anchor\nRED: Write `test_show_endpoint_details` — fetch petstore fixture, run show \"/pet/{petId}\" --method GET --robot, parse JSON, assert data.path == \"/pet/{petId}\" and data.method == \"GET\".\nGREEN: Implement pointer navigation and output.\nVERIFY: `cargo test test_show_endpoint_details`\n\nAdditional tests:\n- test_show_multiple_methods_error\n- test_expand_refs_basic\n- test_expand_refs_circular_detection\n- test_expand_refs_external_annotation\n\n## Edge Cases\n- JSON pointer decoding: `~1` → `/`, `~0` → `~` (order matters: decode ~1 first, then ~0)\n- Path matching should be exact (not regex) — \"/pet/{petId}\" must match literally\n- Some operations may have no request body, no parameters, or no security — handle None gracefully\n- ref expansion must handle refs-to-refs (transitive resolution)\n\n## Dependency Context\nUses CacheManager.load_index and load_raw from bd-3ea (cache read). Uses index types from bd-ilo. 
Requires a fetched spec in cache.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:27:27.091022Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.258550Z","closed_at":"2026-02-12T19:25:35.258511Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["phase1","query"],"dependencies":[{"issue_id":"bd-1dj","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:27:27.093222Z","created_by":"tayloreernisse"},{"issue_id":"bd-1dj","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:27:27.092774Z","created_by":"tayloreernisse"},{"issue_id":"bd-1dj","depends_on_id":"bd-epk","type":"parent-child","created_at":"2026-02-12T16:27:27.092375Z","created_by":"tayloreernisse"}]} +{"id":"bd-1fe","title":"Extract sync single.rs and batch.rs, deduplicate sync_inner, finalize mod.rs","description":"## Background\nFinal extraction step. Move single-alias sync logic and batch sync logic into dedicated files. Also deduplicate sync_inner() and sync_one_alias_inner() which share ~150 lines of identical HTTP client setup + conditional fetch + normalize + diff + write logic.\n\n## Approach\n1. Create src/cli/sync/single.rs containing:\n - sync_one_alias() (line 351) -- the reusable single-alias sync\n - sync_one_alias_inner() (line 396) -- the core implementation\n - DELETE sync_inner() (lines 557-700) -- currently duplicates sync_one_alias_inner\n - The execute() entry point for single-alias mode calls sync_one_alias() directly instead of sync_inner()\n - HumanDisplay impls for SyncOutput (from Epic 1 conversion) stay with the output type or move here\n2. Create src/cli/sync/batch.rs containing:\n - sync_all_inner() (line 786) -- the concurrent stream-based batch sync\n - All Arc/AtomicUsize/stream::iter logic\n - Uses checkpoint::{load,save,remove}_checkpoint and throttle::PerHostThrottle from sibling modules\n3. 
Finalize src/cli/sync/mod.rs:\n - Args struct (lines 27-70) stays here\n - pub fn execute() (line 1073) stays here as the entry point\n - Re-exports: pub use types::*, pub(crate) use diff::compute_diff, etc.\n4. Delete src/cli/sync_cmd.rs (now fully replaced by sync/ directory)\n\nDeduplication strategy for sync_inner/sync_one_alias_inner:\n- sync_inner() currently: loads meta, builds HTTP client, does conditional fetch, normalizes, diffs, writes cache, outputs\n- sync_one_alias_inner() does the same thing with slightly different parameter threading\n- Collapse: execute(single mode) calls sync_one_alias() with args extracted, then formats output from AliasSyncResult\n- This eliminates ~150 lines of duplicate code\n\n## Acceptance Criteria\n- src/cli/sync/single.rs contains sync_one_alias and sync_one_alias_inner (~250 lines max)\n- src/cli/sync/batch.rs contains sync_all_inner (~300 lines max)\n- sync_inner() function is DELETED (deduped into sync_one_alias)\n- No file in src/cli/sync/ exceeds 350 lines\n- Old sync_cmd.rs is deleted\n- All integration tests pass (sync single and sync --all)\n- All unit tests pass (diff tests, checkpoint tests, etc.)\n- cargo clippy --all-targets -- -D warnings passes\n- cargo fmt --check passes\n\n## Files\n- CREATE: src/cli/sync/single.rs\n- CREATE: src/cli/sync/batch.rs\n- MODIFY: src/cli/sync/mod.rs (Args, execute, re-exports)\n- DELETE: src/cli/sync_cmd.rs (must ask user permission per AGENTS.md rule)\n\n## TDD Anchor\nRED: test_single_sync_uses_sync_one_alias -- verify that single-mode execute() produces an AliasSyncResult (not a SyncOutput directly), confirming the dedup worked\nGREEN: Refactor execute(single) to call sync_one_alias then format\nVERIFY: cargo test sync\n\n## Edge Cases\n- sync_inner() output functions (output_no_changes, output_changes) were deleted in Epic 1 -- confirm they're gone\n- batch.rs captures many Arc-wrapped values in async closures -- ensure all moved values are Clone\n- PerHostThrottle is passed 
as Option<&PerHostThrottle> to sync_one_alias -- keep this signature\n- The #[allow(clippy::too_many_arguments)] on sync_one_alias_inner may need updating after dedup\n\n## Dependency Context\nDepends on bd-1re (types/diff extracted) and bd-1yq (checkpoint/throttle extracted). Uses types from sync::types and diff from sync::diff.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:27:13.212359Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:31:15.824046Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1fe","depends_on_id":"bd-10e","type":"parent-child","created_at":"2026-02-13T15:27:13.213387Z","created_by":"tayloreernisse"},{"issue_id":"bd-1fe","depends_on_id":"bd-1yq","type":"blocks","created_at":"2026-02-13T15:31:15.824027Z","created_by":"tayloreernisse"}]} {"id":"bd-1ie","title":"Implement cache write path with crash-consistent protocol and alias validation","description":"## Background\nThe cache write path is the most safety-critical code in swagger-cli. It implements a crash-consistent multi-file commit protocol: raw.source -> raw.json -> index.json are written as .tmp files with fsync, renamed atomically, then meta.json is written LAST as the commit marker. Per-alias file locking prevents concurrent write corruption. Alias names are validated against a strict regex to prevent path traversal.\n\n## Approach\n**Alias validation:** Implement `validate_alias()` that checks against `^[A-Za-z0-9][A-Za-z0-9._-]{0,63}$`, rejects path separators (/ \\), `..`, reserved device names (CON, PRN, NUL, AUX, COM1-9, LPT1-9 case-insensitive), and leading dots. Return SwaggerCliError::Usage on failure.\n\n**Cache directory management:** Implement `CacheManager` struct with `new()` (uses Config::cache_dir()), `new_with_path()` (for tests), `alias_dir()`, `ensure_dirs()`. 
Cache layout: `{cache_dir}/aliases/{alias}/`.\n\n**File locking:** Use `fs2::FileExt::try_lock_exclusive()` on `{alias_dir}/.lock` with a bounded timeout (default 1000ms, poll every 50ms). Return SwaggerCliError::CacheLocked on timeout.\n\n**Hash computation:** Implement `compute_hash(bytes: &[u8]) -> String` returning `\"sha256:{hex}\"` using sha2 crate.\n\n**Crash-consistent write:** Implement `CacheManager::write_cache()` that:\n1. Acquires exclusive lock on .lock (bounded timeout)\n2. Computes content_hash, raw_hash, next generation, index_hash\n3. Writes raw.source.tmp, raw.json.tmp, index.json.tmp (each with sync_all before rename)\n4. Renames each .tmp to final name (sync_all after each rename)\n5. Writes meta.json.tmp LAST (commit marker)\n6. Renames meta.json.tmp -> meta.json (sync_all)\n7. Best-effort fsync on alias directory fd (Unix only)\n8. Releases lock\n\n**Parameters for write_cache:** Takes alias, raw_source_bytes, raw_json_bytes, index (SpecIndex), url, spec_version, spec_title, source_format, etag, last_modified, previous_generation (Option).\n\n## Acceptance Criteria\n- [ ] validate_alias(\"petstore\") -> Ok\n- [ ] validate_alias(\"../etc/passwd\") -> Err(Usage)\n- [ ] validate_alias(\"CON\") -> Err(Usage) (case-insensitive)\n- [ ] validate_alias(\".hidden\") -> Err(Usage)\n- [ ] validate_alias(\"a\".repeat(65)) -> Err(Usage)\n- [ ] write_cache creates all 4 files + .lock in correct directory\n- [ ] meta.json is the last file written (verified by checking file mtimes or write order)\n- [ ] compute_hash produces deterministic sha256:hex output\n- [ ] Lock timeout returns CacheLocked error (not hang)\n- [ ] Hash values are deterministic (same input -> same sha256 output)\n- [ ] Files survive process kill between steps (no partial meta.json)\n\n## Files\n- CREATE: src/core/cache.rs (CacheManager, validate_alias, compute_hash, write_cache, lock helpers)\n- MODIFY: src/core/mod.rs (pub mod cache;)\n\n## TDD Anchor\nRED: Write 
`test_validate_alias_rejects_traversal` -- assert validate_alias(\"../etc\") returns Err with Usage variant.\nGREEN: Implement regex validation + blocklist checks.\nVERIFY: `cargo test test_validate_alias`\n\nAdditional tests:\n- test_validate_alias_accepts_valid (petstore, my-api, v1.0, API_2)\n- test_validate_alias_rejects_reserved (CON, con, NUL, COM1)\n- test_write_cache_creates_all_files\n- test_compute_hash_deterministic\n\n## Edge Cases\n- On macOS, fsync on directory fd may not be supported -- handle gracefully (best-effort)\n- Lock file must be created if it doesn't exist (open with create flag)\n- If alias directory doesn't exist, create it before acquiring lock\n- sync_all() is critical -- without it, data may be in OS page cache but not on disk after rename\n- Generation starts at 1 for new aliases, increments from previous for updates\n\n## Dependency Context\nUses SpecIndex and CacheMetadata types from bd-ilo (error types and core data models). Uses SwaggerCliError variants (CacheLocked, Cache, Usage) from bd-ilo.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:25:15.503359Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:41:12.870780Z","closed_at":"2026-02-12T17:41:12.870733Z","close_reason":"CacheManager with validate_alias, compute_hash, crash-consistent write_cache","compaction_level":0,"original_size":0,"labels":["infrastructure","phase1"],"dependencies":[{"issue_id":"bd-1ie","depends_on_id":"bd-hcb","type":"parent-child","created_at":"2026-02-12T16:25:15.504778Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ie","depends_on_id":"bd-ilo","type":"blocks","created_at":"2026-02-12T16:25:15.505170Z","created_by":"tayloreernisse"}]} +{"id":"bd-1iz","title":"Create CommandContext struct","description":"## Background\nEvery command handler independently captures Instant::now(), creates CacheManager/AsyncCache, resolves network policy, and loads config. 
This is ~15-20 lines of identical scaffolding per command. CommandContext encapsulates this common setup.\n\n## Approach\n1. Create src/cli/context.rs with:\n ```rust\n use std::path::{Path, PathBuf};\n use std::time::{Duration, Instant};\n use crate::core::cache::AsyncCache;\n use crate::core::config::cache_dir;\n use crate::core::network::{NetworkPolicy, resolve_policy};\n use crate::errors::SwaggerCliError;\n use crate::output::OutputMode;\n \n pub struct CommandContext {\n pub cache: AsyncCache,\n pub mode: OutputMode,\n pub network_policy: NetworkPolicy,\n pub config_path: Option,\n start: Instant,\n }\n \n impl CommandContext {\n pub fn new(\n mode: OutputMode,\n network_flag: &str,\n config_override: Option<&Path>,\n ) -> Result {\n Ok(Self {\n cache: AsyncCache::new(cache_dir()),\n mode,\n network_policy: resolve_policy(network_flag)?,\n config_path: config_override.map(PathBuf::from),\n start: Instant::now(),\n })\n }\n \n pub fn elapsed(&self) -> Duration {\n self.start.elapsed()\n }\n }\n ```\n2. Update src/cli/mod.rs to add: pub mod context;\n3. Update main.rs to create CommandContext once and pass to dispatch:\n ```rust\n let ctx = CommandContext::new(mode, cli.network.as_str(), cli.config.as_deref())?;\n match &cli.command {\n Commands::Tags(args) => tags::execute(args, &ctx).await,\n // ...\n }\n ```\n4. 
Convert tags.rs first as proof of concept:\n - Change execute(args, mode) to execute(args, ctx: &CommandContext)\n - Replace AsyncCache::new(cache_dir()) with ctx.cache\n - Replace Instant::now() with ctx.start (use ctx.elapsed())\n - Replace mode with ctx.mode\n\n## Acceptance Criteria\n- CommandContext struct in src/cli/context.rs with cache, mode, network_policy, config_path, start fields\n- CommandContext::new() creates all shared state\n- ctx.elapsed() returns duration since construction\n- main.rs creates CommandContext and passes to tags (proof of concept)\n- tags.rs uses &CommandContext instead of individual parameters\n- All tests pass\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- CREATE: src/cli/context.rs\n- MODIFY: src/cli/mod.rs (add pub mod context)\n- MODIFY: src/main.rs (create CommandContext, pass to tags)\n- MODIFY: src/cli/tags.rs (accept &CommandContext)\n\n## TDD Anchor\nRED: test_command_context_creates_with_auto_network -- CommandContext::new(Human, \"auto\", None) succeeds, network_policy is Auto\nGREEN: Implement CommandContext::new()\nVERIFY: cargo test context\n\nAdditional tests:\n- test_command_context_offline_policy\n- test_command_context_elapsed_increases (sleep 10ms, assert elapsed >= 10ms)\n- test_command_context_invalid_network_flag (should error)\n\n## Edge Cases\n- robot-docs command is sync and doesnt use cache -- it still receives &CommandContext but ignores cache field\n- CommandContext::new() can fail on invalid network flag -- error before any command runs\n- config_override as None means use default config path -- cache_dir() handles this\n\n## Dependency Context\nDepends on bd-2im (OutputMode) and bd-39e (AsyncCache). 
Uses both types in CommandContext struct.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:29:05.915818Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:31:15.958485Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1iz","depends_on_id":"bd-2im","type":"blocks","created_at":"2026-02-13T15:31:15.919415Z","created_by":"tayloreernisse"},{"issue_id":"bd-1iz","depends_on_id":"bd-2l2","type":"parent-child","created_at":"2026-02-13T15:29:05.916573Z","created_by":"tayloreernisse"},{"issue_id":"bd-1iz","depends_on_id":"bd-39e","type":"blocks","created_at":"2026-02-13T15:31:15.958468Z","created_by":"tayloreernisse"}]} {"id":"bd-1ky","title":"Implement sync --all with async concurrency and resumable execution","description":"## Background\nsync --all synchronizes all aliases concurrently. It uses bounded async concurrency (--jobs, default 4) with per-host throttling (--per-host, default 2) to avoid abusive request patterns. Supports resumable execution via checkpoint files and a failure budget (--max-failures) to limit blast radius.\n\n## Approach\nBuild on the single-alias sync from the previous bead:\n\n**Async concurrency:** Use tokio::sync::Semaphore for global concurrency (--jobs). Use a per-host semaphore map (HashMap) for --per-host throttling. Process aliases via futures::stream::StreamExt::buffer_unordered.\n\n**Resumable sync (--resume):** Write a checkpoint file ({cache_dir}/sync-checkpoint.json) after each alias completes. Contains: aliases_completed, aliases_failed, started_at. On --resume, skip already-completed aliases.\n\n**Failure budget (--max-failures):** Track failure count. When exceeded, abort remaining aliases and report partial results.\n\n**Retry-After:** Honor response header (seconds or HTTP-date format). Use exponential backoff + jitter when header absent.\n\n**Per-alias output:** Collect results from all aliases. Report per-alias success/failure in robot output. 
Don't abort on single-alias failure (unless failure budget exceeded).\n\n## Acceptance Criteria\n- [ ] sync --all processes all aliases concurrently\n- [ ] --jobs limits concurrent syncs (verified: never more than N in-flight)\n- [ ] --per-host limits requests to same host\n- [ ] --resume skips already-completed aliases\n- [ ] --max-failures aborts after N failures\n- [ ] Per-alias success/failure reported in robot output\n- [ ] Retry-After header honored\n\n## Edge Cases\n- **Empty aliases (no specs cached):** sync --all with no aliases should succeed with empty results, not error.\n- **All aliases fail:** If every alias fails sync, the overall command should still exit 0 with per-alias failure details (unless --max-failures triggered, then exit non-zero).\n- **Checkpoint file corruption:** If sync-checkpoint.json is malformed, delete it and start fresh (don't error).\n- **Per-host semaphore with many aliases to same host:** 10 aliases all pointing to api.example.com with --per-host 2 means only 2 concurrent to that host, even if --jobs allows more.\n- **Retry-After as HTTP-date:** Parse both formats: seconds (e.g., \"120\") and HTTP-date (e.g., \"Thu, 01 Jan 2026 00:00:00 GMT\").\n\n## Files\n- MODIFY: src/cli/sync.rs (add sync_all, concurrency control, checkpoint, failure budget)\n\n## TDD Anchor\nRED: Write `test_sync_all_concurrent` — set up 4 aliases, run sync --all --jobs 2 --robot, verify all 4 synced and output includes per-alias results.\nGREEN: Implement concurrent sync with semaphore.\nVERIFY: `cargo test test_sync_all_concurrent`\n\n## Dependency Context\nExtends single-alias sync from bd-3f4 (sync command). Uses tokio Semaphore for concurrency control. 
Uses CacheManager.list_aliases from bd-3ea (cache read path).","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:28:47.465114Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:55:47.313562Z","closed_at":"2026-02-12T20:55:47.313518Z","close_reason":"Completed by agent swarm","compaction_level":0,"original_size":0,"labels":["phase2","sync"],"dependencies":[{"issue_id":"bd-1ky","depends_on_id":"bd-161","type":"parent-child","created_at":"2026-02-12T16:28:47.466140Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ky","depends_on_id":"bd-3f4","type":"blocks","created_at":"2026-02-12T16:34:06.315322Z","created_by":"tayloreernisse"}]} {"id":"bd-1lj","title":"Create CI/CD pipeline and cargo-deny configuration","description":"## Background\nThe CI/CD pipeline runs tests, linting, security audits, and multi-platform builds on GitLab CI. It also configures cargo-deny for license/advisory policy enforcement.\n\n## Approach\n\n### .gitlab-ci.yml Structure\n\n**Stages:** test, build, release\n\n**Test stage jobs:**\n- `test:unit` — `cargo test --lib` (unit tests only, no integration tests)\n- `test:integration` — `cargo test --test '*'` (integration tests in tests/ directory)\n- `lint` — `cargo fmt --check` + `cargo clippy -- -D warnings` (warnings are errors)\n- `security:deps` — `cargo-deny check` (license + advisory) + `cargo-audit` (RUSTSEC advisory DB)\n\n**Build stage:**\n- Build template (YAML anchor or extends) shared across 4 target jobs:\n - `build:aarch64-apple-darwin` — Apple Silicon macOS\n - `build:x86_64-apple-darwin` — Intel macOS\n - `build:x86_64-unknown-linux-gnu` — x86 Linux\n - `build:aarch64-unknown-linux-gnu` — ARM Linux\n- Each job: `cargo build --release --locked --target $TARGET`, upload binary as artifact\n\n**Release stage:**\n- `release` job (runs on tagged commits only):\n - Collect all 4 binaries from build artifacts\n - Generate `SHA256SUMS` file: `sha256sum swagger-cli-* > SHA256SUMS`\n - Sign with 
minisign: `minisign -S -m SHA256SUMS` (produces SHA256SUMS.minisig)\n - Upload all files to GitLab Package Registry via `curl` to Package Registry API\n- `docker` job (runs on tagged commits):\n - `docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG .`\n - `docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG`\n - Also tag as `:latest`\n\n### deny.toml Structure\n\n**[licenses]:**\n- `unlicensed = \"deny\"` — all deps must have a license\n- `allow = [\"MIT\", \"Apache-2.0\", \"BSD-2-Clause\", \"BSD-3-Clause\", \"ISC\", \"Zlib\", \"Unicode-3.0\", \"Unicode-DFS-2016\", \"OpenSSL\"]` — common OSS license allow-list\n- `copyleft = \"deny\"` — no copyleft licenses without explicit exception\n\n**[advisories]:**\n- `vulnerability = \"deny\"` — fail on known vulnerabilities\n- `unmaintained = \"warn\"` — warn but don't fail on unmaintained crates\n- `unsound = \"warn\"` — warn on unsound crates\n- `yanked = \"deny\"` — fail on yanked versions\n- `notice = \"warn\"` — warn on advisories with notices\n\n**[bans]:**\n- `multiple-versions = \"warn\"` — warn if same crate appears in multiple versions\n- `wildcards = \"deny\"` — no wildcard version specs\n- Specific ban list empty initially (add problematic crates as discovered)\n\n## Acceptance Criteria\n- [ ] .gitlab-ci.yml has test, build, release stages in correct order\n- [ ] test:unit runs `cargo test --lib`\n- [ ] test:integration runs `cargo test --test '*'`\n- [ ] lint job runs both fmt --check and clippy -D warnings\n- [ ] security:deps runs both cargo-deny check and cargo-audit\n- [ ] All 4 platform targets defined as separate build jobs\n- [ ] Build jobs use shared template/anchor to avoid duplication\n- [ ] Release job generates SHA256SUMS from all binaries\n- [ ] Release job signs SHA256SUMS with minisign\n- [ ] Release job uploads to GitLab Package Registry\n- [ ] Docker job builds and pushes to CI_REGISTRY\n- [ ] deny.toml license allow-list covers common OSS licenses\n- [ ] deny.toml advisory policy denies known 
vulnerabilities\n- [ ] `cargo deny check` passes locally after creation\n\n## Files\n- CREATE: .gitlab-ci.yml\n- CREATE: deny.toml\n\n## TDD Anchor\nValidate: `cargo deny check` passes locally (after deny.toml is created).\nVERIFY: `cargo deny check 2>&1 | head -5`\n\n## Edge Cases\n- **Cross-compilation toolchains:** macOS targets require macOS runners or cross-compilation tools. Verify CI has the right runners or use `cross` tool.\n- **cargo-deny not installed:** The security:deps job must install cargo-deny and cargo-audit before running. Use `cargo install` with version pinning.\n- **Minisign key management:** The release job needs the minisign private key as a CI secret variable. Document the required CI variables.\n- **Tag-only release trigger:** Release jobs must have `rules: - if: $CI_COMMIT_TAG` to avoid running on every push.\n- **Large binary artifacts:** Rust release binaries can be 10MB+. Verify CI artifact storage limits.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:31:32.392078Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:55:47.310857Z","closed_at":"2026-02-12T20:55:47.310807Z","close_reason":"Completed by agent swarm","compaction_level":0,"original_size":0,"labels":["ci","phase2"],"dependencies":[{"issue_id":"bd-1lj","depends_on_id":"bd-1lo","type":"parent-child","created_at":"2026-02-12T16:31:32.394604Z","created_by":"tayloreernisse"},{"issue_id":"bd-1lj","depends_on_id":"bd-a7e","type":"blocks","created_at":"2026-02-12T16:31:32.395053Z","created_by":"tayloreernisse"}]} {"id":"bd-1lo","title":"Epic: Distribution and CI","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:28.089610Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:28.090122Z","compaction_level":0,"original_size":0,"labels":["epic"]} +{"id":"bd-1nm","title":"Convert list command to OutputMode","description":"## Background\nlist.rs (898 lines) is the most feature-rich query command with 
filtering (method, tag, path regex), sorting, pagination, and an all-aliases variant. It has the most complex human output with tables, filter summaries, and pagination info.\n\nCurrent execute signature (line 135):\n```rust\npub async fn execute(args: &Args, robot_mode: bool) -> Result<(), SwaggerCliError>\n```\n\n## Approach\n\n### 1. Change execute() and all-aliases path to accept OutputMode\n- `execute()` at line 135: replace `robot_mode: bool` with `mode: OutputMode`\n- All-aliases branch (around line 307): propagate `mode`\n\n### 2. Impl HumanDisplay for ListOutput\n`ListOutput` struct (line 56):\n```rust\npub struct ListOutput {\n pub endpoints: Vec,\n pub total: usize,\n pub filtered: usize,\n pub applied_filters: BTreeMap,\n pub meta: ListMeta,\n}\n```\n`ListMeta` struct (line 98):\n```rust\npub struct ListMeta {\n pub alias: String,\n pub spec_version: Option,\n pub cached_at: String,\n pub duration_ms: u64,\n}\n```\n`EndpointEntry` struct (~line 65) with Tabled derive: `method`, `path`, `summary`, `tags`, `deprecated`\n\nHumanDisplay logic (currently at lines 269-297):\n- Header: \"API: {title} {version} -- {total} endpoints\"\n- Table via `render_table_or_empty(endpoints, \"No endpoints found\")`\n- Filter summary: \"Showing {filtered} of {total}\" -- only when filters active\n- Pagination: \"({applied_filters})\" after filter line\n\n### 3. Impl HumanDisplay for AllAliasesListOutput\n`AllAliasesListOutput` struct (line 65):\n```rust\npub struct AllAliasesListOutput {\n pub endpoints: Vec,\n pub total: usize,\n pub filtered: usize,\n pub applied_filters: BTreeMap,\n pub aliases_searched: usize,\n pub warnings: Vec,\n pub duration_ms: u64,\n}\n```\n`AllAliasEndpointEntry` adds `alias` column to the table.\n\nHuman output: same as ListOutput but with ALIAS column in table and warnings list.\n\n### 4. Replace branches with emit()\nReplace `if robot_mode { robot_success(...) } else { ... 
}` with `emit(&output, mode, \"list\", duration)`.\n\n## Acceptance Criteria\n- [ ] `list::execute()` accepts `OutputMode` instead of `bool`\n- [ ] `HumanDisplay for ListOutput` produces identical output to current lines 269-297\n- [ ] `HumanDisplay for AllAliasesListOutput` produces identical output with alias column\n- [ ] Zero `if robot_mode` / `if !robot_mode` branches in list.rs\n- [ ] Pagination footer: \"Showing {filtered} of {total}\" only when filters are active, \"filtered from {total_unfiltered}\" only when filtered < total\n- [ ] Integration tests pass (`test_list_endpoints`, `test_list_method_filter`, etc.)\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/list.rs (accept OutputMode, impl HumanDisplay for ListOutput + AllAliasesListOutput)\n- MODIFY: src/main.rs (pass mode to list at line 124)\n\n## TDD Anchor\nRED: `test_list_human_display_with_filters` -- create `ListOutput` with `applied_filters: {\"method\": \"GET\"}` and 3 endpoints, call `display_human(&mut Vec)`, assert contains \"API:\" header, table content, and \"Showing 3 of 10\" footer\nGREEN: Impl `HumanDisplay for ListOutput`\nVERIFY: `cargo test list`\n\nAdditional tests:\n- `test_list_empty_human_display` -- 0 endpoints, assert \"No endpoints found\"\n- `test_list_all_aliases_human_display` -- assert ALIAS column present\n- `test_list_no_filters_no_footer` -- when no filters applied, no \"Showing X of Y\" line\n\n## Edge Cases\n- Pagination footer varies: shows \"filtered from {total}\" ONLY when filters reduce the count\n- All-aliases output includes warnings list that may be empty -- skip warnings section if empty\n- Tags in endpoint rows may be empty -- display as empty string, not \"[]\"\n- `render_table_or_empty` from `src/output/table.rs` is reused unchanged\n\n## Dependency Context\nUses `OutputMode`, `HumanDisplay`, `emit()` from bd-2im. `render_table_or_empty` from `src/output/table.rs` (unchanged). 
Pattern validated by bd-bq8 (tags PoC).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:25:27.562031Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:11.377508Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1nm","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:25:27.563677Z","created_by":"tayloreernisse"},{"issue_id":"bd-1nm","depends_on_id":"bd-bq8","type":"blocks","created_at":"2026-02-13T15:31:09.202777Z","created_by":"tayloreernisse"}]} +{"id":"bd-1re","title":"Extract sync types.rs and diff.rs from sync_cmd.rs","description":"## Background\nsync_cmd.rs is 1843 lines. First extraction step: pull out all data types and the diff computation logic into dedicated files. This is the lowest-risk extraction since these are pure data types and pure functions with no side effects.\n\n## Approach\n1. Create src/cli/sync/ directory\n2. Create src/cli/sync/types.rs containing (from sync_cmd.rs):\n - EndpointKey (line 79)\n - SchemaDiff (line 85)\n - EndpointDiff (line 91)\n - ChangeSummary (line 98)\n - ChangeDetails (line 107)\n - SyncOutput (line 118)\n - AliasSyncResult (line 132)\n - SyncAllOutput (line 152)\n - All derive macros and serde attributes preserved exactly\n - pub(crate) visibility for types used within sync module, pub for types used by tests\n3. Create src/cli/sync/diff.rs containing:\n - MAX_DETAIL_ITEMS const (line 76)\n - endpoint_key() (line 208)\n - endpoint_fingerprint() (line 215)\n - compute_diff() (line 236)\n - Move ALL diff-related tests from sync_cmd::tests into diff.rs #[cfg(test)] mod tests\n4. Create src/cli/sync/mod.rs with:\n - pub mod types; pub mod diff;\n - Re-export what sync_cmd.rs currently exposes\n5. Update src/cli/mod.rs: replace pub mod sync_cmd with pub mod sync\n6. 
Keep remaining code in a temporary sync/legacy.rs or directly in mod.rs until later extraction beads\n\n## Acceptance Criteria\n- src/cli/sync/types.rs contains all 8 output/data structs\n- src/cli/sync/diff.rs contains compute_diff, endpoint_key, endpoint_fingerprint, MAX_DETAIL_ITEMS\n- All diff tests pass in their new location (test_diff_no_changes, test_diff_added_endpoint, test_diff_removed_endpoint, test_diff_modified_endpoint, test_diff_added_schema, test_diff_removed_schema, test_diff_endpoint_modified_by_params)\n- src/cli/mod.rs declares pub mod sync (not sync_cmd)\n- All integration tests pass unchanged\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- CREATE: src/cli/sync/mod.rs\n- CREATE: src/cli/sync/types.rs\n- CREATE: src/cli/sync/diff.rs\n- MODIFY: src/cli/mod.rs (pub mod sync_cmd -> pub mod sync)\n- MODIFY: src/main.rs (update import paths if needed)\n- The remaining sync_cmd.rs code stays in sync/mod.rs temporarily\n\n## TDD Anchor\nNo new tests -- move existing diff tests. 
After extraction:\nVERIFY: cargo test sync::diff -- verify all 7 diff tests pass in new location\nVERIFY: cargo test integration -- verify end-to-end sync tests pass\n\n## Edge Cases\n- Import paths change: anything that imports from crate::cli::sync_cmd must change to crate::cli::sync\n- The types need correct visibility: pub(crate) for internal sync use, pub for types in test assertions\n- compute_diff uses IndexedEndpoint from crate::core::spec -- import path unchanged\n\n## Dependency Context\nDepends on bd-2x7 (sync OutputMode conversion) being complete, so the output formatting is already using emit() before we split.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:26:33.816785Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:31:15.697260Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1re","depends_on_id":"bd-10e","type":"parent-child","created_at":"2026-02-13T15:26:33.819063Z","created_by":"tayloreernisse"},{"issue_id":"bd-1re","depends_on_id":"bd-2x7","type":"blocks","created_at":"2026-02-13T15:31:15.697222Z","created_by":"tayloreernisse"}]} {"id":"bd-1rk","title":"Implement cross-alias discovery (--all-aliases)","description":"## Background\n--all-aliases on list and search queries across every cached alias, returning results with an additional \"alias\" field per result. Useful for agents managing multiple APIs.\n\n## Approach\n1. In list execute: if --all-aliases, iterate all aliases via list_aliases(), load each index, filter/sort per-alias, merge results with \"alias\" field added to each endpoint item\n2. In search execute: same pattern — search each alias's index, merge results, re-sort by score\n3. 
Robot output: each endpoint/result includes \"alias\" string field identifying which spec it came from\n\n**Sorting rules:**\n- For `list --all-aliases`: merged endpoints sorted by (alias ASC, path ASC, method_rank ASC) where method_rank follows standard HTTP method ordering (GET=0, POST=1, PUT=2, PATCH=3, DELETE=4, etc.)\n- For `search --all-aliases`: merged results sorted by (score DESC, then normal tie-breaking within same score) — the alias field is just additional metadata, it does not affect ranking\n\n**Robot output envelope:**\n- `data.endpoints[]` or `data.results[]` — each item includes `\"alias\": \"petstore\"` field\n- `data.aliases_searched: string[]` — lists which aliases were queried (e.g., `[\"petstore\", \"github-api\", \"stripe\"]`)\n- This allows agents to detect if an alias was missing/skipped vs simply had no matching results\n\n**Error handling for individual alias failures:**\n- If one alias fails to load (corrupted index, missing files), do NOT abort the entire operation\n- Skip the failed alias and continue with remaining aliases\n- Add warning to `meta.warnings[]` array: `{\"alias\": \"broken-api\", \"code\": \"INDEX_LOAD_FAILED\", \"message\": \"...\"}`\n- Robot output still returns ok:true with partial results (this is a degraded success, not an error)\n- If ALL aliases fail, then return an error\n\n## Acceptance Criteria\n- [ ] `list --all-aliases --robot` returns endpoints from all aliases with alias field on each item\n- [ ] `search \"pet\" --all-aliases --robot` searches across all aliases with alias field on each result\n- [ ] List results sorted by alias ASC, then path ASC, then method_rank ASC\n- [ ] Search results sorted by score DESC (alias does not affect ranking)\n- [ ] Robot output includes data.aliases_searched listing all queried aliases\n- [ ] If one alias fails to load, operation continues with remaining aliases and meta.warnings populated\n- [ ] If all aliases fail, returns error (not partial success)\n- [ ] Human output 
groups or labels results by alias for readability\n\n## Edge Cases\n- **Single alias cached:** --all-aliases works but is equivalent to normal query (aliases_searched has one entry)\n- **No aliases cached:** Return error — no specs available\n- **One alias fails to load:** Skip with warning in meta.warnings, return results from healthy aliases\n- **All aliases fail:** Return error with details about each failure\n- **Duplicate endpoint paths across aliases:** Both appear in results, distinguished by alias field\n\n## Files\n- MODIFY: src/cli/list.rs (add --all-aliases logic)\n- MODIFY: src/cli/search.rs (add --all-aliases logic)\n\n## TDD Anchor\nRED: Write `test_list_all_aliases` — fetch 2 different specs, run list --all-aliases --robot, assert results contain endpoints from both with alias field.\nRED: Write `test_search_all_aliases` — fetch 2 specs, run search --all-aliases --robot, assert results from both with correct score-based sorting.\nRED: Write `test_all_aliases_partial_failure` — corrupt one alias index, run list --all-aliases, assert partial results returned with warning.\nGREEN: Implement cross-alias iteration.\nVERIFY: `cargo test test_list_all_aliases`\n\n## Dependency Context\nRequires list command (bd-3km) and search command (bd-acf) to be implemented first.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.184624Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:55:47.312770Z","closed_at":"2026-02-12T20:55:47.312726Z","close_reason":"Completed by agent 
swarm","compaction_level":0,"original_size":0,"labels":["global","phase2"],"dependencies":[{"issue_id":"bd-1rk","depends_on_id":"bd-3km","type":"blocks","created_at":"2026-02-12T16:29:50.187908Z","created_by":"tayloreernisse"},{"issue_id":"bd-1rk","depends_on_id":"bd-3ll","type":"parent-child","created_at":"2026-02-12T16:29:50.187260Z","created_by":"tayloreernisse"},{"issue_id":"bd-1rk","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:29:50.188740Z","created_by":"tayloreernisse"}]} {"id":"bd-1sb","title":"Implement configuration system with path resolution and auth profiles","description":"## Background\nswagger-cli uses TOML config files for user preferences and auth profiles, stored in XDG config dir. The config system handles path resolution with a specific precedence: --config > SWAGGER_CLI_CONFIG > SWAGGER_CLI_HOME > XDG defaults. Auth profiles allow loading credentials from config.toml instead of passing raw tokens on the command line.\n\n## Approach\nImplement Config, AuthConfig, CredentialSource, AuthType, DisplayConfig structs in src/core/config.rs per the PRD. Config::load() reads from config_path(), Config::save() writes with toml::to_string_pretty(). Both config_path() and cache_dir() implement the D7 override precedence using env vars SWAGGER_CLI_HOME, SWAGGER_CLI_CONFIG, SWAGGER_CLI_CACHE, and directories::ProjectDirs. CredentialSource is a tagged enum (Literal, EnvVar, Keyring) with serde tag=\"source\". AuthType is an enum (Bearer, ApiKey { header }). 
Default stale_threshold_days is 30.\n\n## Acceptance Criteria\n- [ ] Config::load() returns default Config when no file exists\n- [ ] Config::save() writes valid TOML that round-trips through load()\n- [ ] config_path() respects: --config flag > SWAGGER_CLI_CONFIG env > SWAGGER_CLI_HOME/config/config.toml > XDG\n- [ ] cache_dir() respects: SWAGGER_CLI_CACHE > SWAGGER_CLI_HOME/cache > XDG cache dir\n- [ ] AuthConfig with CredentialSource::EnvVar serializes/deserializes correctly\n- [ ] Default config has stale_threshold_days=30, empty auth_profiles, no default_alias\n- [ ] All tests hermetic (use SWAGGER_CLI_HOME with tempdir)\n\n## Files\n- CREATE: src/core/config.rs (Config, AuthConfig, CredentialSource, AuthType, DisplayConfig, Default impl, load/save, path resolution)\n- MODIFY: src/core/mod.rs (pub mod config;)\n\n## TDD Anchor\nRED: Write `test_config_path_precedence` — set SWAGGER_CLI_HOME to a tempdir, verify config_path() returns tempdir/config/config.toml. Then set SWAGGER_CLI_CONFIG to a specific path, verify it takes precedence.\nGREEN: Implement config_path() with env var checks.\nVERIFY: `cargo test test_config_path_precedence`\n\n## Edge Cases\n- CredentialSource::Keyring is Phase 2 — include the enum variant but document it as unimplemented\n- Config file might not exist (return default, don't error)\n- SWAGGER_CLI_HOME dir might not exist yet (create parent dirs on save)\n- toml crate is named `toml`, not `serde_toml`","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:25:15.475935Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:41:12.898251Z","closed_at":"2026-02-12T17:41:12.898204Z","close_reason":"Config with TOML load/save, path precedence, auth 
profiles","compaction_level":0,"original_size":0,"labels":["infrastructure","phase1"],"dependencies":[{"issue_id":"bd-1sb","depends_on_id":"bd-hcb","type":"parent-child","created_at":"2026-02-12T16:25:15.477635Z","created_by":"tayloreernisse"},{"issue_id":"bd-1sb","depends_on_id":"bd-ilo","type":"blocks","created_at":"2026-02-12T16:25:15.478018Z","created_by":"tayloreernisse"}]} +{"id":"bd-1ti","title":"Epic: AsyncCache with spawn_blocking","description":"Wrap CacheManager blocking file I/O (std::fs + fs2::FileExt flock) in tokio::task::spawn_blocking via an AsyncCache wrapper. Prevents runtime starvation under sync --all --jobs=4 concurrent cache access. Preserves sync CacheManager for tests.\n\n## Child Beads (execution order)\n1. **bd-39e** -- Create AsyncCache wrapper with spawn_blocking (new struct in src/core/cache.rs)\n2. **bd-3aa** -- Convert all 12 command handlers from CacheManager to AsyncCache (mechanical .await additions)\n\n## Independence\nThis epic is fully independent of Epic 1 (Output Sink) and Epic 4 (Property Tests). Can run in parallel.","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-13T15:23:43.489072Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:34.323847Z","compaction_level":0,"original_size":0} +{"id":"bd-1x1","title":"Add property tests for search engine","description":"## Background\nThe search engine (src/core/search.rs, 722 lines) has tokenized scoring, Unicode-safe snippet extraction, coverage boost, and deterministic ordering. Current tests are example-based (petstore index). No property tests verify invariants across arbitrary inputs.\n\n## Approach\n1. Create tests/property_search_test.rs\n2. 
Build proptest strategies:\n - arb_indexed_endpoint(): generates IndexedEndpoint with random paths (/[a-z]{1,5}(/[a-z]{1,5}){0,3}), methods (GET|POST|PUT|DELETE|PATCH), optional summaries and descriptions\n - arb_spec_index(): Vec of 0..50 endpoints + 0..20 schemas\n - arb_search_options(): random combination of search_paths/search_descriptions/search_schemas, case_sensitive, exact, limit 1..100\n - arb_query(): 1-5 words of [a-z]{1,10} joined by spaces (for normal queries), plus Unicode strings for safety tests\n\n3. Implement 8 property tests:\n\n a) score_monotonicity: Given an endpoint matching term T in field F, adding a second match in field G should produce score >= original score.\n \n b) deterministic_ordering: search(query, index, opts) called twice = identical Vec (same scores, ranks, names, methods).\n \n c) limit_respected: results.len() <= opts.limit for any query and index.\n \n d) coverage_boost: For a 2-term query where endpoint A matches both terms and endpoint B matches 1 term (in same field), A.score >= B.score.\n \n e) case_insensitivity: For any query Q and index I, search(Q.to_lowercase(), I, case_insensitive) == search(Q.to_uppercase(), I, case_insensitive) in terms of result sets (same names returned).\n \n f) empty_query_safety: search(whitespace_only, index, opts) always returns empty vec. Strategy: \" \".repeat(0..10) + \"\\t\".repeat(0..3).\n \n g) unicode_safety: search(arbitrary_unicode, index, opts) never panics. Strategy: proptest::string::string_regex(\".*\").unwrap() with emoji, RTL, zero-width chars.\n \n h) snippet_bounds: For any match in results, snippet body (excluding \"...\") is <= 50 chars. 
Call safe_snippet directly with arbitrary haystack/needle.\n\n## Acceptance Criteria\n- tests/property_search_test.rs exists with 8 proptest tests\n- All 8 tests pass with default case count (256)\n- No new dependencies (proptest already in dev-deps)\n- safe_snippet is pub(crate) or the test uses a test helper to access it\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- CREATE: tests/property_search_test.rs\n- POSSIBLY MODIFY: src/core/search.rs (make safe_snippet pub(crate) for direct testing, or test via SearchEngine::search results)\n\n## TDD Anchor\nThese ARE the tests. Write them and verify they pass against existing implementation.\nRED: If any test fails, that is a real bug to fix in src/core/search.rs\nVERIFY: cargo test property_search\n\n## Edge Cases\n- Unicode safety test may trigger panics in safe_snippet if multi-byte char boundaries are mishandled -- the existing code has careful char-based iteration but property testing may find edge cases\n- Empty index (0 endpoints, 0 schemas) should return empty results for any query\n- Very long queries (100+ chars) should not cause excessive allocation\n\n## Dependency Context\nNo dependencies. Fully independent. Tests existing code without modifications.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:28:27.046799Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:28:27.048108Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1x1","depends_on_id":"bd-3va","type":"parent-child","created_at":"2026-02-13T15:28:27.048091Z","created_by":"tayloreernisse"}]} {"id":"bd-1x5","title":"Implement reliability stress tests and performance benchmarks","description":"## Background\nReliability tests validate the crash-consistency, concurrency safety, and determinism claims. These are the hardest tests but most important for production confidence. 
Includes fault injection (simulated crash at each write step), multi-process lock contention (32 concurrent processes), and property-based tests (proptest for deterministic ordering).\n\n## Approach\n**Crash consistency (tests/reliability/crash_consistency_test.rs):**\n- test_crash_before_meta_rename: write raw+index but not meta → read protocol detects → doctor --fix repairs\n- test_crash_after_raw_before_index: write raw but not index → doctor --fix rebuilds\n\n**Lock contention (tests/reliability/lock_contention_test.rs):**\n- test_concurrent_fetch_32_processes: spawn 32 threads, all fetch same alias with --force → verify all exit 0 or 9 (CACHE_LOCKED), no panics, final state passes doctor\n\n**Property-based (tests/reliability/property_test.rs with proptest):**\n- index_ordering_deterministic: random endpoints → build_index → build_index from shuffled → same JSON\n- hash_deterministic: same bytes → same hash\n\nAdd proptest = '1.0' to [dev-dependencies] in Cargo.toml alongside existing test deps.\n\n**Benchmarks (benches/perf.rs with Criterion):**\n- bench_load_index, bench_list_endpoints, bench_search_query on 500+ endpoint spec\n\n## Acceptance Criteria\n- [ ] Crash consistency tests verify doctor can repair all simulated crash states\n- [ ] 32-process contention test passes without deadlocks or corruption\n- [ ] Property-based tests verify deterministic ordering\n- [ ] Benchmarks establish baseline performance numbers\n- [ ] All reliability tests pass on CI\n\n## Files\n- CREATE: tests/reliability/crash_consistency_test.rs\n- CREATE: tests/reliability/lock_contention_test.rs\n- CREATE: tests/reliability/property_test.rs\n- CREATE: benches/perf.rs\n- MODIFY: Cargo.toml (add [[bench]] for criterion, add proptest dev-dep)\n\n## TDD Anchor\nRun: `cargo test --test crash_consistency && cargo test --test lock_contention && cargo test --test property`\nVERIFY: `cargo bench -- --test`\n\n## Dependency Context\nUses CacheManager write/read protocol from cache beads. 
Uses build_index from indexer bead. Uses compute_hash. Tests the claims made in the cache architecture.\n\n## Edge Cases\n- **Crash test cleanup:** Crash simulation tests must clean up temp files even on test failure. Use Drop guards or panic hooks.\n- **32-process test may exceed file descriptor limits:** On CI, ulimit may be low. Test should check and skip with a clear message if fd limit < 256.\n- **Property test shrinking:** proptest shrinking can be slow for complex inputs. Set max shrink iterations to avoid CI timeouts.\n- **Benchmark stability:** Criterion requires multiple iterations. First run creates baseline. Tests should not fail on benchmark regression — only warn.\n- **Lock contention test timing:** 32 threads racing for a lock is timing-dependent. Use a barrier to ensure all threads start simultaneously.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:30:59.112199Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:55:28.193678Z","closed_at":"2026-02-12T20:55:28.193502Z","close_reason":"Implemented: 3 test files (crash_consistency_test.rs with 8 tests, lock_contention_test.rs with 3 tests, property_test.rs with 4 tests), 5 Criterion benchmarks in perf.rs, and fixed doctor bug for missing index repair","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-1x5","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:30:59.114670Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x5","depends_on_id":"bd-1ie","type":"blocks","created_at":"2026-02-12T16:30:59.113671Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x5","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:30:59.114152Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x5","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.113195Z","created_by":"tayloreernisse"}]} {"id":"bd-1y0","title":"Epic: Health and Cache 
Lifecycle","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:24.038779Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:54.607177Z","closed_at":"2026-02-12T20:33:54.607133Z","close_reason":"All 2 child beads closed: doctor command, cache lifecycle","compaction_level":0,"original_size":0,"labels":["epic"]} +{"id":"bd-1yq","title":"Extract sync checkpoint.rs and throttle.rs","description":"## Background\nSecond extraction from the sync module. Checkpoint (resume/save/load) and per-host throttle are self-contained components with clear boundaries.\n\n## Approach\n1. Create src/cli/sync/checkpoint.rs containing:\n - CHECKPOINT_FILE const (line 166)\n - SyncCheckpoint struct (line 168)\n - load_checkpoint() (line 175)\n - save_checkpoint() (line 181)\n - remove_checkpoint() (line 197)\n - All use SwaggerCliError for error types\n2. Create src/cli/sync/throttle.rs containing:\n - extract_host() (line 311)\n - PerHostThrottle struct (line 318)\n - PerHostThrottle::new() and acquire() methods\n - Uses tokio::sync::{Mutex, Semaphore} and Arc\n3. Update src/cli/sync/mod.rs to add: pub(crate) mod checkpoint; pub(crate) mod throttle;\n4. 
Update imports in remaining sync code to reference sync::checkpoint:: and sync::throttle::\n\n## Acceptance Criteria\n- checkpoint.rs contains all checkpoint logic (~60 lines)\n- throttle.rs contains PerHostThrottle and extract_host (~50 lines)\n- Both modules compile independently (no circular deps)\n- All integration tests pass (especially sync --all tests that exercise checkpointing and throttling)\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- CREATE: src/cli/sync/checkpoint.rs\n- CREATE: src/cli/sync/throttle.rs\n- MODIFY: src/cli/sync/mod.rs (add module declarations, update imports)\n\n## TDD Anchor\nRED: test_extract_host_parses_url -- verify extract_host(\"https://api.example.com/v1/spec.json\") returns \"api.example.com\"\nGREEN: Move extract_host to throttle.rs\nVERIFY: cargo test sync::throttle\n\nAdditional tests: test_extract_host_invalid_url (returns empty string), test_checkpoint_roundtrip (save then load)\n\n## Edge Cases\n- extract_host() returns empty string for unparseable URLs -- preserve this behavior\n- PerHostThrottle::acquire() uses expect(\"semaphore should not be closed\") -- acceptable since we control the semaphore lifecycle\n- Checkpoint JSON format must not change (backwards compat with existing checkpoint files)\n\n## Dependency Context\nDepends on bd-1re (types and diff extracted first so sync/mod.rs structure exists).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:26:48.616341Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:31:15.776442Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1yq","depends_on_id":"bd-10e","type":"parent-child","created_at":"2026-02-13T15:26:48.618577Z","created_by":"tayloreernisse"},{"issue_id":"bd-1yq","depends_on_id":"bd-1re","type":"blocks","created_at":"2026-02-13T15:31:15.776422Z","created_by":"tayloreernisse"}]} {"id":"bd-21m","title":"Epic: Diff 
Command","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:26.500265Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:55.613961Z","closed_at":"2026-02-12T20:33:55.613916Z","close_reason":"All 1 child bead closed: diff command with CI gate","compaction_level":0,"original_size":0,"labels":["epic"]} +{"id":"bd-29k","title":"Convert aliases, doctor, cache_cmd commands to OutputMode","description":"## Background\nConvert the 3 management commands (aliases, doctor, cache). These are moderately complex: aliases has 5 sub-operations with different output types, doctor runs progressive checks, and cache_cmd has 4 sub-modes.\n\n## Approach\nFor each command, change execute() to accept `OutputMode` instead of `robot: bool`, impl `HumanDisplay`, and replace `if robot` branches with `emit()`.\n\n### aliases.rs (execute at line 123: `pub async fn execute(args: &Args, robot: bool)`, 615 lines)\n5 sub-operations, each with different output. Current output structs:\n- `AliasListOutput` (line 57): `aliases: Vec`, `default_alias: Option`, `count: usize`\n- `AliasShowOutput` (line 64): `name`, `source_url`, `version`, `is_default`, `cached_at`, `last_accessed`, `content_hash`, `endpoint_count`, `schema_count`, `raw_size_bytes`, `source_format`, `generation`\n- `AliasRenameOutput` (line 81): `old_name`, `new_name`, `message`\n- `AliasDeleteOutput` (line 87): `name`, `message`\n- `AliasSetDefaultOutput` (line 93): `name`, `message`\n- `AliasListEntry` (line 46): `name`, `source_url`, `version`, `is_default`, `cached_at`, `endpoints`, `schemas`\n\nImpl `HumanDisplay` for each:\n- `AliasListOutput`: table via `render_table_or_empty` with columns NAME/URL/VERSION/DEFAULT/CACHED_AT/ENDPOINTS/SCHEMAS\n- `AliasShowOutput`: key-value display (Name: ..., URL: ..., etc.)\n- `AliasRenameOutput`, `AliasDeleteOutput`, `AliasSetDefaultOutput`: confirmation message strings\n\nSub-functions (`cmd_list`, `cmd_show`, `cmd_rename`, `cmd_delete`, 
`cmd_set_default`) each receive `mode: OutputMode` and call `emit()`.\n\n### doctor.rs (execute at line 344: `pub async fn execute(args: &Args, robot_mode: bool)`, 767 lines)\n**DOCUMENTED EXCEPTION** to HumanDisplay pattern. Doctor prints progressive check results (pass/fail per check with real-time output), which does not fit the produce-data-emit-once model.\n\nOutput struct: `DoctorOutput` (line 71) with `health: HealthStatus`, `aliases: Vec`, `warnings: Vec`, `total_disk_bytes: u64`, `fixable_count: usize`, `unfixable_count: usize`\n\nApproach:\n- Accept `OutputMode` instead of `bool`\n- For `Robot` mode: collect all results, emit `DoctorOutput` via `robot_success()` at end (existing behavior)\n- For `Human` mode: keep progressive `println!` calls inline (existing behavior)\n- Replace `if robot_mode` checks with `matches!(mode, OutputMode::Robot { .. })`\n- Do NOT impl `HumanDisplay for DoctorOutput`\n\n### cache_cmd.rs (execute at line 151: `pub async fn execute(args: &Args, robot: bool)`, 586 lines)\n4 sub-modes with tagged enum output:\n- `CacheOutput` enum (line 54): `Stats(StatsOutput)`, `Path(PathOutput)`, `Prune(PruneOutput)`, `Evict(EvictOutput)`\n- `StatsOutput` (line 66): `aliases: Vec`, `total_size_bytes`, `total_aliases`, `total_endpoints`, `total_schemas`\n- `PathOutput` (line 78): `path: String`\n- `PruneOutput` (line 82): `removed: Vec`, `total_removed`, `bytes_freed`\n- `EvictOutput` (line 91): `alias`, `message`, `removed: bool`\n\nImpl `HumanDisplay` for each variant's inner type:\n- `StatsOutput`: table of alias stats + summary line\n- `PathOutput`: just the path string\n- `PruneOutput`: list of removed items + \"Freed X bytes\" summary\n- `EvictOutput`: confirmation message\n\nSub-functions (`execute_path`, `execute_stats`, `execute_prune`, `execute_evict`) each receive `mode` and call `emit()`.\n\n## Acceptance Criteria\n- [ ] aliases.rs and cache_cmd.rs: zero `if robot` / `if !robot` branches, all output through `emit()`\n- [ ] doctor.rs: 
accepts `OutputMode`, uses `matches!(mode, OutputMode::Robot { .. })` for branching, keeps progressive human output inline (documented exception)\n- [ ] `HumanDisplay` implemented for: `AliasListOutput`, `AliasShowOutput`, `AliasRenameOutput`, `AliasDeleteOutput`, `AliasSetDefaultOutput`, `StatsOutput`, `PathOutput`, `PruneOutput`, `EvictOutput`\n- [ ] No `HumanDisplay` impl for `DoctorOutput` (documented exception)\n- [ ] All integration tests pass unchanged\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/aliases.rs (accept OutputMode, impl HumanDisplay for 5 output types)\n- MODIFY: src/cli/doctor.rs (accept OutputMode, replace bool checks with OutputMode matches)\n- MODIFY: src/cli/cache_cmd.rs (accept OutputMode, impl HumanDisplay for 4 output types)\n- MODIFY: src/main.rs (pass mode to these 3 commands at lines 129, 133, 134)\n\n## TDD Anchor\nRED: `test_alias_list_human_display` -- create `AliasListOutput` with 2 aliases, call `display_human(&mut Vec)`, assert table contains alias names and \"default\" marker\nGREEN: Impl `HumanDisplay for AliasListOutput`\nVERIFY: `cargo test aliases`\n\nAdditional tests:\n- `test_alias_show_human_display` -- assert key-value pairs present\n- `test_cache_stats_human_display` -- assert summary line\n- `test_cache_path_human_display` -- assert just the path string\n- `test_cache_prune_human_display` -- assert \"Freed X bytes\" line\n\n## Edge Cases\n- doctor.rs is a documented exception -- an agent MUST NOT try to impl HumanDisplay for it. 
If tempted, check the progressive output pattern: it prints per-alias check results in real-time before the final summary.\n- cache prune/evict modify state AND produce output -- `HumanDisplay` must handle success messages and counts, not the mutation itself\n- aliases rename/delete produce confirmation messages, not tables -- `HumanDisplay` writes a single line like \"Renamed 'old' to 'new'\"\n- cache_cmd.rs has 4 separate `CacheManager::new()` calls in different sub-functions (lines 151, 192, 240, 308) -- all change to accept mode param, cache creation stays local for now (AsyncCache conversion is bd-3aa)\n\n## Dependency Context\nUses `OutputMode`, `HumanDisplay`, `emit()` from bd-2im (output foundation). Pattern validated by bd-bq8 (tags) and bd-2wp (query commands).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:25:13.124957Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:43:17.599335Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-29k","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:25:13.125620Z","created_by":"tayloreernisse"},{"issue_id":"bd-29k","depends_on_id":"bd-bq8","type":"blocks","created_at":"2026-02-13T15:31:09.174144Z","created_by":"tayloreernisse"}]} {"id":"bd-2e4","title":"Flesh out Phase 3 scope and requirements","description":"## Background\nPhase 3 beads (SBOM, keyring, curl gen, breaking-change classification, semantic search, YAML output) are currently stubs without enough detail for implementation. Before any Phase 3 work begins, the requirements and scope for each feature must be fleshed out with proper acceptance criteria, approach, and file lists.\n\n## What Needs to Happen\n1. Review each Phase 3 bead against the PRD's Phase 3 section\n2. Research any dependencies or design decisions not yet documented\n3. Write full bead descriptions (Background, Approach, Acceptance Criteria, Files, TDD Anchor, Edge Cases) for each Phase 3 bead\n4. 
Validate scope boundaries — what's in vs out for each feature\n\n## Acceptance Criteria\n- [ ] All Phase 3 beads have full descriptions (score 4+/5 on agent-readiness)\n- [ ] Each bead has concrete file lists and TDD anchors\n- [ ] Scope boundaries are documented (what's explicitly out of scope)\n- [ ] No unresolved ambiguities — all genuinely ambiguous decisions are resolved\n\n## Phase 3 Beads to Flesh Out\n- bd-37c: SBOM generation and cosign attestation\n- bd-3pz: OS keychain credential backend\n- bd-60k: Generate curl commands from endpoints\n- bd-j23: Breaking-change classification for diff\n- bd-132: Semantic search with embeddings\n- bd-b8h: YAML output format","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T16:42:40.196904Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:40.200279Z","compaction_level":0,"original_size":0,"labels":["phase3","planning"],"dependencies":[{"issue_id":"bd-2e4","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:42:40.200266Z","created_by":"tayloreernisse"}]} {"id":"bd-2gp","title":"Implement golden robot output tests and index-only invariant tests","description":"## Background\nGolden tests are the #1 defense against robot JSON shape regressions. They verify structural invariants (ok, data, meta.schema_version, meta.tool_version, meta.command, meta.duration_ms) and snapshot-compare against golden files. Index-only invariant tests verify that list/search work without raw.json (core performance promise).\n\n## Approach\n**Golden tests (tests/integration/golden_test.rs):**\n1. For each command (list, show, search, schemas, tags, aliases), run with --robot\n2. Parse output as JSON, verify structural invariants (ok is bool, data is object, meta has required fields)\n3. Compare against golden snapshot files in tests/integration/golden/\n4. 
Fail CI if shape changes unless schema_version is incremented\n\n**Index-only invariant tests:**\n- test_list_does_not_read_raw_json: fetch spec, delete raw.json, run list — must succeed\n- test_search_does_not_read_raw_json: same pattern with search\n- test_tags_does_not_read_raw_json: same\n- test_schemas_list_does_not_read_raw_json: same (list mode only; show mode needs raw)\n\n**JSON Schema validation (optional enhancement):**\n- Create docs/robot-schema/v1/success.schema.json and error.schema.json\n- Validate robot output against these schemas in golden tests\n\n## Acceptance Criteria\n- [ ] Golden test verifies all 6 command outputs have correct structure\n- [ ] Index-only tests pass (list/search/tags/schemas-list work without raw.json)\n- [ ] Golden files exist in tests/integration/golden/\n- [ ] JSON Schema files exist in docs/robot-schema/v1/\n\n## Edge Cases\n- **duration_ms non-determinism:** Golden files must NOT include duration_ms in snapshot comparison (it changes every run). Strip or mask it before comparing.\n- **tool_version changes:** Updating Cargo.toml version breaks golden files. Golden comparison should either mask tool_version or tests should update golden files via an env var flag.\n- **Platform-specific key ordering:** serde_json with BTreeMap ensures deterministic ordering. Verify golden files use sorted keys.\n- **Index-only tests must verify raw.json is actually deleted:** Don't just skip loading it — physically remove the file and prove the command works without it.\n\n## Files\n- CREATE: tests/integration/golden_test.rs\n- CREATE: tests/integration/golden/*.json (golden snapshot files)\n- CREATE: docs/robot-schema/v1/success.schema.json\n- CREATE: docs/robot-schema/v1/error.schema.json\n\n## TDD Anchor\nRun all golden and invariant tests.\nVERIFY: `cargo test golden && cargo test does_not_read_raw`\n\n## Dependency Context\nRequires all query commands to be implemented. 
Uses test helpers from bd-lx6 (Create test fixtures and integration test helpers).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.080993Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:06:06.247229Z","closed_at":"2026-02-12T20:06:06.247185Z","close_reason":"Golden robot output tests + index-only invariant tests complete with JSON Schema artifacts","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-2gp","depends_on_id":"bd-3bl","type":"blocks","created_at":"2026-02-12T16:30:59.083124Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:30:59.082691Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-lx6","type":"blocks","created_at":"2026-02-12T16:34:06.420878Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.082181Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-x15","type":"blocks","created_at":"2026-02-12T16:30:59.083539Z","created_by":"tayloreernisse"}]} +{"id":"bd-2im","title":"Create OutputMode enum, HumanDisplay trait, and emit() function","description":"## Background\nEvery command handler takes `robot: bool` and manually branches (29+ if-branches across 12 files). `output/human.rs` is 8 lines. All formatting lives in command handlers.\n\nThis bead creates the foundational types that replace the `robot:bool` threading pattern. All subsequent Epic 1 beads depend on these types.\n\n## Approach\n1. Add `OutputMode` enum to `src/output/mod.rs` (after existing `RobotEnvelope` and `RobotError`):\n ```rust\n #[derive(Debug, Clone, Copy)]\n pub enum OutputMode {\n Robot { pretty: bool },\n Human,\n }\n ```\n\n2. 
Add `HumanDisplay` trait to `src/output/mod.rs`:\n ```rust\n pub trait HumanDisplay {\n fn display_human(&self, w: &mut dyn std::io::Write) -> std::io::Result<()>;\n }\n ```\n Using `dyn Write` enables testing against `Vec`.\n\n3. Add `emit()` function to `src/output/mod.rs`:\n ```rust\n pub fn emit(\n data: &T,\n mode: OutputMode,\n command: &str,\n duration: std::time::Duration,\n ) {\n match mode {\n OutputMode::Robot { pretty: true } => robot::robot_success_pretty(data, command, duration),\n OutputMode::Robot { pretty: false } => robot::robot_success(data, command, duration),\n OutputMode::Human => data.display_human(&mut std::io::stdout()).expect(\"stdout write\"),\n }\n }\n ```\n\n4. Keep existing `robot::robot_success()`, `robot::robot_success_pretty()`, and `robot::robot_error()` unchanged -- they are called internally by `emit()` and also directly by `main.rs` error handling.\n\nCurrent `src/output/mod.rs` structure (47 lines):\n- Lines 1-3: pub mod declarations (human, robot, table)\n- Lines 5-7: use statements\n- Lines 9-17: `RobotEnvelope` struct\n- Lines 19-25: `RobotError` struct\n- Lines 27-47: impl blocks for `RobotEnvelope`\n\nAdd `OutputMode`, `HumanDisplay`, and `emit()` after line 47.\n\n## Acceptance Criteria\n- [ ] `OutputMode` enum with `Robot { pretty: bool }` and `Human` variants exists in `src/output/mod.rs`\n- [ ] `HumanDisplay` trait with `fn display_human(&self, w: &mut dyn Write) -> io::Result<()>` exists\n- [ ] `emit()` dispatches to `robot::robot_success` / `robot_success_pretty` for Robot mode\n- [ ] `emit()` calls `data.display_human(&mut stdout())` for Human mode\n- [ ] `emit()` with `Robot { pretty: false }` produces valid `RobotEnvelope` JSON (ok:true, data, meta with schema_version/tool_version/command/duration_ms)\n- [ ] `emit()` with `Robot { pretty: true }` produces pretty-printed (indented) JSON\n- [ ] `emit()` with `Human` calls `HumanDisplay::display_human()`\n- [ ] `cargo test` passes, `cargo clippy --all-targets -- -D 
warnings` passes\n\n## Files\n- MODIFY: `src/output/mod.rs` (add `OutputMode`, `HumanDisplay`, `emit()` after line 47)\n\n## TDD Anchor\nRED: `test_emit_robot_produces_valid_envelope` -- Create a dummy struct that impls both `Serialize` and `HumanDisplay`, call `emit` in robot mode capturing stdout, parse as `RobotEnvelope`, assert `ok == true` and `data` present.\nGREEN: Implement `OutputMode`, `HumanDisplay`, `emit()`\nVERIFY: `cargo test output`\n\nAdditional tests:\n- `test_emit_human_calls_display_human` -- emit with Human mode, verify display_human output written to stdout\n- `test_emit_robot_pretty_is_indented` -- emit with `Robot { pretty: true }`, verify output contains newlines/indentation\n- `test_output_mode_debug` -- verify Debug derive works\n\n## Edge Cases\n- `HumanDisplay` io::Error: `expect()` in emit is acceptable since stdout failure is unrecoverable (same pattern as `println!`)\n- Empty data structs should still produce valid JSON (`{\"ok\":true,\"data\":{},\"meta\":{...}}`)\n- `emit()` takes `&T` not `T` since robot_success/robot_success_pretty take `T: Serialize` -- need to verify the existing functions accept references. Current signatures: `pub fn robot_success(data: T, ...)` -- since `&T: Serialize` when `T: Serialize`, passing `data` directly works. For HumanDisplay, `data.display_human()` works because the trait is implemented on the concrete type and called via reference.\n\n## Dependency Context\nNo dependencies. 
This is the foundation bead for Epic 1.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:24:20.947999Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:42:11.397561Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2im","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:24:20.949197Z","created_by":"tayloreernisse"}]} +{"id":"bd-2l2","title":"Epic: Shared Command Pipeline (CommandContext)","description":"Create a CommandContext struct encapsulating common command setup (cache, output mode, timing, network policy, config path). Eliminates ~15-20 lines of identical scaffolding per command handler. Enables future cross-cutting concerns (telemetry, logging) without touching individual commands.\n\n## Child Beads (execution order)\n1. **bd-1iz** -- Create CommandContext struct and convert tags as proof of concept\n2. **bd-2z1** -- Convert all remaining 11 commands to use &CommandContext\n\n## Prerequisites\nDepends on Epic 1 (bd-14o, all commands accept OutputMode) and Epic 3 (bd-1ti, bd-39e AsyncCache exists). CommandContext wraps both OutputMode and AsyncCache.","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-13T15:23:46.949270Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:43.693159Z","compaction_level":0,"original_size":0} {"id":"bd-2mr","title":"Add supply chain hardening and robot JSON Schema artifacts","description":"## Background\nSupply chain hardening: release artifacts include SHA256SUMS + minisign signatures. Robot output JSON Schemas published as build artifacts for agent validation.\n\n## Approach\n\n### Supply Chain Artifacts\n1. Update release job in .gitlab-ci.yml to generate SHA256SUMS and sign with minisign\n2. Upload SHA256SUMS + SHA256SUMS.minisig alongside binaries to GitLab Package Registry\n3. 
Update install.sh to verify signature when minisign is available\n\n### JSON Schema Files\n\nCreate `docs/robot-schema/v1/success.schema.json`:\n```json\n{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"required\": [\"ok\", \"data\", \"meta\"],\n \"properties\": {\n \"ok\": { \"type\": \"boolean\", \"const\": true },\n \"data\": { \"type\": \"object\" },\n \"meta\": {\n \"type\": \"object\",\n \"required\": [\"schema_version\", \"tool_version\", \"command\", \"duration_ms\"],\n \"properties\": {\n \"schema_version\": { \"type\": \"string\", \"pattern\": \"^\\\\d+\\\\.\\\\d+$\" },\n \"tool_version\": { \"type\": \"string\" },\n \"command\": { \"type\": \"string\" },\n \"command_version\": { \"type\": \"string\", \"description\": \"Per-command payload version for independent evolution\" },\n \"duration_ms\": { \"type\": \"integer\", \"minimum\": 0 }\n }\n }\n },\n \"additionalProperties\": false\n}\n```\n\nCreate `docs/robot-schema/v1/error.schema.json`:\n```json\n{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"required\": [\"ok\", \"error\", \"meta\"],\n \"properties\": {\n \"ok\": { \"type\": \"boolean\", \"const\": false },\n \"error\": {\n \"type\": \"object\",\n \"required\": [\"code\", \"message\"],\n \"properties\": {\n \"code\": { \"type\": \"string\" },\n \"message\": { \"type\": \"string\" },\n \"suggestion\": { \"type\": \"string\" }\n }\n },\n \"meta\": {\n \"type\": \"object\",\n \"properties\": {\n \"schema_version\": { \"type\": \"string\" },\n \"tool_version\": { \"type\": \"string\" },\n \"command\": { \"type\": \"string\" },\n \"duration_ms\": { \"type\": \"integer\", \"minimum\": 0 }\n }\n }\n },\n \"additionalProperties\": false\n}\n```\n\n### Compatibility Policy\n- **No version bump:** Adding new optional fields to data or meta (additive changes)\n- **MUST bump schema_version:** Removing fields, renaming fields, changing field types, changing required 
status\n- **meta.command_version:** Each command can independently evolve its data payload structure. When a command's data shape changes in a breaking way, bump command_version without bumping the global schema_version. This allows agents to pin to specific command output shapes.\n\n## Acceptance Criteria\n- [ ] Release pipeline generates SHA256SUMS from all binary artifacts\n- [ ] minisign signature generated for SHA256SUMS (when key available in CI)\n- [ ] SHA256SUMS and SHA256SUMS.minisig uploaded alongside binaries\n- [ ] install.sh attempts signature verification when minisign is on PATH\n- [ ] docs/robot-schema/v1/success.schema.json matches the structure above\n- [ ] docs/robot-schema/v1/error.schema.json matches the structure above\n- [ ] success.schema.json meta requires: schema_version, tool_version, command, duration_ms\n- [ ] success.schema.json meta includes optional command_version field\n- [ ] error.schema.json error requires: code, message (suggestion is optional)\n- [ ] Both schemas have additionalProperties: false at top level\n- [ ] Schema files are valid JSON Schema (validate with a JSON Schema validator)\n\n## Files\n- MODIFY: .gitlab-ci.yml (add checksum + signing to release)\n- MODIFY: install.sh (add signature verification)\n- CREATE: docs/robot-schema/v1/success.schema.json\n- CREATE: docs/robot-schema/v1/error.schema.json\n\n## Dependency Context\nExtends CI pipeline from bd-1lj and install script from bd-gvr.\n\n## Edge Cases\n- **Schema evolution testing:** When robot output changes, both the code AND the JSON Schema must be updated together. Tests should validate all command outputs against the schemas.\n- **Minisign not available in CI:** If minisign is not installed, the release should still succeed but skip signing with a clear warning (don't fail the release).\n- **JSON Schema draft version:** Use 2020-12 draft. 
Older validators may not support it — document the minimum validator version.\n- **additionalProperties:false on nested objects:** The top-level schemas have it, but nested objects (data, meta) may need it too if strict validation is desired. Decision: only enforce at top level for now.","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:32.482765Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:56:21.036013Z","compaction_level":0,"original_size":0,"labels":["ci","phase2"],"dependencies":[{"issue_id":"bd-2mr","depends_on_id":"bd-1lj","type":"blocks","created_at":"2026-02-12T16:34:06.478112Z","created_by":"tayloreernisse"},{"issue_id":"bd-2mr","depends_on_id":"bd-1lo","type":"parent-child","created_at":"2026-02-12T16:31:32.485330Z","created_by":"tayloreernisse"},{"issue_id":"bd-2mr","depends_on_id":"bd-gvr","type":"blocks","created_at":"2026-02-12T16:34:06.528568Z","created_by":"tayloreernisse"}]} {"id":"bd-2pl","title":"Epic: Alias Management","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:22.527514Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:53.108811Z","closed_at":"2026-02-12T20:33:53.108763Z","close_reason":"All 1 child bead closed: aliases command","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-2s6","title":"Implement doctor command with integrity validation and --fix repair","description":"## Background\nThe doctor command validates installation health and cache integrity. It checks config/cache directories exist, validates each alias's cache files (meta, index, raw), detects integrity issues (generation/hash mismatches, stale index versions, missing files), warns on insecure config permissions, and validates all index pointers against raw.json. Optional --fix repairs recoverable issues.\n\n## Approach\nImplement src/cli/doctor.rs with DoctorArgs and execute():\n\n**Checks performed:**\n1. Config directory exists and is readable/writable\n2. 
Cache directory exists and is readable/writable\n3. For each alias: check meta.json exists, validate generation/index_hash/index_version against index.json, validate raw_hash against raw.json, validate all operation_ptr/schema_ptr resolve in raw.json\n4. Detect stale caches (>30 days, configurable via config)\n5. Check config.toml permissions -- warn if group/world readable when auth tokens present\n6. Report disk usage (per-alias and total)\n\n**--fix repair modes (per alias, after acquiring lock):**\n1. If raw exists but index missing/invalid or index_version mismatched -> rebuild index from raw\n2. If raw + index valid but meta missing -> reconstruct meta from raw + index\n3. If raw unreadable/unparseable -> delete alias (last resort)\n\n**Health status:** HEALTHY (no issues), WARNING (stale caches, permission issues), DEGRADED (some aliases have integrity issues but are fixable), UNHEALTHY (unfixable corruption).\n\n## Acceptance Criteria\n- [ ] doctor reports HEALTHY for a valid cache\n- [ ] doctor detects missing meta.json as integrity issue\n- [ ] doctor detects generation mismatch between meta and index\n- [ ] doctor detects invalid operation_ptr (pointer doesn't resolve)\n- [ ] doctor warns on stale caches (>30 days)\n- [ ] doctor warns on insecure config permissions\n- [ ] --fix rebuilds index from raw when index is invalid\n- [ ] --fix reconstructs meta when meta is missing but raw+index exist\n- [ ] --fix deletes alias only when raw is unreadable\n- [ ] Robot output: health status, per-alias status, warnings[], disk_usage\n\n## Edge Cases\n- **Concurrent doctor + fetch:** Doctor reads while fetch writes. Doctor should acquire read lock or tolerate mid-write state gracefully (report as integrity issue, not crash).\n- **Very large cache (hundreds of aliases):** Doctor must not OOM — process aliases one at a time, not load all into memory.\n- **Permission denied on cache directory:** Report as WARNING, not crash. 
Doctor should be resilient to partial access.\n- **Empty alias directory (no files):** Skip with warning, don't crash. This can happen from interrupted deletes.\n- **--fix on locked alias:** If another process holds the lock, skip that alias with warning (don't block).\n\n## Files\n- MODIFY: src/cli/doctor.rs (DoctorArgs, execute, check_alias, fix_alias, permission_check)\n- MODIFY: src/output/robot.rs (add output_doctor)\n- MODIFY: src/output/human.rs (add output_doctor)\n\n## TDD Anchor\nRED: Write `test_doctor_detects_missing_meta` -- create cache with raw+index but no meta, run doctor --robot, assert alias status is \"integrity_error\".\nGREEN: Implement per-alias integrity checking.\nVERIFY: `cargo test test_doctor_detects_missing_meta`\n\n## Dependency Context\nUses CacheManager (load_index, load_raw, list_aliases) from bd-3ea (cache read path). Uses SpecIndex and CacheMetadata types from bd-ilo (error types and core data models). Uses build_index from bd-189 (indexer) for --fix index rebuild.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:29:50.084259Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.260958Z","closed_at":"2026-02-12T19:25:35.260922Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["health","phase2"],"dependencies":[{"issue_id":"bd-2s6","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:29:50.088686Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-1y0","type":"parent-child","created_at":"2026-02-12T16:29:50.087511Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:29:50.089158Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:29:50.088266Z","created_by":"tayloreernisse"}]} +{"id":"bd-2vu","title":"Add property tests for indexer and format 
detection","description":"## Background\nThe indexer (src/core/indexer.rs, 659 lines) handles format detection, JSON/YAML normalization, and index building. Current tests are example-based. Property tests can catch edge cases in format sniffing and ensure build_index invariants hold for arbitrary valid OpenAPI structures.\n\n## Approach\n1. Create tests/property_indexer_test.rs\n2. Build proptest strategies:\n - arb_openapi_spec(): generates valid-ish OpenAPI 3.x JSON with random paths, methods, summaries, schemas\n - arb_bytes_with_format(): generates bytes that are either valid JSON or YAML, paired with expected Format\n - arb_content_type(): random valid Content-Type strings (application/json, application/yaml, text/yaml, etc.)\n - arb_filename(): random filenames with .json/.yaml/.yml extensions\n\n3. Implement 6 property tests:\n\n a) format_detection_idempotent: detect_format(bytes, hint, ct) called twice with same args = same Format.\n \n b) json_sniffing_correct: For any bytes where first non-whitespace is '{' or '[', detect_format(bytes, None, None) returns Format::Json.\n \n c) content_type_overrides_sniffing: When content_type_hint is Some(\"application/json\"), result is always Format::Json regardless of bytes content.\n \n d) normalize_json_roundtrip: For valid JSON bytes, normalize_to_json(bytes, Format::Json) returns (original_bytes_semantically, parsed_value) where serde_json::to_value(parsed) == original parsed value.\n \n e) build_index_endpoint_count: For a generated OpenAPI spec with N paths and M methods per path, build_index produces sum(methods_per_path) endpoints.\n \n f) content_hash_deterministic: compute_hash(bytes) called twice = identical result. 
(This tests the indexer pipeline hash, distinct from the existing property_test.rs hash tests which test raw SHA-256.)\n\n## Acceptance Criteria\n- tests/property_indexer_test.rs exists with 6 proptest tests\n- All 6 tests pass with default case count (256)\n- No new dependencies\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- CREATE: tests/property_indexer_test.rs\n\n## TDD Anchor\nThese ARE the tests. Write them and verify they pass.\nRED: If format_detection or build_index behaves unexpectedly, fix in src/core/indexer.rs\nVERIFY: cargo test property_indexer\n\n## Edge Cases\n- YAML bytes that start with '{' (valid YAML but looks like JSON to sniffer) -- detect_format returns Json, which is correct since valid JSON is valid YAML\n- Empty bytes [] -- detect_format should return Yaml (no '{' or '[' found), normalize_to_json may error\n- OpenAPI spec with no paths -- build_index should produce 0 endpoints\n- Spec with duplicate path+method combinations -- build_index should handle gracefully\n\n## Dependency Context\nNo dependencies. Fully independent. Tests existing code without modifications.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:28:45.040864Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:28:45.041636Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2vu","depends_on_id":"bd-3va","type":"parent-child","created_at":"2026-02-13T15:28:45.041622Z","created_by":"tayloreernisse"}]} +{"id":"bd-2wp","title":"Convert show, schemas, search, diff commands to OutputMode","description":"## Background\nAfter the tags proof-of-concept validates the pattern, convert the 4 simple query commands. Each follows the same pattern: load index, compute result, branch on robot bool for output.\n\n## Approach\nFor each of show.rs, schemas.rs, search.rs, diff.rs:\n1. Change execute() signature: replace `robot: bool` with `mode: OutputMode`\n2. 
Impl `HumanDisplay` for the command's output struct\n3. Move human formatting from the `if !robot` block into `display_human()`\n4. Replace `if robot { robot_success(...) } else { ... }` with `emit(&output, mode, cmd, duration)`\n5. Update main.rs dispatch to pass `mode` instead of `bool` for these commands\n\n### Per-command details:\n\n**show.rs** (execute at line 50: `pub async fn execute(args: &Args, robot: bool)`)\n- Output struct: `ShowOutput` (line 36) with fields: `path: String`, `method: String`, `summary: Option`, `description: Option`, `operation_id: Option`, `tags: Vec`, `deprecated: bool`, `parameters: Vec`, `request_body: Option`, `responses: Vec`, `security: Vec`\n- Human output in `print_human()` (lines 198-254): Prints sections (SUMMARY, PARAMETERS, etc.) line-by-line\n- Impl `HumanDisplay for ShowOutput` moving `print_human()` logic into `display_human()`, using `write!(w, ...)` instead of `println!`\n\n**schemas.rs** (execute at line 88: `pub async fn execute(args: &Args, robot: bool)`)\n- Two sub-modes: list and show (delegated via `execute_list()` and `execute_show()`)\n- List output: `SchemasListOutput` (line 42) with `schemas: Vec`, `total: usize`\n- Show output: `SchemaShowOutput` (line 53) with `name: String`, `schema: serde_json::Value`\n- Impl `HumanDisplay for SchemasListOutput` -- table via `render_table_or_empty`\n- Impl `HumanDisplay for SchemaShowOutput` -- pretty-print JSON schema\n- Both sub-functions need `mode: OutputMode` propagated from execute()\n\n**search.rs** (execute at line 174: `pub async fn execute(args: &Args, robot_mode: bool)`)\n- Has two modes: single-alias and all-aliases (line 174: `if args.all_aliases`)\n- Single output: `RobotOutput` (line 50) with `results: Vec`, `total: usize`\n- All-aliases output: `AllAliasesRobotOutput` (line 56) with `results: Vec`, `total: usize`, `aliases_searched: usize`, `warnings: Vec`\n- NOTE: Rename `RobotOutput` -> `SearchOutput` and `AllAliasesRobotOutput` -> 
`AllAliasesSearchOutput` since they are no longer robot-specific\n- Impl `HumanDisplay for SearchOutput` -- table with rank/score/type/name/method/summary columns\n- Impl `HumanDisplay for AllAliasesSearchOutput` -- table with alias column added\n- `execute_all_aliases()` (around line 250) also needs `mode: OutputMode`\n\n**diff.rs** (execute at line 61: `pub async fn execute(args: &Args, robot_mode: bool)`)\n- Output struct: `DiffOutput` (line 37) with `left: String`, `right: String`, `changes: DiffResult`\n- DiffResult contains: `summary: ChangeSummary`, `details: Option`\n- Human output prints change counts and optional details table\n- Impl `HumanDisplay for DiffOutput`\n\n## Acceptance Criteria\n- [ ] All 4 commands accept `OutputMode` instead of `bool`\n- [ ] `HumanDisplay` implemented for: `ShowOutput`, `SchemasListOutput`, `SchemaShowOutput`, `SearchOutput` (renamed from `RobotOutput`), `AllAliasesSearchOutput` (renamed from `AllAliasesRobotOutput`), `DiffOutput`\n- [ ] Zero `if robot` / `if !robot` branches in show.rs, schemas.rs, search.rs, diff.rs\n- [ ] search.rs structs renamed: `RobotOutput` -> `SearchOutput`, `AllAliasesRobotOutput` -> `AllAliasesSearchOutput`\n- [ ] All integration tests pass unchanged\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/show.rs (accept OutputMode, impl HumanDisplay for ShowOutput)\n- MODIFY: src/cli/schemas.rs (accept OutputMode, impl HumanDisplay for SchemasListOutput + SchemaShowOutput)\n- MODIFY: src/cli/search.rs (accept OutputMode, rename structs, impl HumanDisplay for SearchOutput + AllAliasesSearchOutput)\n- MODIFY: src/cli/diff.rs (accept OutputMode, impl HumanDisplay for DiffOutput)\n- MODIFY: src/main.rs (pass mode to these 4 commands at lines 124-127, 135)\n\n## TDD Anchor\nRED: `test_show_human_display` -- create `ShowOutput` with known data, call `display_human(&mut Vec)`, assert contains path, method, summary section\nGREEN: Impl `HumanDisplay for 
ShowOutput`\nVERIFY: `cargo test show`\n\nAdditional tests per command:\n- `test_schemas_list_human_display` -- assert table contains schema names\n- `test_schema_show_human_display` -- assert contains pretty-printed JSON\n- `test_search_human_display` -- assert table with rank/score columns\n- `test_search_all_aliases_human_display` -- assert alias column present\n- `test_diff_human_display` -- assert change summary line\n\n## Edge Cases\n- schemas.rs two code paths (list vs show) need SEPARATE `HumanDisplay` impls, not one enum -- each sub-function calls emit() independently\n- search.rs `RobotResult` and `AliasRobotResult` structs should also be renamed (`SearchResultEntry`, `AliasSearchResultEntry`) for consistency, but these are internal -- agent discretion\n- show.rs `print_human()` function (line 198) should be removed after its logic moves to `HumanDisplay` impl\n- diff.rs uses `ChangeSummary` and `ChangeDetails` from `crate::cli::sync_cmd` -- these types move to `crate::cli::sync::types` in Epic 2, but that happens AFTER this bead\n\n## Dependency Context\nUses `OutputMode`, `HumanDisplay`, `emit()` from bd-2im. 
Pattern validated by bd-bq8 (tags PoC).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:24:57.392417Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:42:46.502898Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2wp","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:24:57.393590Z","created_by":"tayloreernisse"},{"issue_id":"bd-2wp","depends_on_id":"bd-bq8","type":"blocks","created_at":"2026-02-13T15:31:09.145207Z","created_by":"tayloreernisse"}]} +{"id":"bd-2x6","title":"Clean up robot:bool remnants and delete pre_scan duplication","description":"## Background\nAfter all 10 command handlers are converted from robot:bool to OutputMode+HumanDisplay+emit(), there will be remnants to clean up: duplicated robot-mode resolution logic, dead code, and an output/human.rs that is still only 8 lines despite HumanDisplay impls now existing across the codebase.\n\n## Approach\n\n### 1. Consolidate robot-mode resolution (src/main.rs)\n\n**Current state**: Two functions duplicate the same logic:\n- `pre_scan_robot()` (lines 15-36): Runs before clap parse for error formatting. Reads raw args + env var + TTY.\n- `resolve_robot_mode()` (lines 43-58): Runs after clap parse. Reads Cli fields + env var + TTY.\n\n**Target**: Rename `resolve_robot_mode` to `resolve_output_mode` returning `OutputMode`. 
Keep `pre_scan_robot()` but make it call a shared inner function:\n\n```rust\nfn robot_from_env_and_tty() -> bool {\n if std::env::var(\"SWAGGER_CLI_ROBOT\").is_ok_and(|v| v == \"1\" || v.eq_ignore_ascii_case(\"true\")) {\n return true;\n }\n !std::io::stdout().is_terminal()\n}\n\nfn pre_scan_robot() -> bool {\n let args: Vec = std::env::args().collect();\n if args.iter().any(|a| a == \"--no-robot\") { return false; }\n if args.iter().any(|a| a == \"--robot\" || a == \"--json\") { return true; }\n robot_from_env_and_tty()\n}\n\nfn resolve_output_mode(cli: &Cli) -> OutputMode {\n if cli.no_robot { return OutputMode::Human; }\n if cli.robot { return OutputMode::Robot { pretty: cli.pretty }; }\n if robot_from_env_and_tty() {\n OutputMode::Robot { pretty: cli.pretty }\n } else {\n OutputMode::Human\n }\n}\n```\n\n### 2. Move shared formatting helpers to output/human.rs\n\nAfter all HumanDisplay impls are written, identify formatting functions used by 3+ impls and move them to `output/human.rs`. Based on existing patterns in the codebase, these are:\n\n- `format_duration(duration: Duration) -> String` -- \"123ms\" or \"1.2s\" pattern used by tags, list, show, search, diff, sync. Currently each command formats inline.\n- `format_header(title: &str, version: &str, count: usize, label: &str) -> String` -- \"PetStore v3.0 -- 42 endpoints\" pattern used by tags, list, show, schemas. Currently duplicated.\n- Re-export `print_error()` (already there)\n\nDo NOT move command-specific formatting (e.g., show's section headers, doctor's check results) -- those stay in HumanDisplay impls.\n\n### 3. 
Dead code removal (cargo clippy -D warnings)\n\nRun `cargo clippy --all-targets -- -D warnings` and fix:\n- Remove unused `robot: bool` parameters from any internal helper functions\n- Remove any `#[allow(dead_code)]` added during transition\n- Remove old `output_no_changes()` and `output_changes()` if not already deleted in bd-2x7\n- Clean up unused imports in main.rs and command files\n\n### 4. Verify output/human.rs grew beyond 8 lines\n\nFinal output/human.rs should contain: `print_error()`, `format_duration()`, `format_header()`, and any other shared helpers. Expected ~30-50 lines.\n\n## Acceptance Criteria\n- [ ] `pre_scan_robot()` and `resolve_output_mode()` share a `robot_from_env_and_tty()` helper -- zero duplicated env/TTY logic\n- [ ] `resolve_output_mode()` returns `OutputMode` (not bool)\n- [ ] `output/human.rs` contains `print_error`, `format_duration`, `format_header` (3+ functions, 30+ lines)\n- [ ] Zero `dead_code` warnings from `cargo clippy --all-targets -- -D warnings`\n- [ ] Zero unused imports in main.rs\n- [ ] All tests pass (`cargo test`)\n- [ ] `cargo fmt --check` passes\n\n## Files\n- MODIFY: src/main.rs (consolidate resolution, add `robot_from_env_and_tty()`, rename `resolve_robot_mode` -> `resolve_output_mode`)\n- MODIFY: src/output/human.rs (add `format_duration()`, `format_header()`)\n- MODIFY: src/output/mod.rs (re-export shared helpers if needed)\n- POSSIBLY MODIFY: Any command files with remaining dead code\n\n## TDD Anchor\nRED: `test_format_duration_ms` in output/human.rs tests -- `format_duration(Duration::from_millis(123))` returns `\"123ms\"`\nGREEN: Implement `format_duration`\nVERIFY: `cargo test output::human && cargo clippy --all-targets -- -D warnings`\n\nAdditional tests:\n- `test_format_duration_seconds` -- `format_duration(Duration::from_millis(1234))` returns `\"1.2s\"`\n- `test_format_header` -- `format_header(\"PetStore\", \"3.0\", 42, \"endpoints\")` returns `\"PetStore 3.0 -- 42 endpoints\"`\n- 
`test_robot_from_env_and_tty` -- unit test for shared helper\n\n## Edge Cases\n- `pre_scan_robot()` MUST remain as a separate function -- it runs before clap parse, so it cannot use `Cli` fields. The shared helper only covers env+TTY; arg scanning stays in `pre_scan_robot`.\n- `format_duration` must handle Duration::ZERO gracefully (return \"0ms\")\n- If no shared helpers emerge from the conversion (unlikely given the patterns), just add `format_duration` -- it is used by every command.\n\n## Dependency Context\nDepends on ALL command conversion beads being complete: bd-bq8 (tags), bd-2wp (query commands), bd-29k (management commands), bd-1nm (list), bd-71g (fetch), bd-2x7 (sync). After all are done, the cleanup can identify actual shared patterns.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-13T15:26:11.170276Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:41:48.353749Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2x6","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:26:11.171948Z","created_by":"tayloreernisse"},{"issue_id":"bd-2x6","depends_on_id":"bd-2x7","type":"blocks","created_at":"2026-02-13T15:31:09.401575Z","created_by":"tayloreernisse"}]} +{"id":"bd-2x7","title":"Convert sync command to OutputMode","description":"## Background\nsync_cmd.rs (1843 lines) is the most complex command with both single-alias and batch (--all) modes. It has extensive robot/human branching in `output_no_changes()`, `output_changes()`, and `sync_all_inner()`. This is the final output sink conversion before the sync split epic.\n\nCurrent execute signature (line 1073):\n```rust\npub async fn execute(\n args: &Args,\n robot: bool,\n network_flag: &str,\n config_override: Option<&std::path::Path>,\n) -> Result<(), SwaggerCliError>\n```\n\n## Approach\n\n### 1. Change execute() to accept OutputMode\nReplace `robot: bool` with `mode: OutputMode`. 
Propagate `mode` to:\n- `sync_inner()` (~line 557)\n- `sync_one_alias_inner()` (~line 396)\n- `sync_all_inner()` (~line 786)\n- `output_no_changes()` and `output_changes()` helper functions\n\n### 2. Impl HumanDisplay for SyncOutput (single-alias result)\n`SyncOutput` struct (line 118):\n```rust\npub struct SyncOutput {\n pub alias: String,\n pub changed: bool,\n pub reason: Option, // \"no changes\" or \"updated\"\n pub local_version: Option,\n pub remote_version: Option,\n pub changes: Option,\n pub details: Option,\n pub dry_run: bool,\n}\n```\nMove logic from `output_no_changes()` and `output_changes()` into `HumanDisplay`:\n- No changes: \"'{alias}' is up to date (v{version})\"\n- Changes: \"'{alias}' updated: {added} added, {removed} removed, {modified} modified\" with optional details table\n- Dry-run: prefix with \"[dry-run] \"\n\n### 3. Impl HumanDisplay for SyncAllOutput (batch result)\n`SyncAllOutput` struct (line 152):\n```rust\npub struct SyncAllOutput {\n pub total: usize,\n pub succeeded: usize,\n pub failed: usize,\n pub skipped: usize,\n pub aborted: bool,\n pub results: Vec,\n pub dry_run: bool,\n}\n```\n`AliasSyncResult` struct (line 132):\n```rust\npub struct AliasSyncResult {\n pub alias: String,\n pub status: String, // \"ok\", \"failed\", \"skipped\"\n pub changed: bool,\n pub reason: Option,\n pub error: Option,\n pub local_version: Option,\n pub remote_version: Option,\n pub changes: Option,\n pub duration_ms: u64,\n}\n```\nMove batch output logic from `sync_all_inner()` (lines 1023-1059) into `HumanDisplay`:\n- Per-alias status line: \"[ok] alias (v1 -> v2, 3 changes) 123ms\" or \"[FAIL] alias: error message\"\n- Summary: \"Sync complete: X ok, Y failed, Z skipped\"\n- Aborted: \"(aborted early: failure budget exceeded)\"\n\n### 4. Delete output helpers\nAfter HumanDisplay impls, delete `output_no_changes()` and `output_changes()` functions.\n\n### 5. Replace branches with emit()\nReplace all `if robot { robot_success(...) 
} else { output_*(...) }` with `emit(&output, mode, \"sync\", duration)`.\n\n## Acceptance Criteria\n- [ ] sync `execute()` accepts `OutputMode` instead of `bool`\n- [ ] `HumanDisplay for SyncOutput` produces identical output to current `output_no_changes`/`output_changes`\n- [ ] `HumanDisplay for SyncAllOutput` produces identical per-alias status lines and summary\n- [ ] `output_no_changes()` and `output_changes()` helper functions deleted\n- [ ] Zero `if robot` / `if !robot` branches in sync_cmd.rs\n- [ ] Dry-run prefix \"[dry-run]\" appears in human output for both single and batch\n- [ ] Integration tests pass (sync single, sync --all)\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/sync_cmd.rs (accept OutputMode, impl HumanDisplay, delete helpers, use emit())\n- MODIFY: src/main.rs (pass mode to sync at line 130-132)\n\n## TDD Anchor\nRED: `test_sync_output_no_changes_human_display` -- create `SyncOutput { changed: false, alias: \"petstore\", .. 
}`, call `display_human`, assert contains \"up to date\"\nGREEN: Impl `HumanDisplay for SyncOutput`\nVERIFY: `cargo test sync`\n\nAdditional tests:\n- `test_sync_output_changes_human_display` -- assert contains \"updated\" and change counts\n- `test_sync_output_dry_run_prefix` -- assert starts with \"[dry-run]\"\n- `test_sync_all_output_human_display` -- assert per-alias lines and \"Sync complete\" summary\n- `test_sync_all_aborted_display` -- assert \"(aborted early\" footer\n\n## Edge Cases\n- `SyncAllOutput` human display must handle interleaved [ok]/[FAIL]/[skip] statuses per alias\n- Aborted sync must print \"(aborted early: failure budget exceeded)\" footer\n- Dry-run prefix \"[dry-run]\" must appear in BOTH single and batch human output\n- Per-alias version display uses \"?\" when version is `None`\n- `ChangeSummary` fields: `endpoints_added`, `endpoints_removed`, `endpoints_modified`, `schemas_added`, `schemas_removed` -- all used in human display\n\n## Dependency Context\nUses `OutputMode`, `HumanDisplay`, `emit()` from bd-2im (output foundation). 
This is the last command conversion and unblocks bd-2x6 (cleanup) and the sync split epic (bd-10e, starting with bd-1re).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:25:55.047595Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:43:45.294395Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2x7","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:25:55.049048Z","created_by":"tayloreernisse"},{"issue_id":"bd-2x7","depends_on_id":"bd-1nm","type":"blocks","created_at":"2026-02-13T15:31:09.331797Z","created_by":"tayloreernisse"},{"issue_id":"bd-2x7","depends_on_id":"bd-29k","type":"blocks","created_at":"2026-02-13T15:31:09.300374Z","created_by":"tayloreernisse"},{"issue_id":"bd-2x7","depends_on_id":"bd-2wp","type":"blocks","created_at":"2026-02-13T15:31:09.267231Z","created_by":"tayloreernisse"},{"issue_id":"bd-2x7","depends_on_id":"bd-71g","type":"blocks","created_at":"2026-02-13T15:31:09.372386Z","created_by":"tayloreernisse"}]} +{"id":"bd-2z1","title":"Convert all commands to use CommandContext","description":"## Background\nAfter CommandContext exists and tags is converted as proof of concept (bd-1iz), convert all remaining commands to accept `&CommandContext` instead of individual `mode`/`cache`/`timing` parameters. This eliminates ~15-20 lines of identical scaffolding per command.\n\n## Approach\nFor each command handler:\n1. Change `execute()` signature to accept `&CommandContext`\n2. Replace `AsyncCache::new(cache_dir())` with `ctx.cache.clone()` (AsyncCache derives Clone)\n3. Replace `Instant::now()` / timing with `ctx.elapsed()` for duration\n4. Replace `mode` parameter with `ctx.mode`\n5. For commands needing network_policy: use `ctx.network_policy`\n6. 
For commands needing config_path: use `ctx.config_path.as_deref()`\n\n### Per-command current signatures and changes:\n\n**Simple query commands:**\n| Command | Current Signature | Changes |\n|---------|------------------|---------|\n| `show.rs:50` | `execute(args: &Args, robot: bool)` | `execute(args: &Args, ctx: &CommandContext)` -- use `ctx.cache`, `ctx.mode`, `ctx.elapsed()` |\n| `schemas.rs:88` | `execute(args: &Args, robot: bool)` | Same pattern. Also update `execute_list` and `execute_show` to receive cache/mode from ctx. |\n| `search.rs:174` | `execute(args: &Args, robot_mode: bool)` | Same. `execute_all_aliases` uses `ctx.cache.list_aliases()` instead of creating its own. |\n| `diff.rs:61` | `execute(args: &Args, robot_mode: bool)` | Same. Two load_index calls use `ctx.cache`. |\n| `tags.rs:58` | Already converted in bd-1iz | No change needed. |\n\n**List command:**\n| Command | Current Signature | Changes |\n|---------|------------------|---------|\n| `list.rs:135` | `execute(args: &Args, robot_mode: bool)` | `execute(args: &Args, ctx: &CommandContext)`. All-aliases variant at line 307 uses `ctx.cache.list_aliases()`. |\n\n**Management commands:**\n| Command | Current Signature | Changes |\n|---------|------------------|---------|\n| `aliases.rs:123` | `execute(args: &Args, robot: bool)` | Same pattern. Sub-functions (`cmd_list`, etc.) receive `&ctx.cache` and `ctx.mode`. |\n| `doctor.rs:344` | `execute(args: &Args, robot_mode: bool)` | Same. Uses `ctx.cache` for health checks. |\n| `cache_cmd.rs:151` | `execute(args: &Args, robot: bool)` | Same. 4 sub-functions each previously created their own CacheManager -- all use `ctx.cache`. |\n\n**Network-aware commands:**\n| Command | Current Signature | Changes |\n|---------|------------------|---------|\n| `fetch.rs:390` | `execute(args: &Args, robot_mode: bool, network_flag: &str, config_override: Option<&Path>)` | `execute(args: &Args, ctx: &CommandContext)`. 
Uses `ctx.network_policy` (already resolved), `ctx.config_path.as_deref()`, `ctx.cache`. Removes `resolve_policy(network_flag)?` call. |\n| `sync_cmd.rs:1073` | `execute(args: &Args, robot: bool, network_flag: &str, config_override: Option<&Path>)` | Same as fetch. |\n\n**Sync command (robot_docs):**\n| Command | Current Signature | Changes |\n|---------|------------------|---------|\n| `robot_docs.rs:484` | `pub fn execute(args: &Args, pretty: bool)` | `pub fn execute(args: &Args, ctx: &CommandContext)`. Uses `matches!(ctx.mode, OutputMode::Robot { pretty: true })`. Note: this is the only sync (non-async) command. `ctx.cache` is unused. |\n\n### main.rs dispatch update:\n```rust\nlet ctx = CommandContext::new(mode, cli.network.as_str(), cli.config.as_deref())?;\nlet result = match &cli.command {\n Commands::Fetch(args) => fetch::execute(args, &ctx).await,\n Commands::List(args) => list::execute(args, &ctx).await,\n Commands::Show(args) => show::execute(args, &ctx).await,\n Commands::Search(args) => search::execute(args, &ctx).await,\n Commands::Schemas(args) => schemas::execute(args, &ctx).await,\n Commands::Tags(args) => tags::execute(args, &ctx).await,\n Commands::Aliases(args) => aliases::execute(args, &ctx).await,\n Commands::Sync(args) => sync_cmd::execute(args, &ctx).await,\n Commands::Doctor(args) => doctor::execute(args, &ctx).await,\n Commands::Cache(args) => cache_cmd::execute(args, &ctx).await,\n Commands::Diff(args) => diff::execute(args, &ctx).await,\n Commands::RobotDocs(args) => robot_docs::execute(args, &ctx),\n};\n```\n\nRemove `network_flag`, `config_override`, and local `robot`/`pretty` variables from main.\n\n## Acceptance Criteria\n- [ ] All 12 commands accept `&CommandContext` (not individual parameters)\n- [ ] Zero `AsyncCache::new(cache_dir())` in command handlers (all use `ctx.cache`)\n- [ ] Zero `Instant::now()` in command handlers (all use `ctx.elapsed()`)\n- [ ] main.rs creates ONE `CommandContext` and passes `&ctx` to all commands\n- [ 
] fetch and sync use `ctx.network_policy` and `ctx.config_path` (no more `resolve_policy()` calls in commands)\n- [ ] `robot_docs.rs` receives `&CommandContext` but only uses `ctx.mode`\n- [ ] All integration tests pass\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/main.rs (single CommandContext, pass to all 12 commands)\n- MODIFY: src/cli/show.rs, schemas.rs, search.rs, diff.rs (query commands)\n- MODIFY: src/cli/list.rs\n- MODIFY: src/cli/aliases.rs, doctor.rs, cache_cmd.rs\n- MODIFY: src/cli/fetch.rs (remove network_flag/config_override params)\n- MODIFY: src/cli/sync_cmd.rs or src/cli/sync/*.rs (remove network_flag/config_override params)\n- MODIFY: src/cli/robot_docs.rs (accept &CommandContext, extract mode)\n\n## TDD Anchor\nNo new tests needed -- this is a mechanical signature change. Existing tests verify behavior.\nVERIFY: `cargo test` (all tests)\nVERIFY: `cargo clippy --all-targets -- -D warnings`\n\n## Edge Cases\n- `robot_docs.rs` is the only sync (non-async) command -- it receives `&CommandContext` but `ctx.cache` is unused (it generates static documentation)\n- fetch and sync previously took `network_flag: &str` and resolved policy internally via `resolve_policy()` -- now `CommandContext::new()` resolves upfront, meaning invalid network flag errors happen BEFORE command dispatch (this is better UX: fail fast)\n- search.rs and list.rs all-aliases variants created their own CacheManager for iteration -- convert to use `ctx.cache.clone()` and `ctx.cache.list_aliases().await`\n- `CommandContext` is not `Clone` by default (contains `Instant`) -- commands receive `&CommandContext`, sub-functions receive `&ctx.cache` or `ctx.mode`\n\n## Dependency Context\nDepends on bd-1iz (CommandContext created and tags converted as proof of concept). All Epic 1 beads must also be done (commands already accept OutputMode -- this bead changes the parameter from `mode: OutputMode` to `ctx: &CommandContext` and accesses `ctx.mode`). 
Also depends on bd-2x6 (cleanup complete, resolve_output_mode returns OutputMode) and bd-3aa (commands already use AsyncCache).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:29:22.756303Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:44:50.776087Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2z1","depends_on_id":"bd-1iz","type":"blocks","created_at":"2026-02-13T15:31:16.008592Z","created_by":"tayloreernisse"},{"issue_id":"bd-2z1","depends_on_id":"bd-2l2","type":"parent-child","created_at":"2026-02-13T15:29:22.757753Z","created_by":"tayloreernisse"},{"issue_id":"bd-2z1","depends_on_id":"bd-2x6","type":"blocks","created_at":"2026-02-13T15:31:16.058018Z","created_by":"tayloreernisse"}]} {"id":"bd-30a","title":"Implement aliases command with list, rename, delete, set-default","description":"## Background\nThe aliases command manages multiple API specs. List all aliases with stats, show details, rename, delete, set default. All operations except delete are metadata-only (fast). 
Delete removes the entire alias directory after acquiring lock.\n\n## Approach\nImplement src/cli/aliases.rs with AliasesArgs and execute():\n\n**AliasesArgs:** list (bool, default), show (Option), rename (Option> — [old, new], requires 2 values), delete (Option), set_default (Option).\n\n**Operations:**\n- **List:** CacheManager.list_aliases() -> display name, url, version, is_default, cached_at, size, endpoint/schema counts\n- **Show:** load meta for specific alias, display full details\n- **Rename:** validate new alias name format (same rules as alias creation — alphanumeric, hyphens, underscores), check new name does not already exist, rename directory atomically using `std::fs::rename()` syscall (atomic on same filesystem), if renamed alias was the default alias in config, update config.default_alias to the new name and save\n- **Delete:** acquire lock on alias directory, remove entire alias directory (`std::fs::remove_dir_all`), if deleted alias was the default alias, clear default_alias in config (set to None), save config. No confirmation prompt — CLI is non-interactive for agent compatibility. PRD says \"explicit delete required\" meaning the user must explicitly pass --delete, but no interactive Y/N prompt.\n- **Set-default:** verify alias exists in cache before setting, update config.default_alias, save config. 
If alias does not exist, return error with suggestion listing available aliases.\n\n## Error Handling Details\n\n**Rename errors:**\n- New name fails format validation -> error with INVALID_ALIAS_NAME code and suggestion showing valid format\n- New name already exists -> error with ALIAS_EXISTS code\n- Rename to same name -> no-op, return success (idempotent, do not error)\n- Old alias does not exist -> error with ALIAS_NOT_FOUND code\n- Filesystem rename fails -> error with IO_ERROR code\n\n**Delete errors:**\n- Alias does not exist -> error with ALIAS_NOT_FOUND code\n- Lock contention (e.g., sync running) -> error with LOCK_CONTENTION code and suggestion to retry\n- Deleting the only alias -> allowed (leaves empty aliases state, no special handling)\n\n**Set-default errors:**\n- Alias does not exist -> error with ALIAS_NOT_FOUND code and suggestion listing available aliases\n\n## Acceptance Criteria\n- [ ] `aliases --robot` lists all aliases with correct metadata (name, url, version, is_default, cached_at, size, endpoint_count, schema_count)\n- [ ] `aliases --show petstore` shows full details for one alias\n- [ ] `aliases --rename old new` renames directory atomically and updates config if renamed alias was default\n- [ ] `aliases --rename old old` (same name) is a no-op, returns success\n- [ ] `aliases --delete old-api` removes alias directory and clears default if it was default\n- [ ] Delete does NOT prompt for confirmation (non-interactive CLI)\n- [ ] `aliases --set-default petstore` updates config, errors if alias does not exist\n- [ ] Rename validates new alias format (alphanumeric, hyphens, underscores)\n- [ ] Rename checks new name does not already exist\n- [ ] Delete of default alias clears default_alias in config\n- [ ] Robot output for each operation is well-structured with ok/data/meta envelope\n- [ ] Error responses include appropriate error codes and suggestions\n\n## Edge Cases\n- **Rename to same name:** No-op, return success (idempotent 
behavior).\n- **Delete the only alias:** Allowed. Leaves cache in empty state. Subsequent commands that need an alias will error with ALIAS_NOT_FOUND suggesting the user fetch a spec.\n- **Delete while sync is running:** Lock contention. Return LOCK_CONTENTION error with suggestion to wait or retry. Do not force-delete.\n- **Set-default to non-existent alias:** Error with ALIAS_NOT_FOUND and suggestion listing available aliases from cache.\n- **Rename when target name has invalid characters:** Error with INVALID_ALIAS_NAME showing the format rules.\n\n## Files\n- MODIFY: src/cli/aliases.rs (AliasesArgs, execute, rename/delete/set-default logic)\n- MODIFY: src/output/robot.rs (add output_aliases)\n- MODIFY: src/output/human.rs (add output_aliases)\n\n## TDD Anchor\nRED: Write `test_aliases_list` — fetch two specs, run aliases --robot, assert data.aliases has length 2.\nRED: Write `test_aliases_rename` — rename an alias, verify directory moved and config updated.\nRED: Write `test_aliases_rename_same_name` — rename to same name, verify no-op success.\nRED: Write `test_aliases_delete` — delete alias, verify directory removed and config cleared.\nRED: Write `test_aliases_delete_lock_contention` — hold lock on alias, attempt delete, assert LOCK_CONTENTION error.\nGREEN: Implement list_aliases and output.\nVERIFY: `cargo test test_aliases_list`\n\n## Dependency Context\nUses CacheManager (list_aliases, delete_alias) from bd-3ea. 
Uses Config (default_alias, save) from bd-1sb.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:28:47.390765Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.260202Z","closed_at":"2026-02-12T19:25:35.260164Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["management","phase2"],"dependencies":[{"issue_id":"bd-30a","depends_on_id":"bd-1sb","type":"blocks","created_at":"2026-02-12T16:28:47.395669Z","created_by":"tayloreernisse"},{"issue_id":"bd-30a","depends_on_id":"bd-2pl","type":"parent-child","created_at":"2026-02-12T16:28:47.394226Z","created_by":"tayloreernisse"},{"issue_id":"bd-30a","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:28:47.396077Z","created_by":"tayloreernisse"},{"issue_id":"bd-30a","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:28:47.394978Z","created_by":"tayloreernisse"}]} {"id":"bd-37c","title":"Add SBOM generation and cosign attestation","description":"## What\nGenerate SBOM (CycloneDX or SPDX) during CI build. 
Sign release artifacts with cosign for provenance attestation.\n\n## Acceptance Criteria\n- [ ] SBOM generated in CI pipeline\n- [ ] cosign attestation attached to release artifacts\n- [ ] Verifiable with cosign verify\n\n## Files\n- MODIFY: .gitlab-ci.yml (add SBOM + cosign steps)","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:57.365996Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:45.149708Z","compaction_level":0,"original_size":0,"labels":["future","phase3"],"dependencies":[{"issue_id":"bd-37c","depends_on_id":"bd-2e4","type":"blocks","created_at":"2026-02-12T16:42:45.149692Z","created_by":"tayloreernisse"},{"issue_id":"bd-37c","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:31:57.367010Z","created_by":"tayloreernisse"}]} +{"id":"bd-39e","title":"Create AsyncCache wrapper with spawn_blocking","description":"## Background\nCacheManager uses std::fs and fs2::FileExt (flock) -- blocking I/O on the tokio async runtime. Under sync --all --jobs=4, concurrent async tasks hit blocking cache I/O, potentially starving tokio worker threads. The acquire_lock() method does a busy-wait loop with thread::sleep(50ms) polling.\n\n## Approach\n1. Add #[derive(Clone)] to CacheManager (it wraps PathBuf -- trivially cloneable)\n2. Create AsyncCache struct in src/core/cache.rs:\n ```rust\n #[derive(Clone)]\n pub struct AsyncCache {\n inner: CacheManager,\n }\n \n impl AsyncCache {\n pub fn new(cache_dir: PathBuf) -> Self {\n Self { inner: CacheManager::new(cache_dir) }\n }\n \n pub fn cache_dir(&self) -> &Path {\n self.inner.cache_dir()\n }\n }\n ```\n3. Add async wrappers for each public CacheManager method using spawn_blocking:\n - load_index(alias) -> Result<(SpecIndex, CacheMetadata)>\n - load_raw(alias, meta) -> Result\n - write_cache(alias, ...) 
-> Result\n - list_aliases() -> Result>\n - ensure_dirs(alias) -> Result<()>\n - alias_dir(alias) -> PathBuf (sync -- just path computation, no I/O)\n - delete_alias_dir(alias) -> Result<()>\n Each wrapper: clones self.inner and alias string, moves into spawn_blocking closure, unwraps JoinError.\n4. Handle JoinError from spawn_blocking by mapping to SwaggerCliError::Cache(\"task join error: ...\")\n\n## Acceptance Criteria\n- CacheManager derives Clone\n- AsyncCache struct exists with async wrappers for all public CacheManager methods\n- Each async method uses tokio::task::spawn_blocking internally\n- AsyncCache::load_index returns identical results to CacheManager::load_index\n- AsyncCache::write_cache produces identical on-disk state to CacheManager::write_cache\n- cargo test passes\n- cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- MODIFY: src/core/cache.rs (add Clone derive, add AsyncCache)\n\n## TDD Anchor\nRED: test_async_cache_load_index_matches_sync -- write spec via sync CacheManager, load via AsyncCache, assert identical SpecIndex\nGREEN: Implement AsyncCache with spawn_blocking wrappers\nVERIFY: cargo test cache\n\nAdditional tests:\n- test_async_cache_write_then_load_roundtrip\n- test_async_cache_concurrent_reads -- spawn 10 concurrent load_index tasks, all succeed\n- test_async_cache_list_aliases_matches_sync\n\n## Edge Cases\n- spawn_blocking JoinError (e.g. runtime shutdown) -- map to SwaggerCliError::Cache with descriptive message\n- CacheManager::write_cache has #[allow(clippy::too_many_arguments)] -- AsyncCache wrapper mirrors the same signature\n- load_raw needs CacheMetadata by value (not reference) since it moves into spawn_blocking -- clone the metadata\n- alias_dir() is pure computation (no I/O) -- keep it sync on AsyncCache, no spawn_blocking needed\n\n## Dependency Context\nNo dependencies on other beads. Independent of Epic 1. 
CacheManager already exists and is unchanged.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:27:34.986195Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:31:01.353167Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-39e","depends_on_id":"bd-1ti","type":"parent-child","created_at":"2026-02-13T15:27:34.987137Z","created_by":"tayloreernisse"}]} +{"id":"bd-3aa","title":"Convert command handlers to use AsyncCache","description":"## Background\nAfter AsyncCache exists (bd-39e), convert all command handlers from `CacheManager::new(cache_dir())` to `AsyncCache::new(cache_dir())`. This ensures all cache I/O goes through `spawn_blocking`, preventing tokio runtime starvation under concurrent load.\n\n## Approach\nFor each command handler, replace `CacheManager::new(cache_dir())` with `AsyncCache::new(cache_dir())` and add `.await` to cache method calls. All command handlers are already `async fn`, so adding `.await` is mechanical.\n\n### Per-command changes:\n\n**Simple query commands (load_index only):**\n- `tags.rs` (line 60): `CacheManager::new(cache_dir())` -> `AsyncCache::new(cache_dir())`; `.load_index(alias).await`\n- `show.rs` (line 53): Same pattern; also `.load_raw(alias, &meta).await` for raw spec access\n- `schemas.rs` (line 93 region): Same; both `execute_list` and `execute_show` use `load_index`\n- `search.rs`: `load_index` in single-alias path; `list_aliases` + loop of `load_index` in all-aliases path\n- `diff.rs`: Two `load_index` calls (left and right alias)\n\n**List command:**\n- `list.rs` (line 157): `load_index` in single-alias; `list_aliases` + loop of `load_index` in all-aliases path\n\n**Management commands:**\n- `aliases.rs` (line 124): `list_aliases` in cmd_list; `load_index` in cmd_show; `ensure_dirs`+`alias_dir` in cmd_rename; `delete_alias` in cmd_delete\n- `doctor.rs`: `list_aliases`, `load_index` per alias, `alias_dir` for disk size checks, `ensure_dirs` for repair\n- 
`cache_cmd.rs` (4 instances at lines 151, 192, 240, 308): `list_aliases` in stats; `alias_dir().to_string()` in path (sync, no .await needed); `delete_alias` in prune; `delete_alias` in evict\n\n**Fetch and sync:**\n- `fetch.rs`: `ensure_dirs`, `write_cache`\n- `sync_cmd.rs` or `sync/*.rs` (if split done): `load_index`, `load_raw`, `write_cache`, `ensure_dirs`, `list_aliases`\n\n### Method mapping (CacheManager -> AsyncCache):\n| CacheManager (sync) | AsyncCache (async) | Needs .await? |\n|---------------------|-------------------|---------------|\n| `.load_index(alias)` | `.load_index(alias)` | Yes |\n| `.load_raw(alias, &meta)` | `.load_raw(alias, meta.clone())` | Yes (meta cloned for spawn_blocking) |\n| `.write_cache(alias, ...)` | `.write_cache(alias, ...)` | Yes |\n| `.list_aliases()` | `.list_aliases()` | Yes |\n| `.ensure_dirs(alias)` | `.ensure_dirs(alias)` | Yes |\n| `.delete_alias(alias)` | `.delete_alias(alias)` | Yes |\n| `.alias_dir(alias)` | `.alias_dir(alias)` | NO (pure path computation, stays sync) |\n| `.cache_dir()` | `.cache_dir()` | NO (returns &Path) |\n\n### Import changes:\nReplace `use crate::core::cache::CacheManager;` with `use crate::core::cache::AsyncCache;` in each file.\n\n## Acceptance Criteria\n- [ ] Zero `CacheManager::new()` in any command handler (only `AsyncCache::new()`)\n- [ ] All `.load_index()`, `.load_raw()`, `.write_cache()`, `.list_aliases()`, `.ensure_dirs()`, `.delete_alias()` calls have `.await`\n- [ ] `.alias_dir()` and `.cache_dir()` calls do NOT have `.await` (sync methods)\n- [ ] Sync `CacheManager` only used in: tests, `src/core/cache.rs` internals, and within `AsyncCache` itself\n- [ ] All integration tests pass\n- [ ] Lock contention tests pass (`tests/lock_contention_test.rs`)\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/tags.rs, show.rs, schemas.rs, search.rs, diff.rs (query commands)\n- MODIFY: src/cli/list.rs (query + all-aliases)\n- MODIFY: src/cli/aliases.rs, 
doctor.rs, cache_cmd.rs (management)\n- MODIFY: src/cli/fetch.rs (write path)\n- MODIFY: src/cli/sync_cmd.rs or src/cli/sync/*.rs (read + write paths)\n\n## TDD Anchor\nNo new tests needed -- existing integration tests verify correct behavior. The change is mechanical (CacheManager -> AsyncCache, add .await).\nVERIFY: `cargo test` (all tests)\nVERIFY: `cargo clippy --all-targets -- -D warnings`\n\n## Edge Cases\n- `cache_cmd.rs` `execute_path()` uses `cm.alias_dir(alias).to_string_lossy()` -- alias_dir is sync on AsyncCache, no .await needed\n- `doctor.rs` uses CacheManager for both reading and repair -- both become AsyncCache calls\n- `load_raw` takes `&CacheMetadata` on CacheManager but AsyncCache needs owned data for spawn_blocking -- AsyncCache's `load_raw` takes `meta: CacheMetadata` (cloned). Callers: `show.rs` and `sync_cmd.rs` -- add `.clone()` at call site.\n- Tests in command modules may create `CacheManager` directly for unit tests -- these STAY sync (test convenience, no tokio runtime needed)\n- If Epic 2 (sync split) is done first, modify `sync/single.rs` and `sync/batch.rs`. If not, modify `sync_cmd.rs`.\n\n## Dependency Context\nUses `AsyncCache` from bd-39e (the wrapper struct with spawn_blocking methods). 
AsyncCache has the same public API as CacheManager but with async method signatures.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:27:52.569639Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:44:14.058696Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3aa","depends_on_id":"bd-1ti","type":"parent-child","created_at":"2026-02-13T15:27:52.570336Z","created_by":"tayloreernisse"},{"issue_id":"bd-3aa","depends_on_id":"bd-39e","type":"blocks","created_at":"2026-02-13T15:31:15.877905Z","created_by":"tayloreernisse"}]} {"id":"bd-3aq","title":"Epic: Phase 3 Future","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:29.339564Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:29.340289Z","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-3b6","title":"Build async HTTP client with SSRF protection and streaming download","description":"## Background\nswagger-cli fetches OpenAPI specs over HTTPS with strict security controls. The HTTP client must enforce SSRF protection (blocking private/loopback/link-local/multicast IPs), require HTTPS by default, support streaming downloads with max-bytes enforcement, and handle retries with backoff. This is async (tokio + reqwest).\n\n## Approach\nCreate src/core/http.rs with:\n\n**AsyncHttpClient struct:** Wraps reqwest::Client configured with rustls-tls, connect timeout (5s), overall timeout (configurable, default 10s), redirect policy (max 5). Provides `fetch_spec()` async method.\n\n**SSRF Protection:** Before connecting, resolve the hostname and check the IP against blocked CIDR ranges: 127.0.0.0/8, ::1, 169.254.0.0/16, fe80::/10, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, multicast (224.0.0.0/4, ff00::/8). Also check resolved IP AFTER redirects (DNS rebinding defense). Return PolicyBlocked error for violations. 
Accept --allow-private-host exceptions.\n\n**HTTPS Enforcement:** Reject http:// URLs unless --allow-insecure-http is set. Return PolicyBlocked.\n\n**Streaming Download:** Use response.chunk() in a loop, counting bytes. Abort when exceeding max_bytes (default 25MB). This prevents OOM on huge specs.\n\n**Retries:** Retry on 5xx and network errors up to N times (default 2) with exponential backoff + jitter. Honor Retry-After header. Do NOT retry 4xx (except 429 rate limit).\n\n**Auth Headers:** Accept Vec of (name, value) header pairs. Redact auth values in any error messages.\n\n## Acceptance Criteria\n- [ ] fetch_spec(\"https://...\") returns body bytes on success\n- [ ] Loopback IP (127.0.0.1) is blocked with PolicyBlocked error\n- [ ] Private IP (10.0.0.1) is blocked with PolicyBlocked error\n- [ ] Link-local (169.254.169.254) is blocked with PolicyBlocked error\n- [ ] http:// URL without --allow-insecure-http returns PolicyBlocked\n- [ ] Download exceeding max_bytes aborts with InvalidSpec error\n- [ ] 401/403 returns Auth error (not retried)\n- [ ] 500 is retried up to retry count\n- [ ] Auth header values are not included in error messages\n\n## Files\n- CREATE: src/core/http.rs (AsyncHttpClient, SSRF checks, streaming download, retries)\n- MODIFY: src/core/mod.rs (pub mod http;)\n\n## TDD Anchor\nRED: Write `test_ssrf_blocks_loopback` — call the IP validation function with 127.0.0.1, assert it returns Err(PolicyBlocked).\nGREEN: Implement CIDR range checking.\nVERIFY: `cargo test test_ssrf_blocks`\n\nAdditional tests (use mockito for HTTP):\n- test_fetch_success_https\n- test_fetch_rejects_http\n- test_fetch_max_bytes_abort\n- test_fetch_retries_on_500\n- test_fetch_no_retry_on_401\n- test_auth_header_redacted_in_errors\n\n## Edge Cases\n- DNS resolution must happen BEFORE connecting — use `tokio::net::lookup_host()` or reqwest's resolve API\n- DNS rebinding: a hostname might resolve to public IP initially, then private IP on redirect. 
Check IP at EACH hop.\n- IPv6 mapped IPv4 addresses (::ffff:127.0.0.1) must also be caught\n- Retry-After header may be seconds or HTTP-date — parse both formats\n- connect_timeout (5s) is separate from overall timeout (10s)\n\n## Dependency Context\nUses SwaggerCliError variants (Network, Auth, PolicyBlocked, InvalidSpec) from bd-ilo (error types and core data models).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:26:35.163338Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:46:06.061487Z","closed_at":"2026-02-12T17:46:06.061440Z","close_reason":"Async HTTP client with SSRF protection, HTTPS enforcement, DNS resolution checks, streaming download with retries","compaction_level":0,"original_size":0,"labels":["fetch","phase1","security"],"dependencies":[{"issue_id":"bd-3b6","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:26:35.167736Z","created_by":"tayloreernisse"},{"issue_id":"bd-3b6","depends_on_id":"bd-3ny","type":"parent-child","created_at":"2026-02-12T16:26:35.167093Z","created_by":"tayloreernisse"}]} {"id":"bd-3bl","title":"Implement tags command","description":"## Background\nThe tags command is simple — it lists OpenAPI tags with their endpoint counts and descriptions. Pure index-backed, fast.\n\n## Approach\nImplement src/cli/tags.rs with TagsArgs (alias only) and execute(). Load index, output tags from index.tags (already sorted and counted during index building). Robot: data.tags[] with name, description, endpoint_count. 
Human: formatted list with \"X total\" in header.\n\n## Acceptance Criteria\n- [ ] `tags petstore --robot` returns JSON with data.tags array\n- [ ] Each tag has name (string), description (string|null), endpoint_count (integer)\n- [ ] Tags sorted by name ASC (pre-sorted in index)\n- [ ] Human output shows tag name, count, and description\n- [ ] Human output shows \"X total\" count in header line\n- [ ] Tags with no description show null in robot output, empty/omitted in human output\n- [ ] Empty tags list (spec with no tags defined) returns ok:true with data.tags as empty array\n- [ ] Robot meta includes standard fields (schema_version, tool_version, command, duration_ms)\n\n## Edge Cases\n- **Spec with no tags:** Return ok:true, data.tags: [], meta.total: 0. Human output: \"0 total\" header, no rows.\n- **Tags with empty descriptions:** Tag defined in spec with `description: \"\"` — treat as null/empty in output (same as missing description).\n- **Orphaned tags:** Tags defined at root level in the OpenAPI spec but not referenced by any operation. These should still appear in output with endpoint_count: 0 (they exist in the spec, the command reports what the spec declares).\n\n## Files\n- MODIFY: src/cli/tags.rs (TagsArgs, execute)\n- MODIFY: src/output/robot.rs (add output_tags)\n- MODIFY: src/output/human.rs (add output_tags)\n\n## TDD Anchor\nRED: Write `test_tags_list` — fetch petstore, run tags --robot, assert data.tags has expected tag count.\nRED: Write `test_tags_empty` — use a spec with no tags, assert data.tags is empty array.\nRED: Write `test_tags_no_description` — use a spec with a tag that has no description, assert description is null in robot output.\nGREEN: Implement tags command.\nVERIFY: `cargo test test_tags_list`\n\n## Dependency Context\nUses SpecIndex and IndexedTag types from bd-ilo (error types and core data models). 
Uses CacheManager.load_index from bd-3ea (cache read path).","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:28:05.366529Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.259676Z","closed_at":"2026-02-12T19:25:35.259636Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["phase2","query"],"dependencies":[{"issue_id":"bd-3bl","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:28:05.368603Z","created_by":"tayloreernisse"},{"issue_id":"bd-3bl","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:28:05.368039Z","created_by":"tayloreernisse"},{"issue_id":"bd-3bl","depends_on_id":"bd-jek","type":"parent-child","created_at":"2026-02-12T16:28:05.367634Z","created_by":"tayloreernisse"}]} @@ -35,10 +54,13 @@ {"id":"bd-3ll","title":"Epic: Global Features","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:25.182608Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:56:19.814082Z","closed_at":"2026-02-12T20:56:19.814006Z","close_reason":"All child beads completed","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-3ny","title":"Epic: Fetch Pipeline","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:18.835110Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:49.963696Z","closed_at":"2026-02-12T20:33:49.963651Z","close_reason":"All 3 child beads closed: HTTP client, fetch command, spec indexer","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-3pz","title":"Add OS keychain credential backend","description":"## What\nImplement CredentialSource::Keyring variant — resolve auth tokens from macOS Keychain or Linux Secret Service at runtime.\n\n## Acceptance Criteria\n- [ ] macOS Keychain lookup works\n- [ ] Linux Secret Service lookup works\n- [ ] Graceful fallback when keychain unavailable\n\n## Files\n- CREATE: src/core/keyring.rs\n- 
MODIFY: src/core/config.rs (implement Keyring resolution)","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:57.341889Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:45.178091Z","compaction_level":0,"original_size":0,"labels":["future","phase3"],"dependencies":[{"issue_id":"bd-3pz","depends_on_id":"bd-2e4","type":"blocks","created_at":"2026-02-12T16:42:45.178074Z","created_by":"tayloreernisse"},{"issue_id":"bd-3pz","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:31:57.342970Z","created_by":"tayloreernisse"}]} +{"id":"bd-3va","title":"Epic: Property Tests for Search Engine and Indexer","description":"Add 14+ proptest property tests covering search scoring monotonicity, deterministic ordering, Unicode safety, snippet bounds, format detection idempotency, normalize roundtrips, and build_index invariants. Current property tests (tests/property_test.rs) only cover 4 trivial properties (hash determinism, index ordering, JSON roundtrip, hash format).\n\n## Child Beads (parallel, no deps between them)\n1. **bd-1x1** -- Add 8 property tests for search engine (tests/property_search_test.rs)\n2. **bd-2vu** -- Add 6 property tests for indexer and format detection (tests/property_indexer_test.rs)\n\n## Independence\nThis epic is fully independent of all other epics. 
Both beads can start immediately.","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-13T15:23:45.137482Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:45:39.798598Z","compaction_level":0,"original_size":0} {"id":"bd-60k","title":"Generate curl commands from endpoints","description":"## What\nNew command: swagger-cli curl [--method METHOD] that generates a ready-to-run curl command with correct URL, headers, auth, and example request body.\n\n## Acceptance Criteria\n- [ ] Generates valid curl command for endpoint\n- [ ] Includes auth headers from profile\n- [ ] Includes example request body from schema\n\n## Files\n- CREATE: src/cli/curl.rs","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:57.318701Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:45.203709Z","compaction_level":0,"original_size":0,"labels":["future","phase3"],"dependencies":[{"issue_id":"bd-60k","depends_on_id":"bd-2e4","type":"blocks","created_at":"2026-02-12T16:42:45.203691Z","created_by":"tayloreernisse"},{"issue_id":"bd-60k","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:31:57.319605Z","created_by":"tayloreernisse"}]} +{"id":"bd-71g","title":"Convert fetch command to OutputMode","description":"## Background\nfetch.rs (933 lines) is a DOCUMENTED EXCEPTION to the HumanDisplay pattern. 
It prints progressive output during execution (\"Fetching URL...\", \"Format: JSON\", \"Normalizing...\", \"Building index...\", \"Cached as {alias}\") that does not fit the produce-data-then-emit-once model.\n\nCurrent execute signature (line 390):\n```rust\npub async fn execute(\n args: &Args,\n robot_mode: bool,\n network_flag: &str,\n config_override: Option<&std::path::Path>,\n) -> Result<(), SwaggerCliError>\n```\n\nOutput struct (line 90):\n```rust\npub struct FetchOutput {\n pub alias: String,\n pub url: Option,\n pub title: String,\n pub version: String,\n pub endpoint_count: usize,\n pub schema_count: usize,\n pub cached_at: String,\n pub source_format: String,\n pub content_hash: String,\n}\n```\n\n## Approach\n1. Change execute() signature: replace `robot_mode: bool` with `mode: OutputMode`\n2. Replace `if robot_mode` checks with `matches!(mode, OutputMode::Robot { .. })`\n3. For Robot mode: keep existing `robot_success(output, \"fetch\", start.elapsed())` call at end (or convert to emit for consistency)\n4. For Human mode: keep progressive `println!` calls inline (documented exception -- NO HumanDisplay impl)\n5. Keep `network_flag` and `config_override` params (they get absorbed into CommandContext in Epic 5, not here)\n\nThis is primarily a signature change to make the interface consistent. Minimal code changes beyond the parameter type.\n\n### Specific if-branches to update:\n- Line ~430 region: `if robot_mode` for quiet progressive output -- change to `if matches!(mode, OutputMode::Robot { .. })`\n- Line ~530 region: `if robot_mode` for suppressing progress messages\n- Line ~580 region: `if !robot_mode` for human fetch output\n- Final output: `if robot_mode { robot_success(output, ...) }` -> `if matches!(mode, OutputMode::Robot { .. }) { let pretty = matches!(mode, OutputMode::Robot { pretty: true }); if pretty { robot_success_pretty(...) } else { robot_success(...) 
} }`\n\n## Acceptance Criteria\n- [ ] fetch::execute() accepts `OutputMode` instead of `bool`\n- [ ] Robot output unchanged (valid RobotEnvelope JSON)\n- [ ] Robot pretty mode produces indented JSON (new behavior from OutputMode::Robot { pretty: true })\n- [ ] Human output unchanged (progressive messages)\n- [ ] No HumanDisplay impl for fetch (documented exception)\n- [ ] Integration tests pass (`test_fetch_and_list`, etc.)\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/fetch.rs (accept OutputMode, replace bool checks)\n- MODIFY: src/main.rs (pass mode to fetch at line 121-123)\n\n## TDD Anchor\nNo new tests needed -- this is a signature change. Existing integration tests verify behavior.\nVERIFY: `cargo test integration`\nVERIFY: `cargo test fetch`\n\n## Edge Cases\n- fetch is async with streaming downloads -- OutputMode is only checked at output decision points, not during download\n- Error output still uses `robot::robot_error()` through main.rs error handler (unchanged)\n- The `--resolve-external-refs` flag adds additional output messages in human mode -- must work with both modes\n- Pretty printing: fetch currently never calls `robot_success_pretty` -- with OutputMode, it can now produce pretty robot JSON via `OutputMode::Robot { pretty: true }`\n\n## Dependency Context\nUses `OutputMode` from bd-2im (type only, not emit/HumanDisplay). This is an interface-only change. 
Pattern context from bd-bq8 (tags PoC).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:25:41.669499Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:46:40.425445Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-71g","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:25:41.670209Z","created_by":"tayloreernisse"},{"issue_id":"bd-71g","depends_on_id":"bd-bq8","type":"blocks","created_at":"2026-02-13T15:31:09.232553Z","created_by":"tayloreernisse"}]} {"id":"bd-a7e","title":"Bootstrap Rust project and directory structure","description":"## Background\nswagger-cli is a greenfield Rust CLI tool for querying OpenAPI specifications. Nothing exists yet — no Cargo.toml, no src/, no VCS. This bead creates the complete project skeleton that all other beads build on.\n\nNote: The PRD code examples show sync patterns (blocking reqwest), but we've made a conscious decision to implement async from the start (tokio + non-blocking reqwest), following feedback-2 recommendations. The PRD code examples are stale in this regard.\n\n## Approach\n1. Run `cargo init --name swagger-cli` to create the Rust project\n2. Run `jj init --git` to initialize jj version control (colocated with git)\n3. Populate Cargo.toml with all dependencies (async tokio instead of blocking reqwest):\n - `reqwest = { version = \"0.13\", default-features = false, features = [\"json\", \"rustls-tls\"] }`\n - `tokio = { version = \"1\", features = [\"full\"] }`\n - `serde`, `serde_json`, `serde_yaml`, `clap`, `anyhow`, `thiserror`, `toml`, `directories`, `colored`, `tabled`, `chrono`, `regex`, `sha2`, `fs2`\n - dev-deps: `assert_cmd`, `predicates`, `tempfile`, `mockito`, `criterion`, `tokio` (macros+rt)\n4. Create full directory structure: src/{main.rs, lib.rs, errors.rs, utils.rs}, src/cli/, src/core/, src/output/, tests/integration/, tests/fixtures/, benches/, docs/robot-schema/v1/\n5. 
Add .gitignore, run `cargo check`\n\n## Acceptance Criteria\n- [ ] `cargo check` succeeds with zero errors\n- [ ] Cargo.toml has all dependencies with correct features (async reqwest, tokio)\n- [ ] src/main.rs has `#[tokio::main] async fn main()` that exits 0\n- [ ] All directories from project structure exist\n- [ ] src/lib.rs declares pub mods for cli, core, output, errors, utils\n- [ ] jj initialized (`.jj/` exists)\n- [ ] .gitignore present\n\n## Files\n- CREATE: Cargo.toml, src/main.rs, src/lib.rs, src/errors.rs, src/utils.rs, src/cli/mod.rs, src/core/mod.rs, src/output/mod.rs, .gitignore\n\n## TDD Anchor\nRED: `cargo check` fails (no modules). GREEN: Create placeholders. VERIFY: `cargo check && echo OK`\n\n## Edge Cases\n- Use `edition = \"2021\"` (safer for dep compat)\n- Do NOT add `[[bench]]` entries yet\n- main.rs should NOT do any real work yet","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:23:21.100423Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:33:03.858953Z","closed_at":"2026-02-12T17:33:03.858784Z","close_reason":"Completed: project bootstrapped with edition 2024, all deps, async tokio, directory structure, jj init","compaction_level":0,"original_size":0,"labels":["foundation","phase1"],"dependencies":[{"issue_id":"bd-a7e","depends_on_id":"bd-3e0","type":"parent-child","created_at":"2026-02-12T16:23:21.102893Z","created_by":"tayloreernisse"}]} {"id":"bd-acf","title":"Implement search engine with tokenized scoring and search command","description":"## Background\nThe search engine is a core piece of swagger-cli. It provides tokenized multi-term text search across endpoint paths, summaries, descriptions, and schema names. All search is index-backed (never loads raw.json). Scores are quantized to integer basis points for cross-platform determinism. 
Results include ranked ordering with stable tie-breaking.\n\n## Approach\nImplement src/core/search.rs with SearchEngine struct and src/cli/search.rs with SearchArgs:\n\n**SearchEngine:** Takes a SpecIndex reference. search() method tokenizes query (whitespace-split unless --exact), scores each endpoint and schema against terms with field weights: path=10, summary=5, description=2, schema_name=8. Coverage boost: matching more terms increases score (score *= 1.0 + coverage_ratio). Quantize: (raw_float * 100.0).round() as u32.\n\n**Tie-breaking (deterministic):** Primary: score DESC. Secondary: type ordinal (endpoint=0 before schema=1). Tertiary: path/name ASC, method_rank ASC. Assign 1-based rank after sorting.\n\n**Unicode-safe snippets:** safe_snippet() uses char_indices() to find char-boundary-safe positions. Context window of 50 chars before/after match. Prefix/suffix with \"...\" when truncated.\n\n**SearchArgs:** alias (Option), query (String, positional), case_sensitive (bool), exact (bool), in_fields (Option -- comma-separated: all/paths/descriptions/schemas, invalid -> USAGE_ERROR), limit (usize, default 20), all_aliases (bool -- Phase 2 cross-alias).\n\n**SearchOptions:** search_paths, search_descriptions, search_schemas (parsed from --in), case_sensitive, exact, limit.\n\n## Acceptance Criteria\n- [ ] search(\"pet status\") finds endpoints with \"pet\" and \"status\" in path/summary\n- [ ] Scores quantized to integer (u32), deterministic across platforms\n- [ ] --exact treats query as single phrase\n- [ ] --case-sensitive respects case\n- [ ] --in paths only searches path field\n- [ ] --in invalid_field returns USAGE_ERROR\n- [ ] Results sorted by score DESC with stable tie-breaking\n- [ ] Unicode-safe snippets don't panic on multi-byte chars\n- [ ] Robot output: results[] with type, path/name, method, summary, rank, score, matches[]\n- [ ] Search never loads raw.json (index-only)\n\n## Files\n- CREATE: src/core/search.rs (SearchEngine, SearchResult, 
SearchResultType, Match, SearchOptions, tokenize, safe_snippet)\n- MODIFY: src/cli/search.rs (SearchArgs, execute)\n- MODIFY: src/output/robot.rs (add output_search)\n- MODIFY: src/output/human.rs (add output_search)\n\n## TDD Anchor\nRED: Write `test_search_basic` -- create a SpecIndex with petstore endpoints, search for \"pet status\", assert results include /pet/findByStatus.\nGREEN: Implement SearchEngine::search().\nVERIFY: `cargo test test_search_basic`\n\nRED: Write `test_search_scores_deterministic` -- search for \"pet\" on petstore index, record scores, search again, assert identical scores and ordering.\nGREEN: Implement quantized scoring with stable sort.\nVERIFY: `cargo test test_search_scores_deterministic`\n\n## Edge Cases\n- Empty query should return empty results, not error\n- Single-char queries should work (common for agents: \"/\" to find all paths)\n- Coverage boost division by zero: if terms.len() == 0, skip boost (shouldn't happen with non-empty query)\n- safe_snippet on very short text: don't add \"...\" if text fits entirely within context\n\n## Dependency Context\nUses SpecIndex, IndexedEndpoint, and IndexedSchema types from bd-ilo (error types and core data models). Uses CacheManager.load_index from bd-3ea (cache read path). 
Uses CLI skeleton from bd-3d2.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:28:05.308043Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.259122Z","closed_at":"2026-02-12T19:25:35.259085Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["phase2","query"],"dependencies":[{"issue_id":"bd-acf","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:28:05.310709Z","created_by":"tayloreernisse"},{"issue_id":"bd-acf","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:28:05.310222Z","created_by":"tayloreernisse"},{"issue_id":"bd-acf","depends_on_id":"bd-jek","type":"parent-child","created_at":"2026-02-12T16:28:05.309771Z","created_by":"tayloreernisse"}]} {"id":"bd-b8h","title":"Add YAML output format (--format yaml)","description":"## What\nAdd --format yaml option to list/show/search/schemas commands. Use serde_yaml for serialization with deterministic key ordering. 
Create YAML golden fixtures.\n\n## Acceptance Criteria\n- [ ] --format yaml outputs valid YAML for all query commands\n- [ ] YAML output is deterministic (sorted keys)\n- [ ] Golden YAML fixtures exist\n\n## Files\n- MODIFY: src/output/mod.rs (add yaml output mode)\n- CREATE: src/output/yaml.rs","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:57.239297Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:45.279099Z","compaction_level":0,"original_size":0,"labels":["future","phase3"],"dependencies":[{"issue_id":"bd-b8h","depends_on_id":"bd-2e4","type":"blocks","created_at":"2026-02-12T16:42:45.279082Z","created_by":"tayloreernisse"},{"issue_id":"bd-b8h","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:31:57.241719Z","created_by":"tayloreernisse"}]} +{"id":"bd-bq8","title":"Convert tags command and main.rs to OutputMode (proof of concept)","description":"## Background\ntags.rs is the simplest command handler (168 lines) with a clean if/else robot branch at line 64. Converting it first validates the OutputMode+HumanDisplay+emit() pattern before mass rollout to more complex commands.\n\nCurrent execute signature (line 58):\n```rust\npub async fn execute(args: &Args, robot_mode: bool) -> Result<(), SwaggerCliError>\n```\n\nCurrent output struct (line 22, NOT pub -- needs to become pub for emit):\n```rust\nstruct TagsOutput {\n tags: Vec,\n total: usize,\n}\nstruct TagEntry {\n name: String,\n description: Option,\n endpoint_count: usize,\n}\n```\n\nCurrent robot/human branch (lines 64-85):\n- Robot: `robot::robot_success(output, \"tags\", start.elapsed())`\n- Human: converts to TagRow, prints header \"{title} {version} -- {count} tags\", then renders table\n\n## Approach\n\n### 1. 
Update main.rs\n- Add `resolve_output_mode(cli: &Cli) -> OutputMode`:\n ```rust\n fn resolve_output_mode(cli: &Cli) -> OutputMode {\n if cli.no_robot { return OutputMode::Human; }\n if cli.robot { return OutputMode::Robot { pretty: cli.pretty }; }\n if std::env::var(\"SWAGGER_CLI_ROBOT\")\n .is_ok_and(|v| v == \"1\" || v.eq_ignore_ascii_case(\"true\")) {\n return OutputMode::Robot { pretty: cli.pretty };\n }\n if std::io::stdout().is_terminal() { OutputMode::Human }\n else { OutputMode::Robot { pretty: cli.pretty } }\n }\n ```\n- Replace `let robot = resolve_robot_mode(&cli);` (line 114) with `let mode = resolve_output_mode(&cli);`\n- Pass `mode` to tags: `tags::execute(args, mode).await`\n- Keep `pre_scan_robot()` unchanged (for parse error handling)\n- Temporarily coerce mode to bool for OTHER commands: `let robot = matches!(mode, OutputMode::Robot { .. });`\n- Keep `let pretty = cli.pretty;` for robot_docs until it's converted\n\n### 2. Update tags.rs\n- Change signature: `pub async fn execute(args: &Args, mode: OutputMode) -> Result<(), SwaggerCliError>`\n- Make `TagsOutput` and `TagEntry` pub (needed for emit's Serialize bound, and for test assertions)\n- Impl `HumanDisplay for TagsOutput`:\n ```rust\n impl HumanDisplay for TagsOutput {\n fn display_human(&self, w: &mut dyn std::io::Write) -> std::io::Result<()> {\n // Note: header with spec title/version requires CacheMetadata\n // which TagsOutput doesn't have. Options:\n // a) Add meta fields to TagsOutput\n // b) Print header before emit() call\n // Going with (a): add spec_title and spec_version to TagsOutput\n writeln!(w, \"{} {} -- {} tags\", self.spec_title, self.spec_version, self.total)?;\n let rows: Vec = self.tags.iter().map(|t| TagRow { ... 
}).collect();\n write!(w, \"{}\", render_table_or_empty(&rows, \"No tags defined in this spec.\"))\n }\n }\n ```\n- Add `spec_title: String` and `spec_version: String` fields to `TagsOutput`\n- Update `build_output()` to accept &CacheMetadata and populate these fields\n- Replace lines 64-85 with: `emit(&output, mode, \"tags\", start.elapsed());`\n\n### 3. Import updates\n- Add `use crate::output::{OutputMode, HumanDisplay, emit};` to tags.rs\n- Remove `use crate::output::robot;` from tags.rs\n\n## Acceptance Criteria\n- [ ] tags::execute() accepts `OutputMode` instead of `bool`\n- [ ] `HumanDisplay` implemented for `TagsOutput`, producing identical human output: \"{title} {version} -- {count} tags\\n{table}\"\n- [ ] `TagsOutput` includes `spec_title` and `spec_version` fields for human display\n- [ ] main.rs has `resolve_output_mode()` returning `OutputMode`\n- [ ] main.rs passes `mode` to tags, coerces to `bool` for all other commands (temporary)\n- [ ] `pre_scan_robot()` unchanged (still used for parse error formatting)\n- [ ] Integration test `test_tags_list` passes unchanged\n- [ ] Golden test output for tags is byte-identical\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/main.rs (add `resolve_output_mode`, pass mode to tags, coerce for others)\n- MODIFY: src/cli/tags.rs (accept OutputMode, add meta fields to TagsOutput, impl HumanDisplay, use emit())\n\n## TDD Anchor\nRED: `test_tags_human_display` in tags.rs tests -- create TagsOutput with spec_title=\"PetStore\", spec_version=\"3.0\", 2 tags, call `display_human(&mut Vec)`, assert output contains \"PetStore 3.0 -- 2 tags\" and table headers \"NAME\" and \"ENDPOINTS\"\nGREEN: Implement HumanDisplay for TagsOutput\nVERIFY: `cargo test tags`\n\nAdditional tests:\n- `test_tags_empty_human_display` -- 0 tags produces \"No tags defined in this spec.\"\n- `test_resolve_output_mode_robot_flag` -- Cli with robot=true returns OutputMode::Robot\n- 
`test_resolve_output_mode_no_robot_flag` -- Cli with no_robot=true returns OutputMode::Human\n\n## Edge Cases\n- `TagsOutput` needs spec_title/spec_version for the human header line. Without these fields, `display_human()` can't print the header. The simplest fix is adding these fields to TagsOutput and populating from CacheMetadata.\n- `build_output()` signature changes from `(&SpecIndex)` to `(&SpecIndex, &CacheMetadata)` to capture title/version.\n- Other commands still work with bool coercion: `let robot = matches!(mode, OutputMode::Robot { .. });` -- this is a transition bridge, removed in bd-2x6.\n- `robot_docs.rs` still takes `pretty: bool` directly (line 136 in main.rs) -- unaffected by this change.\n\n## Dependency Context\nUses `OutputMode`, `HumanDisplay`, and `emit()` from bd-2im (output foundation bead). This is the proof-of-concept that validates the pattern for all subsequent conversions.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T15:24:40.141336Z","created_by":"tayloreernisse","updated_at":"2026-02-13T15:46:20.505890Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-bq8","depends_on_id":"bd-14o","type":"parent-child","created_at":"2026-02-13T15:24:40.142174Z","created_by":"tayloreernisse"},{"issue_id":"bd-bq8","depends_on_id":"bd-2im","type":"blocks","created_at":"2026-02-13T15:31:09.112191Z","created_by":"tayloreernisse"}]} {"id":"bd-epk","title":"Epic: Query Commands Phase 1","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:20.420042Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:50.724270Z","closed_at":"2026-02-12T20:33:50.724222Z","close_reason":"All 2 child beads closed: list and show commands","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-gvr","title":"Create Dockerfile and installation script","description":"## Background\nDockerfile for minimal Alpine-based image and install.sh for curl-based binary installation with 
checksum/signature verification.\n\n## Approach\n\n**Dockerfile (multi-stage build):**\n- Builder stage: `FROM rust:1.93-alpine` as builder, `apk add musl-dev`, `COPY . .`, `cargo build --release --locked --target x86_64-unknown-linux-musl`\n- Runtime stage: `FROM alpine:latest`, `apk add --no-cache ca-certificates`, `COPY --from=builder /app/target/x86_64-unknown-linux-musl/release/swagger-cli /usr/local/bin/swagger-cli`\n- Pre-create XDG dirs: `mkdir -p /root/.config/swagger-cli /root/.cache/swagger-cli/aliases`\n- `ENTRYPOINT [\"swagger-cli\"]` (no CMD — user passes subcommands directly)\n\n**install.sh:**\n- Header: `#!/usr/bin/env bash`, `set -euo pipefail`\n- Secure temp directory: `mktemp -d` with cleanup trap (`trap \"rm -rf $TMPDIR\" EXIT`)\n- OS detection: `uname -s` → Darwin or Linux (reject others with clear error)\n- Arch detection: `uname -m` → arm64/aarch64 maps to aarch64, x86_64 stays x86_64 (reject others)\n- Download URL: GitLab Package Registry URL pattern, constructed from OS+arch variables\n- Download: `curl -fsSL` binary + SHA256SUMS file\n- Checksum verification (portable): Linux uses `sha256sum --check`, macOS uses `shasum -a 256 --check` — detect which is available\n- Optional minisign verification: if `minisign` is on PATH, download `.minisig` file and verify signature; if not on PATH, print info message and skip (not an error)\n- Install: `chmod +x`, move to `/usr/local/bin/` (or `~/.local/bin/` if no write access to /usr/local/bin)\n- PATH check: verify install dir is on PATH, print warning if not with suggested export command\n\n## Acceptance Criteria\n- [ ] Dockerfile builds successfully with `docker build .`\n- [ ] Container runs `swagger-cli --version` and exits 0\n- [ ] Docker image is minimal (Alpine-based runtime, no build tools in final image)\n- [ ] install.sh starts with `set -euo pipefail` and creates secure temp dir with cleanup trap\n- [ ] install.sh detects OS (Darwin/Linux) and architecture (arm64/x86_64) correctly\n- [ ] 
install.sh rejects unsupported OS/arch with clear error message\n- [ ] Checksum verification works on Linux (sha256sum) and macOS (shasum -a 256)\n- [ ] Checksum failure aborts install with non-zero exit\n- [ ] Optional minisign verification runs when minisign is available, skips gracefully when not\n- [ ] Binary installed to /usr/local/bin or ~/.local/bin with executable permissions\n- [ ] PATH warning printed if install directory not on PATH\n\n## Files\n- CREATE: Dockerfile\n- CREATE: install.sh\n\n## TDD Anchor\nVERIFY: `docker build -t swagger-cli-test . && docker run swagger-cli-test --version`\nVERIFY: `bash -n install.sh` (syntax check)\n\n## Edge Cases\n- **musl vs glibc:** Alpine uses musl. The Dockerfile must use the musl target, not the gnu target. Mixing causes runtime failures.\n- **Rootless Docker:** ENTRYPOINT should work regardless of UID. Don't assume /root/ — use $HOME or a configurable path.\n- **install.sh on minimal systems:** Some minimal Docker images don't have `curl`. The script should check for curl and error with a clear message.\n- **Interrupted install:** The trap ensures temp dir cleanup on any exit (EXIT, not just specific signals). Verify install.sh doesn't leave artifacts on Ctrl+C.\n- **Apple Silicon detection:** `uname -m` returns \"arm64\" on macOS but \"aarch64\" on Linux. 
Both must map to the aarch64 binary.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:31:32.440225Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:55:47.309160Z","closed_at":"2026-02-12T20:55:47.309084Z","close_reason":"Completed by agent swarm","compaction_level":0,"original_size":0,"labels":["ci","phase2"],"dependencies":[{"issue_id":"bd-gvr","depends_on_id":"bd-1lo","type":"parent-child","created_at":"2026-02-12T16:31:32.444746Z","created_by":"tayloreernisse"},{"issue_id":"bd-gvr","depends_on_id":"bd-a7e","type":"blocks","created_at":"2026-02-12T16:31:32.445590Z","created_by":"tayloreernisse"}]} {"id":"bd-hcb","title":"Epic: Config and Cache Infrastructure","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:17.707241Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:33:48.577059Z","closed_at":"2026-02-12T20:33:48.577013Z","close_reason":"All 3 child beads closed: config system, cache write, cache read","compaction_level":0,"original_size":0,"labels":["epic"]} diff --git a/.gitignore b/.gitignore index 2c7f360..455745a 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,6 @@ # bv (beads viewer) local config and caches .bv/ + +# Ideas and planning documents (local only) +ideas/ diff --git a/IMPLEMENTATION_PLAN.md b/IMPLEMENTATION_PLAN.md new file mode 100644 index 0000000..5d7815b --- /dev/null +++ b/IMPLEMENTATION_PLAN.md @@ -0,0 +1,427 @@ +# swagger-cli Refactoring Plan: 5 System Weakness Remediations + +## Epic 1: Output Sink Abstraction + +### Problem +Every command handler takes `robot: bool`, manually branches on it 29+ times, and contains its own formatting logic. The `output/` module is a hollow shell (human.rs is 8 lines). Adding new output formats (YAML is on the roadmap) requires touching every command. Output concerns are tangled into business logic. 
+ +### Solution +Introduce an `OutputMode` enum and an `emit()` function that each command calls once with its typed data. Commands produce data; the output layer decides presentation. + +### Design + +**New type in `src/output/mod.rs`:** +```rust +pub enum OutputMode { + Robot, + Human, + // Future: Yaml, Table, etc. +} +``` + +**New `emit` function pattern:** +Each command defines a `CommandOutput` trait impl or uses a generic `emit()` function. The `HumanDisplay` trait provides the human-readable rendering. + +```rust +pub trait HumanDisplay { + fn display_human(&self, w: &mut dyn Write) -> std::io::Result<()>; +} + +pub fn emit( + data: &T, + mode: OutputMode, + command: &str, + duration: Duration, +) { + match mode { + OutputMode::Robot => robot::robot_success(data, command, duration), + OutputMode::Human => { + let mut stdout = std::io::stdout().lock(); + data.display_human(&mut stdout).expect("stdout write"); + } + } +} +``` + +### Implementation Steps + +1. **Create `OutputMode` enum and `HumanDisplay` trait** in `src/output/mod.rs` +2. **Create `emit()` function** in `src/output/mod.rs` +3. **Convert `tags` command first** (simplest command, 168 lines) as proof-of-concept: + - Impl `HumanDisplay` for `TagsOutput` + - Replace `if robot_mode { ... } else { ... }` with `emit(&output, mode, "tags", duration)` + - Remove `robot: bool` parameter from `execute()`, replace with `OutputMode` +4. **Propagate OutputMode through main.rs** -- replace `let robot = resolve_robot_mode(&cli)` with `let mode = resolve_output_mode(&cli)` returning `OutputMode` +5. **Convert remaining simple commands** one at a time (show, schemas, diff, search, cache_cmd, aliases, doctor, list) +6. **Convert fetch and sync** (most complex, last) +7. 
**Delete dead code** -- `robot: bool` parameters, inline formatting blocks + +### TDD Plan + +**RED tests to write first (in `src/output/mod.rs` tests):** +- `test_emit_robot_mode_produces_valid_json` -- call `emit()` with Robot mode, capture stdout, parse as `RobotEnvelope` +- `test_emit_human_mode_produces_readable_text` -- call `emit()` with Human mode, verify no JSON in output +- `test_human_display_tags_output` -- verify `TagsOutput::display_human()` produces table with correct headers + +**GREEN:** Implement `OutputMode`, `HumanDisplay`, `emit()` to pass tests. + +**REFACTOR:** Convert each command one at a time, running full test suite between each. + +### Acceptance Criteria +- Zero `if robot` / `if !robot` branches in command handlers +- `output/human.rs` contains all human formatting logic (not 8 lines) +- Adding a new output format requires zero changes to command handlers +- All existing integration tests and golden tests pass unchanged + +### Files Changed +- `src/output/mod.rs` -- add `OutputMode`, `HumanDisplay`, `emit()` +- `src/output/human.rs` -- move all human formatting here +- `src/main.rs` -- `resolve_output_mode()` replaces `resolve_robot_mode()` +- `src/cli/tags.rs` -- proof of concept conversion +- `src/cli/show.rs`, `src/cli/schemas.rs`, `src/cli/diff.rs`, `src/cli/search.rs`, `src/cli/cache_cmd.rs`, `src/cli/aliases.rs`, `src/cli/doctor.rs`, `src/cli/list.rs` -- convert each +- `src/cli/fetch.rs`, `src/cli/sync_cmd.rs` -- convert last (most complex) +- `src/cli/mod.rs` -- no changes needed (Cli struct keeps `robot` flag, OutputMode resolves from it) + +### Dependency Note +This epic should be completed before Epic 2 (sync split) since sync_cmd conversion is part of this work. 
+ +--- + +## Epic 2: Split sync_cmd.rs + +### Problem +`sync_cmd.rs` is 1,843 lines containing 5+ distinct responsibilities: CLI args, diff computation, checkpoint/resume, per-host rate limiting, concurrent job pool, single-alias sync, batch sync, and output formatting. It's the most complex module and the hardest to reason about. + +### Solution +Split into a `sync/` directory with focused modules while preserving the existing public API (`execute()` function). + +### Design + +**New directory: `src/cli/sync/`** + +| File | Responsibility | Lines (est.) | +|------|---------------|-------------| +| `mod.rs` | Re-exports, `Args` struct, `execute()` entry point | ~100 | +| `types.rs` | All Serialize structs: `SyncOutput`, `AliasSyncResult`, `SyncAllOutput`, `ChangeSummary`, `ChangeDetails`, `EndpointKey`, `SchemaDiff`, `EndpointDiff` | ~100 | +| `diff.rs` | `compute_diff()`, `endpoint_key()`, `endpoint_fingerprint()` | ~120 | +| `checkpoint.rs` | `SyncCheckpoint`, `load_checkpoint()`, `save_checkpoint()`, `remove_checkpoint()` | ~60 | +| `throttle.rs` | `PerHostThrottle`, `extract_host()` | ~50 | +| `single.rs` | `sync_inner()`, `sync_one_alias()`, `sync_one_alias_inner()`, `output_no_changes()`, `output_changes()` | ~250 | +| `batch.rs` | `sync_all_inner()` with concurrent stream logic | ~300 | + +### Implementation Steps + +1. **Create `src/cli/sync/` directory** +2. **Move types first** -- extract all `#[derive(Serialize)]` structs into `types.rs` +3. **Extract `diff.rs`** -- `compute_diff()`, `endpoint_key()`, `endpoint_fingerprint()`, `MAX_DETAIL_ITEMS` +4. **Extract `checkpoint.rs`** -- checkpoint load/save/remove + `CHECKPOINT_FILE` const +5. **Extract `throttle.rs`** -- `PerHostThrottle` and `extract_host()` +6. **Extract `single.rs`** -- single-alias sync logic and output helpers +7. **Extract `batch.rs`** -- `sync_all_inner()` concurrent execution +8. **Create `mod.rs`** -- `Args` struct, `execute()`, re-exports +9. 
**Update `src/cli/mod.rs`** -- replace `pub mod sync_cmd` with `pub mod sync` +10. **Update `src/main.rs`** -- `Commands::Sync` references + +### TDD Plan + +**This is a pure refactor -- no new behavior.** The TDD approach is: +- Run full test suite before starting: `cargo test` +- After each extraction step, verify: `cargo test && cargo clippy --all-targets -- -D warnings` +- No new tests needed (existing tests in `sync_cmd::tests` move to `sync/diff.rs` tests) + +**Specific test verification after each step:** +- `test_diff_no_changes`, `test_diff_added_endpoint`, `test_diff_removed_endpoint`, `test_diff_modified_endpoint`, `test_diff_added_schema`, `test_diff_removed_schema`, `test_diff_endpoint_modified_by_params` -- move to `sync/diff.rs` +- Integration tests (`tests/integration_test.rs`) that test `sync` command -- must pass unchanged + +### Acceptance Criteria +- `sync_cmd.rs` is deleted, replaced by `sync/` directory +- No file in `sync/` exceeds 350 lines +- All existing tests pass with zero modifications +- `pub async fn execute()` signature unchanged +- `cargo clippy --all-targets -- -D warnings` passes + +### Files Changed +- **Delete:** `src/cli/sync_cmd.rs` +- **Create:** `src/cli/sync/mod.rs`, `types.rs`, `diff.rs`, `checkpoint.rs`, `throttle.rs`, `single.rs`, `batch.rs` +- **Modify:** `src/cli/mod.rs` (module declaration), `src/main.rs` (import path if needed) + +### Dependency Note +Should be done after Epic 1 (output sink) since that will have already simplified the output formatting within sync. If done before Epic 1, the output formatting moves to `single.rs` and `batch.rs` and then needs to be re-extracted during Epic 1. + +--- + +## Epic 3: spawn_blocking for Cache I/O + +### Problem +`CacheManager` uses `std::fs` operations and `fs2::FileExt` (flock) -- all blocking I/O. But command handlers are `async fn` running on tokio. Under `sync --all --jobs=4`, multiple concurrent tasks hit blocking cache I/O, potentially starving the tokio runtime. 
+ +### Solution +Wrap cache operations in `tokio::task::spawn_blocking()`. Create async wrapper methods on a new `AsyncCacheManager` or add async variants directly to `CacheManager`. + +### Design + +**Option A (recommended): Async wrapper struct** +```rust +pub struct AsyncCache { + inner: CacheManager, +} + +impl AsyncCache { + pub async fn load_index(&self, alias: &str) -> Result<(SpecIndex, CacheMetadata), SwaggerCliError> { + let inner = self.inner.clone(); // CacheManager is just a PathBuf, cheap + let alias = alias.to_string(); + tokio::task::spawn_blocking(move || inner.load_index(&alias)) + .await + .map_err(|e| SwaggerCliError::Cache(format!("task join error: {e}")))? + } + // ... same pattern for write_cache, load_raw, list_aliases, etc. +} +``` + +This preserves the existing sync `CacheManager` for tests and simple cases while providing an async-safe interface for the runtime. + +### Implementation Steps + +1. **Derive `Clone` for `CacheManager`** (it's just a `PathBuf` wrapper -- trivial) +2. **Create `AsyncCache` in `src/core/cache.rs`** with async wrappers for each public method: + - `load_index()`, `load_raw()`, `write_cache()`, `list_aliases()`, `ensure_dirs()`, `alias_dir()`, `delete_alias_dir()`, `update_last_accessed()` +3. **Write tests for `AsyncCache`** verifying it produces identical results to sync `CacheManager` +4. 
**Convert command handlers** to use `AsyncCache` instead of `CacheManager`: + - Start with `tags.rs` (simplest) + - Then `list.rs`, `show.rs`, `search.rs`, `schemas.rs` + - Then `sync_cmd.rs` / `sync/` (most critical -- this is where contention happens) + - Then `fetch.rs`, `doctor.rs`, `cache_cmd.rs`, `aliases.rs` + +### TDD Plan + +**RED tests first:** +```rust +#[tokio::test] +async fn test_async_cache_load_index_matches_sync() { + // Setup: write a spec to cache using sync CacheManager + // Act: load via AsyncCache + // Assert: result matches sync CacheManager::load_index() +} + +#[tokio::test] +async fn test_async_cache_write_then_load_roundtrip() { + // Write via AsyncCache, load via AsyncCache, verify integrity +} + +#[tokio::test] +async fn test_async_cache_concurrent_reads_no_panic() { + // Spawn 10 concurrent load_index tasks on same alias + // All should succeed (shared read lock) +} +``` + +**GREEN:** Implement `AsyncCache` wrapper. + +**REFACTOR:** Convert command handlers one at a time. + +### Acceptance Criteria +- All cache I/O from async command handlers goes through `spawn_blocking` +- Existing sync `CacheManager` preserved for non-async tests +- `cargo test` passes (including lock_contention_test.rs) +- No direct `std::fs` calls from async contexts in command handlers + +### Files Changed +- `src/core/cache.rs` -- add `Clone` derive, add `AsyncCache` struct +- All `src/cli/*.rs` files -- replace `CacheManager::new()` with `AsyncCache::new()` + +### Dependency Note +Independent of Epics 1 and 2. Can be done in parallel. + +--- + +## Epic 4: Property Tests for Search Engine and Parser + +### Problem +Property tests currently cover only 4 trivial properties (hash determinism, JSON roundtrip, index ordering, hash format). The search engine (722 LOC with scoring, tokenization, Unicode snippet handling) and the OpenAPI parser/indexer (659 LOC) have zero property test coverage. 
+ +### Solution +Add targeted property tests using proptest for the search engine and format detection/parsing. + +### Design + +**New test file: `tests/property_search_test.rs`** + +Property tests for search engine: +1. **Score monotonicity:** adding a matching field to an endpoint should never decrease its score +2. **Deterministic ordering:** same query + same index = same result order, always +3. **Limit respected:** result count <= opts.limit for any query +4. **Coverage boost property:** matching N/N terms scores >= matching 1/N terms +5. **Case insensitivity:** lowercased query on lowercased data = same results as mixed case +6. **Empty query safety:** any whitespace-only string returns empty +7. **Unicode safety:** search never panics on arbitrary Unicode input (including emoji, RTL, zero-width chars) +8. **Snippet bounds:** snippet length never exceeds 50 chars + ellipsis markers + +**New test file: `tests/property_indexer_test.rs`** + +Property tests for indexer: +1. **Format detection idempotent:** detect_format(bytes, hint, ct) called twice = same result +2. **JSON sniffing correct:** bytes starting with `{` or `[` always detected as JSON +3. **Normalize roundtrip:** normalize_to_json(bytes, Json) preserves semantic content +4. **build_index never panics on valid OpenAPI:** generate random valid-ish OpenAPI structures, verify build_index produces a valid SpecIndex +5. **Index endpoint count matches paths:** number of IndexedEndpoints = sum of methods across all paths +6. **Content hash deterministic:** same bytes -> same hash, every time + +### Implementation Steps + +1. **Create `tests/property_search_test.rs`** with proptest strategies for generating `SpecIndex` and `SearchOptions` +2. **Implement search property tests** (8 tests above) +3. **Create `tests/property_indexer_test.rs`** with proptest strategies for generating OpenAPI-like JSON +4. **Implement indexer property tests** (6 tests above) +5. 
**Run with extended case count** to verify: `cargo test -- --ignored` or `PROPTEST_CASES=1000 cargo test` + +### TDD Plan + +These ARE the tests. Write them, then verify they pass against the existing implementation. If any fail, that's a real bug to fix. + +**Proptest strategy for SpecIndex generation:** +```rust +fn arb_indexed_endpoint() -> impl Strategy<Value = IndexedEndpoint> { + ( + "/[a-z]{1,5}(/[a-z]{1,5}){0,3}", // path + prop_oneof!["GET", "POST", "PUT", "DELETE", "PATCH"], // method + proptest::option::of("[A-Za-z ]{1,50}"), // summary + proptest::option::of("[A-Za-z ]{1,100}"), // description + ).prop_map(|(path, method, summary, description)| { + IndexedEndpoint { path, method, summary, description, /* ... defaults ... */ } + }) +} + +fn arb_spec_index() -> impl Strategy<Value = SpecIndex> { + proptest::collection::vec(arb_indexed_endpoint(), 0..50) + .prop_map(|endpoints| SpecIndex { endpoints, /* ... */ }) +} +``` + +### Acceptance Criteria +- 14 new property tests (8 search + 6 indexer) +- All pass with default proptest case count (256) +- No new dependencies (proptest already in dev-deps) +- Tests catch at least one real edge case (likely Unicode snippet handling) + +### Files Changed +- **Create:** `tests/property_search_test.rs`, `tests/property_indexer_test.rs` +- **Possibly modify:** `src/core/search.rs` if property tests reveal bugs + +### Dependency Note +Fully independent. Can be done in parallel with all other epics. + +--- + +## Epic 5: Shared Command Pipeline + +### Problem +Every command handler independently: captures `Instant::now()`, creates `CacheManager::new(cache_dir())`, loads index with `cm.load_index(alias)`, does work, formats output, passes duration. This is ~15-20 lines of identical scaffolding per command. Adding cross-cutting concerns (telemetry, logging, caching headers) requires touching every command. + +### Solution +Create a `CommandContext` struct that encapsulates the common setup, and a `run_command()` helper that handles timing and output. 
+ +### Design + +```rust +/// Shared context created once per command invocation. +pub struct CommandContext { + pub cache: AsyncCache, // or CacheManager + pub mode: OutputMode, + pub start: Instant, + pub network_policy: NetworkPolicy, + pub config_override: Option<PathBuf>, +} + +impl CommandContext { + pub fn new(mode: OutputMode, network_flag: &str, config_override: Option<&Path>) -> Result<Self, SwaggerCliError> { + Ok(Self { + cache: AsyncCache::new(cache_dir()), + mode, + start: Instant::now(), + network_policy: resolve_policy(network_flag)?, + config_override: config_override.map(PathBuf::from), + }) + } + + pub fn load_index(&self, alias: &str) -> Result<(SpecIndex, CacheMetadata), SwaggerCliError> { + self.cache.load_index(alias) + } + + pub fn elapsed(&self) -> Duration { + self.start.elapsed() + } +} +``` + +**Usage in commands:** +```rust +pub async fn execute(args: &Args, ctx: &CommandContext) -> Result<(), SwaggerCliError> { + let (index, meta) = ctx.load_index(&args.alias)?; + let output = build_output(&index); + emit(&output, ctx.mode, "tags", ctx.elapsed()); + Ok(()) +} +``` + +### Implementation Steps + +1. **Define `CommandContext` struct** in `src/cli/context.rs` +2. **Write tests** for `CommandContext::new()` and helper methods +3. **Update `main.rs`** to create `CommandContext` and pass to commands +4. **Convert `tags` first** (simplest) as proof of concept +5. **Convert all other commands** to accept `&CommandContext` +6. 
**Remove duplicated `CacheManager::new(cache_dir())` + `Instant::now()` from each handler** + +### TDD Plan + +**RED tests first:** +```rust +#[test] +fn test_command_context_creates_valid_cache() { + let ctx = CommandContext::new(OutputMode::Robot, "auto", None).unwrap(); + // cache dir should be the platform default + assert!(ctx.cache.cache_dir().exists() || true); // dir may not exist in test +} + +#[test] +fn test_command_context_elapsed_increases() { + let ctx = CommandContext::new(OutputMode::Human, "auto", None).unwrap(); + std::thread::sleep(Duration::from_millis(10)); + assert!(ctx.elapsed().as_millis() >= 10); +} + +#[test] +fn test_command_context_offline_policy() { + let ctx = CommandContext::new(OutputMode::Robot, "offline", None).unwrap(); + assert_eq!(ctx.network_policy, NetworkPolicy::Offline); +} +``` + +### Acceptance Criteria +- `CommandContext` created once in `main.rs`, passed to all commands +- No command handler creates its own `CacheManager` or `Instant` +- Each command's `execute()` signature includes `&CommandContext` +- All tests pass unchanged + +### Files Changed +- **Create:** `src/cli/context.rs` +- **Modify:** `src/cli/mod.rs` (add `pub mod context`) +- **Modify:** `src/main.rs` (create CommandContext, pass to dispatch) +- **Modify:** All `src/cli/*.rs` execute functions (accept `&CommandContext`) + +### Dependency Note +Depends on Epic 1 (OutputMode) and Epic 3 (AsyncCache). Should be done last. + +--- + +## Execution Order + +``` +Epic 4 (property tests) ─────────────────────────────────────┐ +Epic 3 (spawn_blocking) ─────────────────────────────────────┤ +Epic 1 (output sink) ──> Epic 2 (split sync) ──> Epic 5 (pipeline) +``` + +Epics 3 and 4 are fully independent and can run in parallel with everything. +Epic 1 should precede Epic 2 (simplifies sync output formatting before split). +Epic 5 depends on Epics 1 and 3 (needs OutputMode and AsyncCache). 
diff --git a/tests/.DS_Store b/tests/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c30ea2d307e5eb26a1b5342b745ad48a0e2f1ffa GIT binary patch literal 6148 zcmeHKO-jQ+6n;|+M(UzVmjj+4;Id#W@dVkr(Eb;irc?_qa}m*Py+Kdl9lV5B@O$%y zCNT}V5|Q^{-ZzhTF^C#q-*>IyC0rWQ6ngH^ zn^Q(he%lLOOl)G7_3e7^!MW&bn76F+dv;Z?Q=v&6PzTh3CI_%)a|9hh&FX+UpbqRE z;O~P)V~jlJg6`9SKV# zZ$6B_vhh0<Oj?jBRTBy`hVTL|F0J5kvgCb{3{1cJ5J(0Zpp8$ xwVUI$)}q~_aWF3z+?Sx^mSS*uDLzAUfpN