From 398311ca4cf44592722d981d3e06accd77ecfb89 Mon Sep 17 00:00:00 2001 From: teernisse Date: Thu, 12 Feb 2026 14:58:25 -0500 Subject: [PATCH] Wave 6: Integration tests, golden tests, index invariant tests, diff command (bd-rex, bd-2gp, bd-1ck) --- .beads/issues.jsonl | 6 +- docs/robot-schema/v1/error.schema.json | 83 ++++ docs/robot-schema/v1/success.schema.json | 62 +++ src/cli/cache_cmd.rs | 14 +- src/cli/diff.rs | 243 ++++++++++- src/cli/doctor.rs | 17 +- src/cli/fetch.rs | 72 ++-- src/cli/list.rs | 22 +- src/cli/show.rs | 54 +-- src/cli/sync_cmd.rs | 39 +- src/core/cache.rs | 6 +- src/core/config.rs | 37 +- src/core/diff.rs | 353 +++++++++++++++ src/core/http.rs | 17 +- src/core/mod.rs | 1 + src/main.rs | 11 +- src/output/human.rs | 6 - src/utils.rs | 17 +- tests/fixtures/golden/list.json | 72 ++++ tests/fixtures/golden/schemas_list.json | 23 + tests/fixtures/golden/schemas_show.json | 31 ++ tests/fixtures/golden/search.json | 110 +++++ tests/fixtures/golden/show.json | 51 +++ tests/fixtures/golden/tags.json | 24 ++ tests/golden_test.rs | 321 ++++++++++++++ tests/index_invariant_test.rs | 84 ++++ tests/integration_test.rs | 518 +++++++++++++++++++++++ 27 files changed, 2122 insertions(+), 172 deletions(-) create mode 100644 docs/robot-schema/v1/error.schema.json create mode 100644 docs/robot-schema/v1/success.schema.json create mode 100644 src/core/diff.rs create mode 100644 tests/fixtures/golden/list.json create mode 100644 tests/fixtures/golden/schemas_list.json create mode 100644 tests/fixtures/golden/schemas_show.json create mode 100644 tests/fixtures/golden/search.json create mode 100644 tests/fixtures/golden/show.json create mode 100644 tests/fixtures/golden/tags.json create mode 100644 tests/golden_test.rs create mode 100644 tests/index_invariant_test.rs diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index 341d00b..c667261 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -3,7 +3,7 @@ {"id":"bd-16o","title":"Wire fetch command with 
local file, stdin, and remote URL support","description":"## Background\nThe fetch command is the primary entry point for getting specs into swagger-cli. It orchestrates: HTTP download (or local file/stdin read), format detection, YAML normalization, index building, pointer validation, and crash-consistent cache write. It supports auth headers, bearer tokens, auth profiles, --force overwrite, and robot output.\n\n## Approach\nImplement src/cli/fetch.rs with FetchArgs struct and async execute() function:\n\n**FetchArgs (clap derive):**\n- url: String (positional, required — can be URL, file path, or \"-\" for stdin)\n- alias: String (--alias, required)\n- header: Vec (--header, repeatable)\n- auth_header: Vec (--auth-header, alias for --header)\n- bearer: Option (--bearer)\n- auth_profile: Option (--auth-profile)\n- force: bool (--force)\n- timeout_ms: u64 (--timeout-ms, default 10000)\n- max_bytes: u64 (--max-bytes, default 26214400)\n- retries: u32 (--retries, default 2)\n- input_format: Option (--input-format, values: auto/json/yaml)\n- resolve_external_refs: bool (--resolve-external-refs)\n- ref_allow_host: Vec (--ref-allow-host, repeatable)\n- ref_max_depth: u32 (--ref-max-depth, default 3)\n- ref_max_bytes: u64 (--ref-max-bytes, default 10MB)\n- allow_private_host: Vec (--allow-private-host, repeatable)\n- allow_insecure_http: bool (--allow-insecure-http)\n\n**Execute flow:**\n1. Validate alias format (validate_alias)\n2. Check if alias exists — error unless --force\n3. Resolve auth (merge --auth-profile with explicit headers, --bearer)\n4. Determine source: URL (http/https), local file (file:// or path), or stdin (-)\n5. For URL: use AsyncHttpClient.fetch_spec() with SSRF/HTTPS policy\n6. For local file: canonicalize path, read bytes directly (no network policy)\n7. For stdin: read all bytes from stdin\n8. Detect format, normalize to JSON\n9. Parse as serde_json::Value\n10. Build index (build_index)\n11. Write cache (write_cache with all artifacts)\n12. 
Output robot JSON or human success message\n\n## Acceptance Criteria\n- [ ] `swagger-cli fetch ./petstore.json --alias pet` succeeds (local file)\n- [ ] `swagger-cli fetch https://... --alias pet --robot` outputs JSON with ok:true, data.alias, data.endpoint_count\n- [ ] `swagger-cli fetch - --alias stdin-api` reads from stdin\n- [ ] Alias validation rejects \"../bad\" before any network call\n- [ ] Existing alias without --force returns ALIAS_EXISTS (exit 6)\n- [ ] --force overwrites existing alias\n- [ ] --bearer TOKEN adds Authorization: Bearer TOKEN header\n- [ ] --auth-profile loads from config.toml\n- [ ] Robot output includes: alias, url, version, title, endpoint_count, schema_count, cached_at, source_format, cache_dir, files, content_hash\n- [ ] Auth header values never appear in output or error messages\n\n## Files\n- MODIFY: src/cli/fetch.rs (FetchArgs, execute, auth resolution, source routing)\n- MODIFY: src/output/robot.rs (add output_fetch function)\n- MODIFY: src/output/human.rs (add output_fetch function)\n\n## TDD Anchor\nRED: Write integration test `test_fetch_local_file` — use assert_cmd to run `swagger-cli fetch tests/fixtures/petstore.json --alias test-pet --robot` with SWAGGER_CLI_HOME set to tempdir. 
Assert exit 0, stdout JSON has ok:true.\nGREEN: Implement full fetch pipeline.\nVERIFY: `cargo test test_fetch_local_file`\n\nAdditional tests:\n- test_fetch_alias_exists_error\n- test_fetch_force_overwrites\n- test_fetch_stdin\n- test_fetch_bearer_auth\n- test_fetch_yaml_file (if YAML fixture exists)\n\n## Edge Cases\n- stdin (\"-\") is not a URL — don't try to HTTP-fetch it\n- Local file paths must be canonicalized (resolve symlinks, relative paths) before reading\n- file:// URLs must be converted to local paths (strip scheme)\n- If auth-profile references an EnvVar source, resolve the env var at fetch time\n- Robot output: redact --bearer and --auth-header values even in success output\n\n## Dependency Context\nUses AsyncHttpClient from bd-3b6 (async HTTP client with SSRF protection). Uses build_index, detect_format, normalize_to_json from bd-189 (spec format detection and index building). Uses CacheManager.write_cache and validate_alias from bd-1ie (cache write path). Uses Config for auth profiles from bd-1sb (configuration system).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:26:35.220966Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.255563Z","closed_at":"2026-02-12T19:25:35.255378Z","close_reason":"Implemented in Wave 4 
commit","compaction_level":0,"original_size":0,"labels":["fetch","phase1"],"dependencies":[{"issue_id":"bd-16o","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:34:06.059316Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-1ie","type":"blocks","created_at":"2026-02-12T16:34:06.154074Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-1sb","type":"blocks","created_at":"2026-02-12T16:34:06.248426Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3b6","type":"blocks","created_at":"2026-02-12T16:34:06.005417Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:26:35.222663Z","created_by":"tayloreernisse"},{"issue_id":"bd-16o","depends_on_id":"bd-3ny","type":"parent-child","created_at":"2026-02-12T16:26:35.222248Z","created_by":"tayloreernisse"}]} {"id":"bd-189","title":"Implement spec format detection, YAML normalization, and index building","description":"## Background\nAfter downloading raw spec bytes, swagger-cli must detect the input format (JSON vs YAML), normalize YAML to JSON, parse the spec as serde_json::Value, and build a SpecIndex from it. The index building is the heart of the query performance -- it pre-extracts endpoints, schemas, and tags into a compact, sorted structure with JSON pointers back to raw.json. All pointers must be validated.\n\n## Approach\nCreate src/core/indexer.rs with:\n\n**Format detection:** `detect_format(bytes, filename_hint, content_type_hint) -> Format` where Format is Json or Yaml. Priority: explicit --input-format flag > Content-Type header > file extension > content sniffing (try JSON parse first, fall back to YAML).\n\n**YAML normalization:** `normalize_to_json(bytes, format) -> Result>` -- if YAML, parse with serde_yaml then serialize to serde_json. 
If JSON, pass through (validate it parses).\n\n**Index building:** `build_index(raw_json: &serde_json::Value, content_hash: &str, generation: u64) -> Result`:\n1. Extract info.title, info.version, openapi version\n2. Iterate paths.* -> methods -> build IndexedEndpoint with: path, method (uppercased), summary, description, operation_id, tags, deprecated, parameters (name/location/required/desc), request_body_required, request_body_content_types, security_schemes (effective: operation-level overrides root-level), security_required, operation_ptr (JSON pointer format: /paths/~1pet~1{petId}/get)\n3. Iterate components.schemas.* -> build IndexedSchema with name, schema_ptr\n4. Compute tags from endpoints + root-level tags -> IndexedTag with endpoint_count\n5. Sort deterministically: endpoints by (path ASC, method_rank ASC), schemas by (name ASC), tags by (name ASC)\n6. Validate ALL operation_ptr and schema_ptr resolve in the raw Value\n7. Set index_version to current version constant (1)\n\n**Canonical method ranking:** GET=0, POST=1, PUT=2, PATCH=3, DELETE=4, OPTIONS=5, HEAD=6, TRACE=7, unknown=99.\n\n**JSON pointer encoding:** Path segments use RFC 6901: `~` -> `~0`, `/` -> `~1`. 
So `/pet/{petId}` becomes `/paths/~1pet~1{petId}`.\n\n## Acceptance Criteria\n- [ ] JSON input detected and passed through correctly\n- [ ] YAML input detected and normalized to equivalent JSON\n- [ ] build_index extracts correct endpoint count from petstore spec (19 endpoints)\n- [ ] Endpoints sorted by path ASC, method_rank ASC (deterministic)\n- [ ] Schemas sorted by name ASC\n- [ ] Tags have correct endpoint_count\n- [ ] All operation_ptr values resolve in raw Value (validated during build_index)\n- [ ] Invalid pointer causes fetch failure (not silent corruption)\n- [ ] JSON pointer encoding handles /pet/{petId} correctly (escapes /)\n- [ ] Security inheritance: operation without security inherits root; operation with empty [] means no auth\n\n## Files\n- CREATE: src/core/indexer.rs (detect_format, normalize_to_json, build_index, method_rank, json_pointer_encode, validate_pointers)\n- MODIFY: src/core/mod.rs (pub mod indexer;)\n\n## TDD Anchor\nRED: Write `test_build_index_petstore` -- load tests/fixtures/petstore.json as Value, call build_index, assert endpoint_count == 19 and endpoints are sorted.\nGREEN: Implement full index building.\nVERIFY: `cargo test test_build_index_petstore`\n\nAdditional tests:\n- test_detect_format_json, test_detect_format_yaml\n- test_yaml_normalization_roundtrip\n- test_json_pointer_encoding\n- test_method_rank_ordering\n- test_security_inheritance\n- test_invalid_pointer_rejected\n\n## Edge Cases\n- Some specs have paths with special chars in operation IDs -- don't assume alphanumeric\n- serde_yaml may produce different JSON than direct JSON parse (number types, null handling) -- normalize consistently\n- Large specs (8MB+ GitHub) should still build index in <1s\n- OpenAPI 3.1 may use `webhooks` key -- ignore for MVP (only extract from `paths`)\n- Tags defined at root level but not used by any operation should still appear with endpoint_count=0\n\n## Dependency Context\nUses SpecIndex, IndexedEndpoint, IndexedSchema, IndexedTag, 
IndexedParam types from bd-ilo (error types + core models bead).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:26:35.194671Z","created_by":"tayloreernisse","updated_at":"2026-02-12T17:41:12.926777Z","closed_at":"2026-02-12T17:41:12.926730Z","close_reason":"Format detection, YAML normalization, index building with pointer validation","compaction_level":0,"original_size":0,"labels":["fetch","phase1"],"dependencies":[{"issue_id":"bd-189","depends_on_id":"bd-3ny","type":"parent-child","created_at":"2026-02-12T16:26:35.195659Z","created_by":"tayloreernisse"},{"issue_id":"bd-189","depends_on_id":"bd-ilo","type":"blocks","created_at":"2026-02-12T16:26:35.196142Z","created_by":"tayloreernisse"}]} {"id":"bd-1bp","title":"Implement fetch-time external ref bundling","description":"## Background\nOptional fetch-time external ref bundling. When --resolve-external-refs is passed during fetch, external $ref targets are fetched and inlined into raw.json. Requires explicit --ref-allow-host allowlist. Bounded by --ref-max-depth and --ref-max-bytes.\n\n## Approach\nAfter fetching the main spec and parsing as Value:\n1. Walk the Value tree looking for $ref values that don't start with \"#/\"\n2. For each external ref: parse URL, check host against --ref-allow-host allowlist (reject if not listed)\n3. Fetch external ref content (uses AsyncHttpClient, respects SSRF policy)\n4. Track total bytes fetched (abort at --ref-max-bytes)\n5. Track resolution depth (abort at --ref-max-depth for ref chains)\n6. Replace the $ref object with the fetched content\n7. 
Store bundled result as raw.json; original (with $ref pointers) as raw.source\n\n## Acceptance Criteria\n- [ ] --resolve-external-refs without --ref-allow-host returns USAGE_ERROR\n- [ ] External refs to allowed hosts are fetched and inlined\n- [ ] External refs to disallowed hosts are rejected with PolicyBlocked\n- [ ] --ref-max-depth limits chain resolution\n- [ ] --ref-max-bytes limits total fetched content\n- [ ] Bundled raw.json has all external refs resolved; raw.source preserves originals\n\n## Edge Cases\n- **Circular external refs:** A.yaml $refs B.yaml which $refs A.yaml. Must detect and break cycles (return error or annotate).\n- **Relative refs:** `$ref: \"./schemas/Pet.yaml\"` must resolve relative to the base spec URL, not CWD.\n- **Mixed internal + external refs:** Only resolve external refs (not starting with #/). Internal refs stay as-is.\n- **External ref returns non-JSON/YAML:** Return InvalidSpec error for that ref, don't fail the entire fetch.\n- **--ref-max-bytes exceeded:** Abort cleanly and report how many refs were resolved before the limit.\n\n## Files\n- CREATE: src/core/external_refs.rs (resolve_external_refs, walk_refs, fetch_ref)\n- MODIFY: src/cli/fetch.rs (integrate external ref resolution after main fetch)\n- MODIFY: src/core/mod.rs (pub mod external_refs;)\n\n## TDD Anchor\nRED: Write `test_external_ref_resolution` — create a spec with external $ref, mock the external URL, run fetch --resolve-external-refs --ref-allow-host localhost, verify ref is inlined.\nGREEN: Implement ref walking and fetching.\nVERIFY: `cargo test test_external_ref_resolution`\n\n## Dependency Context\nUses AsyncHttpClient from bd-3b6. 
Extends fetch pipeline from bd-16o.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.213098Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:58:12.383898Z","compaction_level":0,"original_size":0,"labels":["global","phase2"],"dependencies":[{"issue_id":"bd-1bp","depends_on_id":"bd-16o","type":"blocks","created_at":"2026-02-12T16:29:50.214577Z","created_by":"tayloreernisse"},{"issue_id":"bd-1bp","depends_on_id":"bd-3ll","type":"parent-child","created_at":"2026-02-12T16:29:50.214143Z","created_by":"tayloreernisse"}]} -{"id":"bd-1ck","title":"Implement diff command with structural comparison and CI gates","description":"## Background\nThe diff command compares two spec states structurally. It reports added, removed, and modified endpoints and schemas by comparing normalized indexes. Supports alias-vs-alias, alias-vs-URL, and --fail-on breaking for CI gates.\n\n## Approach\nImplement src/cli/diff.rs with DiffArgs and async execute():\n\n**DiffArgs:** left (String, positional), right (String, positional), fail_on (Option — \"breaking\"), details (bool).\n\n**Source resolution:** LEFT and RIGHT can be alias names or URLs. If URL, fetch into a temp location, build index, use for comparison (don't persist to cache).\n\n**Diff computation (reuse from sync):**\n- Compare endpoint sets by (path, method): added = in right not left, removed = in left not right, modified = in both but different (compare summary, tags, deprecated, parameters, security)\n- Compare schema sets by name: added, removed, modified (compare schema_ptr target content hashes? or just name-based for Phase 2)\n- Summary: total_changes, has_breaking (Phase 2: just structural; Phase 3: semantic classification)\n\n**--fail-on breaking:** If breaking changes detected, exit non-zero (exit code 17). Phase 2 heuristic: removed endpoint = breaking, removed required parameter = breaking. 
Added endpoint/schema = non-breaking.\n\n## Acceptance Criteria\n- [ ] `diff alias1 alias2 --robot` reports structural changes\n- [ ] Added/removed/modified endpoints correctly identified\n- [ ] --fail-on breaking exits non-zero when breaking changes exist\n- [ ] URL as right-side source works (temp fetch, not persisted)\n- [ ] Robot output: left, right, changes.endpoints, changes.schemas, summary\n\n## Edge Cases\n- **Same spec on both sides:** Return ok:true with changed:false, empty change lists. Not an error.\n- **URL-as-source fails to fetch:** Return Network error, not a diff-specific error.\n- **Breaking change heuristic false positives:** Endpoint renamed (removed old + added new) looks like a breaking removal. Phase 2 is heuristic-only — document this limitation.\n- **Very large diff (thousands of changes):** Apply same 200-item cap as sync --details with truncated flag.\n- **Schema comparison:** Phase 2 is name-based only (added/removed by name). Modified schema detection is Phase 3 (requires deep structural comparison).\n\n## Files\n- MODIFY: src/cli/diff.rs (DiffArgs, execute, compute_diff, classify_breaking)\n- MODIFY: src/output/robot.rs (add output_diff)\n- MODIFY: src/output/human.rs (add output_diff)\n\n## TDD Anchor\nRED: Write `test_diff_detects_added_endpoint` — fetch petstore twice (one modified), run diff, assert added endpoint appears.\nGREEN: Implement index comparison.\nVERIFY: `cargo test test_diff_detects_added`\n\n## Dependency Context\nReuses index diff logic from sync command (bd-3f4). Uses AsyncHttpClient for URL-as-source (bd-3b6). 
Uses indexer for building temp index from URL (bd-189).","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T16:30:58.972480Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:58:11.727380Z","compaction_level":0,"original_size":0,"labels":["diff","phase2"],"dependencies":[{"issue_id":"bd-1ck","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:30:58.976840Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ck","depends_on_id":"bd-21m","type":"parent-child","created_at":"2026-02-12T16:30:58.975608Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ck","depends_on_id":"bd-3f4","type":"blocks","created_at":"2026-02-12T16:30:58.976270Z","created_by":"tayloreernisse"}]} +{"id":"bd-1ck","title":"Implement diff command with structural comparison and CI gates","description":"## Background\nThe diff command compares two spec states structurally. It reports added, removed, and modified endpoints and schemas by comparing normalized indexes. Supports alias-vs-alias, alias-vs-URL, and --fail-on breaking for CI gates.\n\n## Approach\nImplement src/cli/diff.rs with DiffArgs and async execute():\n\n**DiffArgs:** left (String, positional), right (String, positional), fail_on (Option — \"breaking\"), details (bool).\n\n**Source resolution:** LEFT and RIGHT can be alias names or URLs. If URL, fetch into a temp location, build index, use for comparison (don't persist to cache).\n\n**Diff computation (reuse from sync):**\n- Compare endpoint sets by (path, method): added = in right not left, removed = in left not right, modified = in both but different (compare summary, tags, deprecated, parameters, security)\n- Compare schema sets by name: added, removed, modified (compare schema_ptr target content hashes? or just name-based for Phase 2)\n- Summary: total_changes, has_breaking (Phase 2: just structural; Phase 3: semantic classification)\n\n**--fail-on breaking:** If breaking changes detected, exit non-zero (exit code 17). 
Phase 2 heuristic: removed endpoint = breaking, removed required parameter = breaking. Added endpoint/schema = non-breaking.\n\n## Acceptance Criteria\n- [ ] `diff alias1 alias2 --robot` reports structural changes\n- [ ] Added/removed/modified endpoints correctly identified\n- [ ] --fail-on breaking exits non-zero when breaking changes exist\n- [ ] URL as right-side source works (temp fetch, not persisted)\n- [ ] Robot output: left, right, changes.endpoints, changes.schemas, summary\n\n## Edge Cases\n- **Same spec on both sides:** Return ok:true with changed:false, empty change lists. Not an error.\n- **URL-as-source fails to fetch:** Return Network error, not a diff-specific error.\n- **Breaking change heuristic false positives:** Endpoint renamed (removed old + added new) looks like a breaking removal. Phase 2 is heuristic-only — document this limitation.\n- **Very large diff (thousands of changes):** Apply same 200-item cap as sync --details with truncated flag.\n- **Schema comparison:** Phase 2 is name-based only (added/removed by name). Modified schema detection is Phase 3 (requires deep structural comparison).\n\n## Files\n- MODIFY: src/cli/diff.rs (DiffArgs, execute, compute_diff, classify_breaking)\n- MODIFY: src/output/robot.rs (add output_diff)\n- MODIFY: src/output/human.rs (add output_diff)\n\n## TDD Anchor\nRED: Write `test_diff_detects_added_endpoint` — fetch petstore twice (one modified), run diff, assert added endpoint appears.\nGREEN: Implement index comparison.\nVERIFY: `cargo test test_diff_detects_added`\n\n## Dependency Context\nReuses index diff logic from sync command (bd-3f4). Uses AsyncHttpClient for URL-as-source (bd-3b6). 
Uses indexer for building temp index from URL (bd-189).","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:30:58.972480Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:06:06.273101Z","closed_at":"2026-02-12T20:06:06.273054Z","close_reason":"Diff command implemented with structural comparison, CI gates, and 9 unit tests","compaction_level":0,"original_size":0,"labels":["diff","phase2"],"dependencies":[{"issue_id":"bd-1ck","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:30:58.976840Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ck","depends_on_id":"bd-21m","type":"parent-child","created_at":"2026-02-12T16:30:58.975608Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ck","depends_on_id":"bd-3f4","type":"blocks","created_at":"2026-02-12T16:30:58.976270Z","created_by":"tayloreernisse"}]} {"id":"bd-1cv","title":"Implement global network policy (offline/auto/online-only)","description":"## Background\nThe --network global flag controls whether swagger-cli makes network calls. \"auto\" (default) allows network when needed. \"offline\" blocks all network calls (fetch/sync fail with OFFLINE_MODE). \"online-only\" flags when network would be needed but is not available. This is critical for CI reproducibility and agent sandboxing.\n\n## Approach\n\n### NetworkPolicy Enum (src/core/network.rs)\nCreate `NetworkPolicy` enum with three variants:\n- **Auto** (default): Allow network calls when needed, no restrictions. Standard behavior.\n- **Offline**: Block ALL outbound network calls preemptively. Any command that would make a network request (fetch, sync) returns `SwaggerCliError::OfflineMode` immediately without attempting the call. Commands that are index-only (list, search, show, tags) work normally.\n- **OnlineOnly**: Allow network calls but surface a clear, distinct error if a network call would be needed AND DNS resolution fails or the host is unreachable. 
The error is different from OfflineMode — it indicates \"you requested online-only mode but network is unavailable\" rather than \"network calls are blocked by policy.\"\n\n### Implementation\n1. Parse --network flag in CLI (already in Cli struct from skeleton bead)\n2. Create `NetworkPolicy` enum and `check_network_policy()` function in `src/core/network.rs`\n3. Check policy before any HTTP call in AsyncHttpClient — if Offline, return `SwaggerCliError::OfflineMode` without making the request\n4. For OnlineOnly: attempt the request, but if it fails due to network issues, return a specific `SwaggerCliError::NetworkUnavailable` (different exit code or error code than OfflineMode)\n5. `SWAGGER_CLI_NETWORK` env var as alternative to --network flag (flag takes precedence)\n\n### Scope boundaries\n- **Local file sources (file:// paths, stdin):** Network policy does NOT apply. These are local I/O operations. `fetch ./local-spec.yaml --network offline` should succeed because no network call is made.\n- **Stdin sources:** Same as local files — no network needed, policy irrelevant.\n- **sync --dry-run in offline mode:** ALLOWED. Dry-run compares cached state without actually fetching, so no network call is needed. 
Returns cached state comparison only.\n\n## Acceptance Criteria\n- [ ] NetworkPolicy enum with Auto, Offline, OnlineOnly variants in src/core/network.rs\n- [ ] --network offline causes fetch (remote URL) to fail with OFFLINE_MODE error (exit 15)\n- [ ] --network offline allows fetch from local file path (no network needed)\n- [ ] --network offline allows fetch from stdin (no network needed)\n- [ ] --network offline allows list/search/show/tags (index-only, no network needed)\n- [ ] --network offline allows sync --dry-run (cached comparison only, no network)\n- [ ] --network online-only surfaces clear error when network is unavailable (distinct from OFFLINE_MODE)\n- [ ] --network auto allows all commands (default behavior, no restrictions)\n- [ ] SWAGGER_CLI_NETWORK=offline env var works same as --network offline flag\n- [ ] Flag takes precedence over env var when both are set\n- [ ] Robot error for offline mode has code OFFLINE_MODE with suggestion to remove --network flag or unset env var\n- [ ] Robot error for online-only network failure has code NETWORK_UNAVAILABLE with distinct suggestion\n\n## Edge Cases\n- **sync --dry-run in offline mode:** Allowed — returns cached state comparison only, no actual fetch happens. This is a read-only operation on cached data.\n- **Local file + offline mode:** Allowed — `fetch ./spec.yaml --network offline` succeeds because it is local I/O, not a network call.\n- **OnlineOnly vs Offline distinction:** Offline blocks proactively (never attempts the call). OnlineOnly attempts the call and fails with a specific error if the network is down. 
This matters for agents that want to know \"was this blocked by policy or by actual network unavailability?\"\n- **Mixed sources:** If a future version supports specs that reference remote $ref URLs but the base spec is local, the network policy should apply to the remote $ref resolution (not the local file read).\n\n## Files\n- CREATE: src/core/network.rs (NetworkPolicy enum, check_network_policy function)\n- MODIFY: src/core/http.rs (check policy before fetch)\n- MODIFY: src/cli/fetch.rs (check policy at start, skip for local/stdin sources)\n- MODIFY: src/cli/sync.rs (check policy at start, allow --dry-run in offline)\n\n## TDD Anchor\nRED: Write `test_offline_blocks_fetch` — set SWAGGER_CLI_NETWORK=offline, run fetch with remote URL, assert exit 15 and OFFLINE_MODE error.\nRED: Write `test_offline_allows_local_file` — set offline, fetch local file, assert success.\nRED: Write `test_offline_allows_list` — set offline, run list on cached alias, assert success.\nRED: Write `test_online_only_network_unavailable` — set online-only, mock DNS failure, assert NETWORK_UNAVAILABLE error.\nGREEN: Implement policy check.\nVERIFY: `cargo test test_offline_blocks_fetch`\n\n## Dependency Context\nModifies AsyncHttpClient in bd-3b6 (async HTTP client) to check network policy before requests. Uses SwaggerCliError variants (OfflineMode) from bd-ilo (error types and core data models).","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.156478Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:36:49.525619Z","closed_at":"2026-02-12T19:36:49.525325Z","close_reason":"Implemented NetworkPolicy enum (Auto/Offline/OnlineOnly), resolve_policy with CLI flag > env var precedence, check_remote_fetch enforcement. 
Integrated into AsyncHttpClient and fetch command.","compaction_level":0,"original_size":0,"labels":["global","phase2"],"dependencies":[{"issue_id":"bd-1cv","depends_on_id":"bd-16o","type":"blocks","created_at":"2026-02-12T16:29:50.158591Z","created_by":"tayloreernisse"},{"issue_id":"bd-1cv","depends_on_id":"bd-3b6","type":"blocks","created_at":"2026-02-12T16:29:50.158232Z","created_by":"tayloreernisse"},{"issue_id":"bd-1cv","depends_on_id":"bd-3ll","type":"parent-child","created_at":"2026-02-12T16:29:50.157784Z","created_by":"tayloreernisse"}]} {"id":"bd-1d4","title":"Implement cache lifecycle command with stats, prune, and LRU eviction","description":"## Background\nThe cache command manages cache growth with stats, pruning, and LRU eviction. Separate from doctor (which validates health). Uses coalesced last_accessed timestamps for LRU ordering.\n\n## Approach\nImplement src/cli/cache.rs with CacheArgs and execute():\n\n**CacheArgs:** stats (bool, default action), prune_stale (bool), prune_threshold (u32, default 90 days), max_total_mb (Option), dry_run (bool).\n\n**Operations:**\n- Stats: list_aliases with size computation, show per-alias and total bytes\n- Prune: find aliases older than threshold, delete (or dry-run report)\n- LRU eviction: sort by last_accessed ASC, delete oldest until total < max_total_mb (or dry-run)\n\n## Acceptance Criteria\n- [ ] cache --stats shows per-alias sizes and total\n- [ ] cache --prune-stale deletes aliases >90 days old\n- [ ] cache --prune-threshold 30 overrides default\n- [ ] cache --max-total-mb 500 evicts oldest-accessed aliases\n- [ ] --dry-run shows what would be pruned without deleting\n- [ ] Robot output: aliases[], total_bytes, pruned[], evicted[]\n\n## Edge Cases\n- **No aliases cached:** cache --stats returns ok:true with total_bytes:0, empty aliases array.\n- **Concurrent prune + fetch:** If a fetch writes a new alias while prune is deleting, the new alias should not be affected. 
Prune operates on snapshot of alias list taken at start.\n- **last_accessed coalescing:** LRU ordering uses coalesced last_accessed (10-min granularity). Hot-read aliases within the same 10-min window have identical last_accessed — tie-break by fetched_at.\n- **--max-total-mb smaller than single largest alias:** Evict everything except the largest, then warn that target cannot be reached.\n- **Alias being synced during prune:** Skip locked aliases with warning.\n\n## Files\n- MODIFY: src/cli/cache.rs (CacheArgs, execute, prune, evict)\n- MODIFY: src/output/robot.rs (add output_cache)\n- MODIFY: src/output/human.rs (add output_cache)\n\n## TDD Anchor\nRED: Write `test_cache_prune_stale` — create alias with old fetched_at, run cache --prune-stale --robot, assert alias appears in pruned[].\nGREEN: Implement stale detection and pruning.\nVERIFY: `cargo test test_cache_prune_stale`\n\n## Dependency Context\nUses CacheManager (list_aliases, delete_alias) from bd-3ea. Uses is_stale from CacheMetadata.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T16:29:50.122830Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.261575Z","closed_at":"2026-02-12T19:25:35.261539Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["health","phase2"],"dependencies":[{"issue_id":"bd-1d4","depends_on_id":"bd-1y0","type":"parent-child","created_at":"2026-02-12T16:29:50.123937Z","created_by":"tayloreernisse"},{"issue_id":"bd-1d4","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:29:50.124722Z","created_by":"tayloreernisse"},{"issue_id":"bd-1d4","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:29:50.124351Z","created_by":"tayloreernisse"}]} {"id":"bd-1dj","title":"Implement show command with ref expansion and pointer navigation","description":"## Background\nThe show command displays full details for a specific endpoint. 
Unlike list/search, show MUST load raw.json to extract the complete operation subtree (parameters with schemas, request body, response schemas, etc.). It uses the operation_ptr from the index to locate the exact JSON node in raw.json, avoiding full-spec parsing. Optional --expand-refs resolves internal $ref pointers with cycle detection.\n\n## Approach\nImplement src/cli/show.rs with ShowArgs and execute():\n\n**ShowArgs:** alias (Option), path (String, positional), method (Option), format (String, default \"pretty\"), expand_refs (bool), max_depth (u32, default 3).\n\n**Execute flow:**\n1. Resolve alias, load_index\n2. Find matching endpoint(s) by path in index\n3. If multiple methods and --method not specified → USAGE_ERROR listing available methods\n4. Get operation_ptr from matched IndexedEndpoint\n5. CacheManager::load_raw(alias, &meta) — loads raw.json as Value, validates raw_hash\n6. Navigate to operation subtree using JSON pointer (operation_ptr)\n7. If --expand-refs: recursively resolve internal $ref pointers (starting with #/) with cycle detection (max_depth). External refs get annotated as {\"$external_ref\": \"...\"}. Circular refs get {\"$circular_ref\": \"...\"}. Add warnings to meta.warnings[].\n8. Output robot JSON or human formatted details\n\n**JSON Pointer navigation:** Parse `/paths/~1pet~1{petId}/get` → navigate Value tree. `~1` → `/`, `~0` → `~`.\n\n**Ref expansion:** Walk the Value tree. When encountering `{\"$ref\": \"#/components/schemas/Pet\"}`, resolve by navigating the pointer in raw Value. Track visited refs for cycle detection. 
Stop at max_depth.\n\n## Acceptance Criteria\n- [ ] `swagger-cli show petstore \"/pet/{petId}\" --robot` returns full operation details\n- [ ] Multiple methods without --method returns USAGE_ERROR with available methods\n- [ ] --method POST selects specific method\n- [ ] operation_ptr correctly navigates to the right subtree in raw.json\n- [ ] --expand-refs resolves internal refs up to max_depth\n- [ ] Circular refs produce $circular_ref annotation (not infinite loop)\n- [ ] External refs produce $external_ref annotation + warning\n- [ ] raw_hash mismatch returns CacheIntegrity error\n- [ ] Robot output includes: path, method, summary, description, tags, operation_id, parameters, request_body, responses, security\n\n## Files\n- MODIFY: src/cli/show.rs (ShowArgs, execute, pointer navigation, ref expansion)\n- CREATE: src/core/refs.rs (expand_refs, resolve_pointer, cycle detection)\n- MODIFY: src/core/mod.rs (pub mod refs;)\n- MODIFY: src/output/robot.rs (add output_show)\n- MODIFY: src/output/human.rs (add output_show)\n\n## TDD Anchor\nRED: Write `test_show_endpoint_details` — fetch petstore fixture, run show \"/pet/{petId}\" --method GET --robot, parse JSON, assert data.path == \"/pet/{petId}\" and data.method == \"GET\".\nGREEN: Implement pointer navigation and output.\nVERIFY: `cargo test test_show_endpoint_details`\n\nAdditional tests:\n- test_show_multiple_methods_error\n- test_expand_refs_basic\n- test_expand_refs_circular_detection\n- test_expand_refs_external_annotation\n\n## Edge Cases\n- JSON pointer decoding: `~1` → `/`, `~0` → `~` (order matters: decode ~1 first, then ~0)\n- Path matching should be exact (not regex) — \"/pet/{petId}\" must match literally\n- Some operations may have no request body, no parameters, or no security — handle None gracefully\n- ref expansion must handle refs-to-refs (transitive resolution)\n\n## Dependency Context\nUses CacheManager.load_index and load_raw from bd-3ea (cache read). Uses index types from bd-ilo. 
Requires a fetched spec in cache.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:27:27.091022Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.258550Z","closed_at":"2026-02-12T19:25:35.258511Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["phase1","query"],"dependencies":[{"issue_id":"bd-1dj","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:27:27.093222Z","created_by":"tayloreernisse"},{"issue_id":"bd-1dj","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:27:27.092774Z","created_by":"tayloreernisse"},{"issue_id":"bd-1dj","depends_on_id":"bd-epk","type":"parent-child","created_at":"2026-02-12T16:27:27.092375Z","created_by":"tayloreernisse"}]} @@ -17,7 +17,7 @@ {"id":"bd-1y0","title":"Epic: Health and Cache Lifecycle","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:24.038779Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:24.039351Z","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-21m","title":"Epic: Diff Command","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:26.500265Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:26.501933Z","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-2e4","title":"Flesh out Phase 3 scope and requirements","description":"## Background\nPhase 3 beads (SBOM, keyring, curl gen, breaking-change classification, semantic search, YAML output) are currently stubs without enough detail for implementation. Before any Phase 3 work begins, the requirements and scope for each feature must be fleshed out with proper acceptance criteria, approach, and file lists.\n\n## What Needs to Happen\n1. Review each Phase 3 bead against the PRD's Phase 3 section\n2. Research any dependencies or design decisions not yet documented\n3. 
Write full bead descriptions (Background, Approach, Acceptance Criteria, Files, TDD Anchor, Edge Cases) for each Phase 3 bead\n4. Validate scope boundaries — what's in vs out for each feature\n\n## Acceptance Criteria\n- [ ] All Phase 3 beads have full descriptions (score 4+/5 on agent-readiness)\n- [ ] Each bead has concrete file lists and TDD anchors\n- [ ] Scope boundaries are documented (what's explicitly out of scope)\n- [ ] No unresolved ambiguities — all genuinely ambiguous decisions are resolved\n\n## Phase 3 Beads to Flesh Out\n- bd-37c: SBOM generation and cosign attestation\n- bd-3pz: OS keychain credential backend\n- bd-60k: Generate curl commands from endpoints\n- bd-j23: Breaking-change classification for diff\n- bd-132: Semantic search with embeddings\n- bd-b8h: YAML output format","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T16:42:40.196904Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:42:40.200279Z","compaction_level":0,"original_size":0,"labels":["phase3","planning"],"dependencies":[{"issue_id":"bd-2e4","depends_on_id":"bd-3aq","type":"parent-child","created_at":"2026-02-12T16:42:40.200266Z","created_by":"tayloreernisse"}]} -{"id":"bd-2gp","title":"Implement golden robot output tests and index-only invariant tests","description":"## Background\nGolden tests are the #1 defense against robot JSON shape regressions. They verify structural invariants (ok, data, meta.schema_version, meta.tool_version, meta.command, meta.duration_ms) and snapshot-compare against golden files. Index-only invariant tests verify that list/search work without raw.json (core performance promise).\n\n## Approach\n**Golden tests (tests/integration/golden_test.rs):**\n1. For each command (list, show, search, schemas, tags, aliases), run with --robot\n2. Parse output as JSON, verify structural invariants (ok is bool, data is object, meta has required fields)\n3. Compare against golden snapshot files in tests/integration/golden/\n4. 
Fail CI if shape changes unless schema_version is incremented\n\n**Index-only invariant tests:**\n- test_list_does_not_read_raw_json: fetch spec, delete raw.json, run list — must succeed\n- test_search_does_not_read_raw_json: same pattern with search\n- test_tags_does_not_read_raw_json: same\n- test_schemas_list_does_not_read_raw_json: same (list mode only; show mode needs raw)\n\n**JSON Schema validation (optional enhancement):**\n- Create docs/robot-schema/v1/success.schema.json and error.schema.json\n- Validate robot output against these schemas in golden tests\n\n## Acceptance Criteria\n- [ ] Golden test verifies all 6 command outputs have correct structure\n- [ ] Index-only tests pass (list/search/tags/schemas-list work without raw.json)\n- [ ] Golden files exist in tests/integration/golden/\n- [ ] JSON Schema files exist in docs/robot-schema/v1/\n\n## Edge Cases\n- **duration_ms non-determinism:** Golden files must NOT include duration_ms in snapshot comparison (it changes every run). Strip or mask it before comparing.\n- **tool_version changes:** Updating Cargo.toml version breaks golden files. Golden comparison should either mask tool_version or tests should update golden files via an env var flag.\n- **Platform-specific key ordering:** serde_json with BTreeMap ensures deterministic ordering. Verify golden files use sorted keys.\n- **Index-only tests must verify raw.json is actually deleted:** Don't just skip loading it — physically remove the file and prove the command works without it.\n\n## Files\n- CREATE: tests/integration/golden_test.rs\n- CREATE: tests/integration/golden/*.json (golden snapshot files)\n- CREATE: docs/robot-schema/v1/success.schema.json\n- CREATE: docs/robot-schema/v1/error.schema.json\n\n## TDD Anchor\nRun all golden and invariant tests.\nVERIFY: `cargo test golden && cargo test does_not_read_raw`\n\n## Dependency Context\nRequires all query commands to be implemented. 
Uses test helpers from bd-lx6 (Create test fixtures and integration test helpers).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.080993Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:58:14.664977Z","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-2gp","depends_on_id":"bd-3bl","type":"blocks","created_at":"2026-02-12T16:30:59.083124Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:30:59.082691Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-lx6","type":"blocks","created_at":"2026-02-12T16:34:06.420878Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.082181Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-x15","type":"blocks","created_at":"2026-02-12T16:30:59.083539Z","created_by":"tayloreernisse"}]} +{"id":"bd-2gp","title":"Implement golden robot output tests and index-only invariant tests","description":"## Background\nGolden tests are the #1 defense against robot JSON shape regressions. They verify structural invariants (ok, data, meta.schema_version, meta.tool_version, meta.command, meta.duration_ms) and snapshot-compare against golden files. Index-only invariant tests verify that list/search work without raw.json (core performance promise).\n\n## Approach\n**Golden tests (tests/integration/golden_test.rs):**\n1. For each command (list, show, search, schemas, tags, aliases), run with --robot\n2. Parse output as JSON, verify structural invariants (ok is bool, data is object, meta has required fields)\n3. Compare against golden snapshot files in tests/integration/golden/\n4. 
Fail CI if shape changes unless schema_version is incremented\n\n**Index-only invariant tests:**\n- test_list_does_not_read_raw_json: fetch spec, delete raw.json, run list — must succeed\n- test_search_does_not_read_raw_json: same pattern with search\n- test_tags_does_not_read_raw_json: same\n- test_schemas_list_does_not_read_raw_json: same (list mode only; show mode needs raw)\n\n**JSON Schema validation (optional enhancement):**\n- Create docs/robot-schema/v1/success.schema.json and error.schema.json\n- Validate robot output against these schemas in golden tests\n\n## Acceptance Criteria\n- [ ] Golden test verifies all 6 command outputs have correct structure\n- [ ] Index-only tests pass (list/search/tags/schemas-list work without raw.json)\n- [ ] Golden files exist in tests/integration/golden/\n- [ ] JSON Schema files exist in docs/robot-schema/v1/\n\n## Edge Cases\n- **duration_ms non-determinism:** Golden files must NOT include duration_ms in snapshot comparison (it changes every run). Strip or mask it before comparing.\n- **tool_version changes:** Updating Cargo.toml version breaks golden files. Golden comparison should either mask tool_version or tests should update golden files via an env var flag.\n- **Platform-specific key ordering:** serde_json with BTreeMap ensures deterministic ordering. Verify golden files use sorted keys.\n- **Index-only tests must verify raw.json is actually deleted:** Don't just skip loading it — physically remove the file and prove the command works without it.\n\n## Files\n- CREATE: tests/integration/golden_test.rs\n- CREATE: tests/integration/golden/*.json (golden snapshot files)\n- CREATE: docs/robot-schema/v1/success.schema.json\n- CREATE: docs/robot-schema/v1/error.schema.json\n\n## TDD Anchor\nRun all golden and invariant tests.\nVERIFY: `cargo test golden && cargo test does_not_read_raw`\n\n## Dependency Context\nRequires all query commands to be implemented. 
Uses test helpers from bd-lx6 (Create test fixtures and integration test helpers).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.080993Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:06:06.247229Z","closed_at":"2026-02-12T20:06:06.247185Z","close_reason":"Golden robot output tests + index-only invariant tests complete with JSON Schema artifacts","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-2gp","depends_on_id":"bd-3bl","type":"blocks","created_at":"2026-02-12T16:30:59.083124Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:30:59.082691Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-lx6","type":"blocks","created_at":"2026-02-12T16:34:06.420878Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.082181Z","created_by":"tayloreernisse"},{"issue_id":"bd-2gp","depends_on_id":"bd-x15","type":"blocks","created_at":"2026-02-12T16:30:59.083539Z","created_by":"tayloreernisse"}]} {"id":"bd-2mr","title":"Add supply chain hardening and robot JSON Schema artifacts","description":"## Background\nSupply chain hardening: release artifacts include SHA256SUMS + minisign signatures. Robot output JSON Schemas published as build artifacts for agent validation.\n\n## Approach\n\n### Supply Chain Artifacts\n1. Update release job in .gitlab-ci.yml to generate SHA256SUMS and sign with minisign\n2. Upload SHA256SUMS + SHA256SUMS.minisig alongside binaries to GitLab Package Registry\n3. 
Update install.sh to verify signature when minisign is available\n\n### JSON Schema Files\n\nCreate `docs/robot-schema/v1/success.schema.json`:\n```json\n{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"required\": [\"ok\", \"data\", \"meta\"],\n \"properties\": {\n \"ok\": { \"type\": \"boolean\", \"const\": true },\n \"data\": { \"type\": \"object\" },\n \"meta\": {\n \"type\": \"object\",\n \"required\": [\"schema_version\", \"tool_version\", \"command\", \"duration_ms\"],\n \"properties\": {\n \"schema_version\": { \"type\": \"string\", \"pattern\": \"^\\\\d+\\\\.\\\\d+$\" },\n \"tool_version\": { \"type\": \"string\" },\n \"command\": { \"type\": \"string\" },\n \"command_version\": { \"type\": \"string\", \"description\": \"Per-command payload version for independent evolution\" },\n \"duration_ms\": { \"type\": \"integer\", \"minimum\": 0 }\n }\n }\n },\n \"additionalProperties\": false\n}\n```\n\nCreate `docs/robot-schema/v1/error.schema.json`:\n```json\n{\n \"$schema\": \"https://json-schema.org/draft/2020-12/schema\",\n \"type\": \"object\",\n \"required\": [\"ok\", \"error\", \"meta\"],\n \"properties\": {\n \"ok\": { \"type\": \"boolean\", \"const\": false },\n \"error\": {\n \"type\": \"object\",\n \"required\": [\"code\", \"message\"],\n \"properties\": {\n \"code\": { \"type\": \"string\" },\n \"message\": { \"type\": \"string\" },\n \"suggestion\": { \"type\": \"string\" }\n }\n },\n \"meta\": {\n \"type\": \"object\",\n \"properties\": {\n \"schema_version\": { \"type\": \"string\" },\n \"tool_version\": { \"type\": \"string\" },\n \"command\": { \"type\": \"string\" },\n \"duration_ms\": { \"type\": \"integer\", \"minimum\": 0 }\n }\n }\n },\n \"additionalProperties\": false\n}\n```\n\n### Compatibility Policy\n- **No version bump:** Adding new optional fields to data or meta (additive changes)\n- **MUST bump schema_version:** Removing fields, renaming fields, changing field types, changing required 
status\n- **meta.command_version:** Each command can independently evolve its data payload structure. When a command's data shape changes in a breaking way, bump command_version without bumping the global schema_version. This allows agents to pin to specific command output shapes.\n\n## Acceptance Criteria\n- [ ] Release pipeline generates SHA256SUMS from all binary artifacts\n- [ ] minisign signature generated for SHA256SUMS (when key available in CI)\n- [ ] SHA256SUMS and SHA256SUMS.minisig uploaded alongside binaries\n- [ ] install.sh attempts signature verification when minisign is on PATH\n- [ ] docs/robot-schema/v1/success.schema.json matches the structure above\n- [ ] docs/robot-schema/v1/error.schema.json matches the structure above\n- [ ] success.schema.json meta requires: schema_version, tool_version, command, duration_ms\n- [ ] success.schema.json meta includes optional command_version field\n- [ ] error.schema.json error requires: code, message (suggestion is optional)\n- [ ] Both schemas have additionalProperties: false at top level\n- [ ] Schema files are valid JSON Schema (validate with a JSON Schema validator)\n\n## Files\n- MODIFY: .gitlab-ci.yml (add checksum + signing to release)\n- MODIFY: install.sh (add signature verification)\n- CREATE: docs/robot-schema/v1/success.schema.json\n- CREATE: docs/robot-schema/v1/error.schema.json\n\n## Dependency Context\nExtends CI pipeline from bd-1lj and install script from bd-gvr.\n\n## Edge Cases\n- **Schema evolution testing:** When robot output changes, both the code AND the JSON Schema must be updated together. Tests should validate all command outputs against the schemas.\n- **Minisign not available in CI:** If minisign is not installed, the release should still succeed but skip signing with a clear warning (don't fail the release).\n- **JSON Schema draft version:** Use 2020-12 draft. 
Older validators may not support it — document the minimum validator version.\n- **additionalProperties:false on nested objects:** The top-level schemas have it, but nested objects (data, meta) may need it too if strict validation is desired. Decision: only enforce at top level for now.","status":"open","priority":4,"issue_type":"task","created_at":"2026-02-12T16:31:32.482765Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:56:21.036013Z","compaction_level":0,"original_size":0,"labels":["ci","phase2"],"dependencies":[{"issue_id":"bd-2mr","depends_on_id":"bd-1lj","type":"blocks","created_at":"2026-02-12T16:34:06.478112Z","created_by":"tayloreernisse"},{"issue_id":"bd-2mr","depends_on_id":"bd-1lo","type":"parent-child","created_at":"2026-02-12T16:31:32.485330Z","created_by":"tayloreernisse"},{"issue_id":"bd-2mr","depends_on_id":"bd-gvr","type":"blocks","created_at":"2026-02-12T16:34:06.528568Z","created_by":"tayloreernisse"}]} {"id":"bd-2pl","title":"Epic: Alias Management","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:22.527514Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:22.528316Z","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-2s6","title":"Implement doctor command with integrity validation and --fix repair","description":"## Background\nThe doctor command validates installation health and cache integrity. It checks config/cache directories exist, validates each alias's cache files (meta, index, raw), detects integrity issues (generation/hash mismatches, stale index versions, missing files), warns on insecure config permissions, and validates all index pointers against raw.json. Optional --fix repairs recoverable issues.\n\n## Approach\nImplement src/cli/doctor.rs with DoctorArgs and execute():\n\n**Checks performed:**\n1. Config directory exists and is readable/writable\n2. Cache directory exists and is readable/writable\n3. 
For each alias: check meta.json exists, validate generation/index_hash/index_version against index.json, validate raw_hash against raw.json, validate all operation_ptr/schema_ptr resolve in raw.json\n4. Detect stale caches (>30 days, configurable via config)\n5. Check config.toml permissions -- warn if group/world readable when auth tokens present\n6. Report disk usage (per-alias and total)\n\n**--fix repair modes (per alias, after acquiring lock):**\n1. If raw exists but index missing/invalid or index_version mismatched -> rebuild index from raw\n2. If raw + index valid but meta missing -> reconstruct meta from raw + index\n3. If raw unreadable/unparseable -> delete alias (last resort)\n\n**Health status:** HEALTHY (no issues), WARNING (stale caches, permission issues), DEGRADED (some aliases have integrity issues but are fixable), UNHEALTHY (unfixable corruption).\n\n## Acceptance Criteria\n- [ ] doctor reports HEALTHY for a valid cache\n- [ ] doctor detects missing meta.json as integrity issue\n- [ ] doctor detects generation mismatch between meta and index\n- [ ] doctor detects invalid operation_ptr (pointer doesn't resolve)\n- [ ] doctor warns on stale caches (>30 days)\n- [ ] doctor warns on insecure config permissions\n- [ ] --fix rebuilds index from raw when index is invalid\n- [ ] --fix reconstructs meta when meta is missing but raw+index exist\n- [ ] --fix deletes alias only when raw is unreadable\n- [ ] Robot output: health status, per-alias status, warnings[], disk_usage\n\n## Edge Cases\n- **Concurrent doctor + fetch:** Doctor reads while fetch writes. Doctor should acquire read lock or tolerate mid-write state gracefully (report as integrity issue, not crash).\n- **Very large cache (hundreds of aliases):** Doctor must not OOM — process aliases one at a time, not load all into memory.\n- **Permission denied on cache directory:** Report as WARNING, not crash. 
Doctor should be resilient to partial access.\n- **Empty alias directory (no files):** Skip with warning, don't crash. This can happen from interrupted deletes.\n- **--fix on locked alias:** If another process holds the lock, skip that alias with warning (don't block).\n\n## Files\n- MODIFY: src/cli/doctor.rs (DoctorArgs, execute, check_alias, fix_alias, permission_check)\n- MODIFY: src/output/robot.rs (add output_doctor)\n- MODIFY: src/output/human.rs (add output_doctor)\n\n## TDD Anchor\nRED: Write `test_doctor_detects_missing_meta` -- create cache with raw+index but no meta, run doctor --robot, assert alias status is \"integrity_error\".\nGREEN: Implement per-alias integrity checking.\nVERIFY: `cargo test test_doctor_detects_missing_meta`\n\n## Dependency Context\nUses CacheManager (load_index, load_raw, list_aliases) from bd-3ea (cache read path). Uses SpecIndex and CacheMetadata types from bd-ilo (error types and core data models). Uses build_index from bd-189 (indexer) for --fix index rebuild.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:29:50.084259Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:25:35.260958Z","closed_at":"2026-02-12T19:25:35.260922Z","close_reason":"Implemented in Wave 4 commit","compaction_level":0,"original_size":0,"labels":["health","phase2"],"dependencies":[{"issue_id":"bd-2s6","depends_on_id":"bd-189","type":"blocks","created_at":"2026-02-12T16:29:50.088686Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-1y0","type":"parent-child","created_at":"2026-02-12T16:29:50.087511Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-3d2","type":"blocks","created_at":"2026-02-12T16:29:50.089158Z","created_by":"tayloreernisse"},{"issue_id":"bd-2s6","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:29:50.088266Z","created_by":"tayloreernisse"}]} @@ -46,5 +46,5 @@ {"id":"bd-jek","title":"Epic: Query Commands Phase 
2","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:21.465792Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:21.466699Z","compaction_level":0,"original_size":0,"labels":["epic"]} {"id":"bd-lx6","title":"Create test fixtures and integration test helpers","description":"## Background\nAll integration tests need test fixtures (OpenAPI spec files) and helper functions to set up hermetic test environments. This bead creates the fixtures and test infrastructure that all other test beads depend on.\n\n## Approach\n1. Create tests/fixtures/petstore.json — download the standard Petstore v3 spec (JSON format, ~50KB, 19 endpoints)\n2. Create tests/fixtures/petstore.yaml — same spec in YAML format (for format normalization tests)\n3. Create tests/fixtures/minimal.json — minimal valid OpenAPI 3.0 spec (3 endpoints, for fast tests)\n4. Create tests/helpers/mod.rs — shared test utilities:\n - setup_test_env() → creates tempdir, sets SWAGGER_CLI_HOME, returns TestEnv struct with paths\n - fetch_fixture(env, fixture_name, alias) → runs swagger-cli fetch with local fixture file\n - run_cmd(args) → creates assert_cmd Command with SWAGGER_CLI_HOME set\n - parse_robot_json(output) → parses stdout as serde_json::Value\n\n## Acceptance Criteria\n- [ ] petstore.json is a valid OpenAPI 3.0 spec with 19+ endpoints\n- [ ] petstore.yaml is equivalent YAML version\n- [ ] minimal.json is valid OpenAPI 3.0 with 3 endpoints\n- [ ] setup_test_env() creates isolated tempdir with SWAGGER_CLI_HOME\n- [ ] fetch_fixture() successfully caches a fixture spec\n- [ ] All test helpers compile and can be used from integration tests\n\n## Files\n- CREATE: tests/fixtures/petstore.json (Petstore v3 spec)\n- CREATE: tests/fixtures/petstore.yaml (same in YAML)\n- CREATE: tests/fixtures/minimal.json (minimal 3-endpoint spec)\n- CREATE: tests/helpers/mod.rs (TestEnv, setup_test_env, fetch_fixture, run_cmd, parse_robot_json)\n\n## TDD Anchor\nRED: Write 
`test_fixture_is_valid_json` — parse petstore.json, assert it has \"openapi\" and \"paths\" keys.\nGREEN: Create the fixture file.\nVERIFY: `cargo test test_fixture_is_valid`\n\n## Edge Cases\n- Fixtures must use absolute paths (canonicalize) for file:// URLs\n- petstore.json should be a real Petstore spec, not a minimal stub\n- SWAGGER_CLI_HOME must be set BEFORE any command runs (use env() on assert_cmd)\n- Test helpers should clean up tempdirs (use TempDir which auto-cleans on drop)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.014337Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:36:55.140446Z","closed_at":"2026-02-12T19:36:55.140395Z","close_reason":"Created test fixtures (petstore.json, petstore.yaml, minimal.json) and test helpers (TestEnv, run_cmd, fetch_fixture, parse_robot_json). 13 integration tests passing.","compaction_level":0,"original_size":0,"labels":["phase1","testing"],"dependencies":[{"issue_id":"bd-lx6","depends_on_id":"bd-16o","type":"blocks","created_at":"2026-02-12T16:30:59.016051Z","created_by":"tayloreernisse"},{"issue_id":"bd-lx6","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.015600Z","created_by":"tayloreernisse"}]} {"id":"bd-p7g","title":"Epic: Testing","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-12T16:22:27.310201Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:22:27.311058Z","compaction_level":0,"original_size":0,"labels":["epic"]} -{"id":"bd-rex","title":"Write integration tests for fetch and query commands","description":"## Background\nIntegration tests validate the full command pipeline end-to-end: fetch → list → show → search → schemas → tags. 
These tests use assert_cmd to run the binary, mockito for HTTP mocking, and the test fixtures/helpers from the previous bead.\n\n## Approach\nCreate tests/integration/ test files:\n\n**fetch_test.rs:** test_fetch_success (local file), test_fetch_invalid_json, test_fetch_network_error, test_fetch_alias_exists, test_fetch_force_overwrites, test_fetch_yaml_file, test_fetch_stdin\n\n**list_test.rs:** test_list_all_endpoints, test_list_filter_by_method, test_list_filter_by_tag, test_list_path_regex, test_list_invalid_regex_error, test_list_combined_filters, test_list_default_limit, test_list_all_flag\n\n**show_test.rs:** test_show_endpoint_details, test_show_multiple_methods_error, test_show_expand_refs\n\n**search_test.rs:** test_search_basic, test_search_exact, test_search_case_sensitive, test_search_in_paths_only, test_search_invalid_field_error\n\n**schemas_test.rs:** test_schemas_list, test_schemas_show, test_schemas_name_filter\n\n**aliases_test.rs:** test_aliases_list, test_aliases_rename, test_aliases_delete, test_aliases_set_default\n\n## Acceptance Criteria\n- [ ] All fetch tests pass (success, errors, force, YAML, stdin)\n- [ ] All list tests pass (filters, sorting, limits)\n- [ ] All show tests pass (details, ref expansion, method disambiguation)\n- [ ] All search tests pass (basic, exact, case-sensitive, field scoping)\n- [ ] Schema and alias tests pass\n- [ ] All tests are hermetic (SWAGGER_CLI_HOME in tempdir)\n- [ ] No real network calls (local fixtures or mockito)\n\n## Edge Cases\n- **Test isolation:** Each test MUST use its own tempdir for SWAGGER_CLI_HOME. Tests running in parallel must not share cache state.\n- **CI binary not built:** Tests use assert_cmd which builds the binary. 
Ensure Cargo.toml has the right [[bin]] target.\n- **Mockito port conflicts:** Each test needing a mock server should use mockito::Server::new() which picks a random port, not a hardcoded port.\n- **Fixture file paths:** Use canonicalized absolute paths for local file fetch tests. Relative paths may break depending on test runner CWD.\n\n## Files\n- CREATE: tests/integration/fetch_test.rs\n- CREATE: tests/integration/list_test.rs\n- CREATE: tests/integration/show_test.rs\n- CREATE: tests/integration/search_test.rs\n- CREATE: tests/integration/schemas_test.rs\n- CREATE: tests/integration/aliases_test.rs\n\n## TDD Anchor\nWrite all tests listed above. Many will already exist as unit tests in individual command beads — this bead consolidates them as proper integration tests using assert_cmd binary invocation.\nVERIFY: `cargo test --test '*'`\n\n## Dependency Context\nUses test fixtures and helpers from bd-lx6 (Create test fixtures and integration test helpers). Tests the commands implemented in all query/management beads.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.050732Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:58:13.670195Z","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-rex","depends_on_id":"bd-30a","type":"blocks","created_at":"2026-02-12T16:30:59.053106Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:30:59.052620Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-lx6","type":"blocks","created_at":"2026-02-12T16:34:06.369443Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.051904Z","created_by":"tayloreernisse"}]} +{"id":"bd-rex","title":"Write integration tests for fetch and query commands","description":"## Background\nIntegration tests validate the full command pipeline 
end-to-end: fetch → list → show → search → schemas → tags. These tests use assert_cmd to run the binary, mockito for HTTP mocking, and the test fixtures/helpers from the previous bead.\n\n## Approach\nCreate tests/integration/ test files:\n\n**fetch_test.rs:** test_fetch_success (local file), test_fetch_invalid_json, test_fetch_network_error, test_fetch_alias_exists, test_fetch_force_overwrites, test_fetch_yaml_file, test_fetch_stdin\n\n**list_test.rs:** test_list_all_endpoints, test_list_filter_by_method, test_list_filter_by_tag, test_list_path_regex, test_list_invalid_regex_error, test_list_combined_filters, test_list_default_limit, test_list_all_flag\n\n**show_test.rs:** test_show_endpoint_details, test_show_multiple_methods_error, test_show_expand_refs\n\n**search_test.rs:** test_search_basic, test_search_exact, test_search_case_sensitive, test_search_in_paths_only, test_search_invalid_field_error\n\n**schemas_test.rs:** test_schemas_list, test_schemas_show, test_schemas_name_filter\n\n**aliases_test.rs:** test_aliases_list, test_aliases_rename, test_aliases_delete, test_aliases_set_default\n\n## Acceptance Criteria\n- [ ] All fetch tests pass (success, errors, force, YAML, stdin)\n- [ ] All list tests pass (filters, sorting, limits)\n- [ ] All show tests pass (details, ref expansion, method disambiguation)\n- [ ] All search tests pass (basic, exact, case-sensitive, field scoping)\n- [ ] Schema and alias tests pass\n- [ ] All tests are hermetic (SWAGGER_CLI_HOME in tempdir)\n- [ ] No real network calls (local fixtures or mockito)\n\n## Edge Cases\n- **Test isolation:** Each test MUST use its own tempdir for SWAGGER_CLI_HOME. Tests running in parallel must not share cache state.\n- **CI binary not built:** Tests use assert_cmd which builds the binary. 
Ensure Cargo.toml has the right [[bin]] target.\n- **Mockito port conflicts:** Each test needing a mock server should use mockito::Server::new() which picks a random port, not a hardcoded port.\n- **Fixture file paths:** Use canonicalized absolute paths for local file fetch tests. Relative paths may break depending on test runner CWD.\n\n## Files\n- CREATE: tests/integration/fetch_test.rs\n- CREATE: tests/integration/list_test.rs\n- CREATE: tests/integration/show_test.rs\n- CREATE: tests/integration/search_test.rs\n- CREATE: tests/integration/schemas_test.rs\n- CREATE: tests/integration/aliases_test.rs\n\n## TDD Anchor\nWrite all tests listed above. Many will already exist as unit tests in individual command beads — this bead consolidates them as proper integration tests using assert_cmd binary invocation.\nVERIFY: `cargo test --test '*'`\n\n## Dependency Context\nUses test fixtures and helpers from bd-lx6 (Create test fixtures and integration test helpers). Tests the commands implemented in all query/management beads.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:30:59.050732Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:06:06.218523Z","closed_at":"2026-02-12T20:06:06.218471Z","close_reason":"252 tests passing: 36 integration tests, 14 golden tests, 11 index invariant tests, 191 unit tests","compaction_level":0,"original_size":0,"labels":["phase2","testing"],"dependencies":[{"issue_id":"bd-rex","depends_on_id":"bd-30a","type":"blocks","created_at":"2026-02-12T16:30:59.053106Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-acf","type":"blocks","created_at":"2026-02-12T16:30:59.052620Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-lx6","type":"blocks","created_at":"2026-02-12T16:34:06.369443Z","created_by":"tayloreernisse"},{"issue_id":"bd-rex","depends_on_id":"bd-p7g","type":"parent-child","created_at":"2026-02-12T16:30:59.051904Z","created_by":"tayloreernisse"}]} 
{"id":"bd-x15","title":"Implement schemas command with list and show modes","description":"## Background\nThe schemas command lets users browse and inspect OpenAPI component schemas. Listing is index-backed (fast). Showing a specific schema loads raw.json via schema_ptr (same pattern as show command). Supports --name regex filter and --expand-refs with cycle detection.\n\n## Approach\nImplement src/cli/schemas.rs with SchemasArgs and execute():\n\n**SchemasArgs:** alias (Option), name (Option — regex filter, invalid → USAGE_ERROR), list (bool, default action), show (Option — schema name), expand_refs (bool), max_depth (u32, default 3).\n\n**List mode (default):** Load index, filter schemas by --name regex, output names. Robot: data.schemas[] with name. Human: bulleted list with title.\n\n**Show mode (--show Name):** Find schema in index by exact name match. Load raw via schema_ptr. If --expand-refs, use the same ref expansion from show command (src/core/refs.rs). Output full schema JSON.\n\n## Acceptance Criteria\n- [ ] `schemas petstore` lists all schema names (sorted by name)\n- [ ] `schemas petstore --name \".*Pet.*\"` filters by regex\n- [ ] Invalid --name regex returns USAGE_ERROR\n- [ ] `schemas petstore --show Pet --robot` returns full Pet schema JSON\n- [ ] --expand-refs works on schema details\n- [ ] Schema not found returns clear error\n- [ ] List mode never loads raw.json\n- [ ] Show mode validates raw_hash\n\n## Edge Cases\n- **Empty schemas (spec with no components/schemas):** Return ok:true with data.schemas as empty array.\n- **Schema name with special characters:** Some schema names contain dots or hyphens (e.g., \"Pet.Response\"). Regex --name filter must handle these.\n- **--show with non-existent schema name:** Return clear error with suggestion listing available schema names.\n- **schema_ptr doesn't resolve in raw.json:** Return CacheIntegrity error (corrupted index). 
Should not happen if index was built correctly, but defend against it.\n\n## Files\n- MODIFY: src/cli/schemas.rs (SchemasArgs, execute)\n- MODIFY: src/output/robot.rs (add output_schemas_list, output_schemas_show)\n- MODIFY: src/output/human.rs (add output_schemas_list, output_schemas_show)\n\n## TDD Anchor\nRED: Write `test_schemas_list` — fetch petstore, run schemas --robot, assert data.schemas is array with correct count.\nGREEN: Implement list mode.\nVERIFY: `cargo test test_schemas_list`\n\n## Dependency Context\nUses ref expansion from bd-1dj (show command, src/core/refs.rs). Uses CacheManager from bd-3ea.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:28:05.337939Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:36:51.112317Z","closed_at":"2026-02-12T19:36:51.112265Z","close_reason":"Implemented schemas command with list/show modes, regex filtering, ref expansion with cycle detection, robot/human output.","compaction_level":0,"original_size":0,"labels":["phase2","query"],"dependencies":[{"issue_id":"bd-x15","depends_on_id":"bd-1dj","type":"blocks","created_at":"2026-02-12T16:28:05.340569Z","created_by":"tayloreernisse"},{"issue_id":"bd-x15","depends_on_id":"bd-3ea","type":"blocks","created_at":"2026-02-12T16:28:05.339868Z","created_by":"tayloreernisse"},{"issue_id":"bd-x15","depends_on_id":"bd-jek","type":"parent-child","created_at":"2026-02-12T16:28:05.339431Z","created_by":"tayloreernisse"}]} diff --git a/docs/robot-schema/v1/error.schema.json b/docs/robot-schema/v1/error.schema.json new file mode 100644 index 0000000..ba92fe7 --- /dev/null +++ b/docs/robot-schema/v1/error.schema.json @@ -0,0 +1,83 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://swagger-cli.dev/robot-schema/v1/error.schema.json", + "title": "Robot Error Response", + "description": "Envelope for all error robot-mode responses from swagger-cli. 
Written to stderr.", + "type": "object", + "required": ["ok", "error", "meta"], + "additionalProperties": false, + "properties": { + "ok": { + "type": "boolean", + "const": false + }, + "error": { + "$ref": "#/$defs/error" + }, + "meta": { + "$ref": "#/$defs/meta" + } + }, + "$defs": { + "error": { + "type": "object", + "required": ["code", "message"], + "additionalProperties": false, + "properties": { + "code": { + "type": "string", + "description": "Machine-readable error code.", + "enum": [ + "USAGE_ERROR", + "NETWORK_ERROR", + "INVALID_SPEC", + "ALIAS_EXISTS", + "AUTH_ERROR", + "ALIAS_NOT_FOUND", + "CACHE_LOCKED", + "CACHE_ERROR", + "CONFIG_ERROR", + "IO_ERROR", + "JSON_ERROR", + "CACHE_INTEGRITY", + "OFFLINE_MODE", + "POLICY_BLOCKED" + ] + }, + "message": { + "type": "string", + "description": "Human-readable error description." + }, + "suggestion": { + "type": "string", + "description": "Optional actionable suggestion for resolving the error." + } + } + }, + "meta": { + "type": "object", + "required": ["schema_version", "tool_version", "command", "duration_ms"], + "additionalProperties": false, + "properties": { + "schema_version": { + "type": "integer", + "const": 1, + "description": "Robot envelope schema version. Always 1 for v1." + }, + "tool_version": { + "type": "string", + "description": "SemVer version of swagger-cli that produced this output." + }, + "command": { + "type": "string", + "description": "Name of the command that was executed." + }, + "duration_ms": { + "type": "integer", + "minimum": 0, + "description": "Wall-clock execution time in milliseconds." 
+ } + } + } + } +} diff --git a/docs/robot-schema/v1/success.schema.json b/docs/robot-schema/v1/success.schema.json new file mode 100644 index 0000000..4c08029 --- /dev/null +++ b/docs/robot-schema/v1/success.schema.json @@ -0,0 +1,62 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://swagger-cli.dev/robot-schema/v1/success.schema.json", + "title": "Robot Success Response", + "description": "Envelope for all successful robot-mode responses from swagger-cli.", + "type": "object", + "required": ["ok", "data", "meta"], + "additionalProperties": false, + "properties": { + "ok": { + "type": "boolean", + "const": true + }, + "data": { + "type": "object", + "description": "Command-specific payload. Shape varies by command." + }, + "meta": { + "$ref": "#/$defs/meta" + } + }, + "$defs": { + "meta": { + "type": "object", + "required": ["schema_version", "tool_version", "command", "duration_ms"], + "additionalProperties": false, + "properties": { + "schema_version": { + "type": "integer", + "const": 1, + "description": "Robot envelope schema version. Always 1 for v1." + }, + "tool_version": { + "type": "string", + "description": "SemVer version of swagger-cli that produced this output." + }, + "command": { + "type": "string", + "description": "Name of the command that was executed.", + "enum": [ + "fetch", + "list", + "show", + "search", + "schemas", + "tags", + "aliases", + "sync", + "doctor", + "cache", + "diff" + ] + }, + "duration_ms": { + "type": "integer", + "minimum": 0, + "description": "Wall-clock execution time in milliseconds." 
+ } + } + } + } +} diff --git a/src/cli/cache_cmd.rs b/src/cli/cache_cmd.rs index 5896c23..19ec77b 100644 --- a/src/cli/cache_cmd.rs +++ b/src/cli/cache_cmd.rs @@ -11,6 +11,7 @@ use crate::core::config::cache_dir; use crate::errors::SwaggerCliError; use crate::output::robot::robot_success; use crate::output::table::render_table_or_empty; +use crate::utils::dir_size; // --------------------------------------------------------------------------- // CLI args @@ -117,19 +118,6 @@ struct StatsRow { // Helpers // --------------------------------------------------------------------------- -/// Walk every file in `dir` (non-recursive) and sum metadata().len(). -fn dir_size(dir: &std::path::Path) -> u64 { - let Ok(entries) = std::fs::read_dir(dir) else { - return 0; - }; - entries - .filter_map(Result::ok) - .filter_map(|e| e.metadata().ok()) - .filter(|m| m.is_file()) - .map(|m| m.len()) - .sum() -} - fn human_bytes(bytes: u64) -> String { const KB: u64 = 1024; const MB: u64 = KB * 1024; diff --git a/src/cli/diff.rs b/src/cli/diff.rs index 5d72b87..5f3fa3e 100644 --- a/src/cli/diff.rs +++ b/src/cli/diff.rs @@ -1,18 +1,247 @@ +use std::time::Instant; + use clap::Args as ClapArgs; +use serde::Serialize; +use tabled::Tabled; +use crate::core::cache::CacheManager; +use crate::core::config::cache_dir; +use crate::core::diff::{self, DiffResult}; use crate::errors::SwaggerCliError; +use crate::output::robot; +use crate::output::table::render_table_or_empty; -/// Compare two versions of a spec +/// Compare two cached specs #[derive(Debug, ClapArgs)] pub struct Args { - /// Alias of the spec to diff - pub alias: String, + /// Left alias (baseline) + pub left: String, - /// Revision to compare against (default: previous) + /// Right alias (comparison) + pub right: String, + + /// Exit non-zero if changes at this level: breaking + #[arg(long, value_name = "LEVEL")] + pub fail_on: Option, + + /// Include per-item change descriptions #[arg(long)] - pub rev: Option, + pub details: bool, 
} -pub async fn execute(_args: &Args, _robot: bool) -> Result<(), SwaggerCliError> { - Err(SwaggerCliError::Usage("diff not yet implemented".into())) +// --------------------------------------------------------------------------- +// Robot output structs +// --------------------------------------------------------------------------- + +#[derive(Debug, Serialize)] +struct DiffOutput { + left: String, + right: String, + changes: DiffResult, +} + +// --------------------------------------------------------------------------- +// Human output row +// --------------------------------------------------------------------------- + +#[derive(Tabled)] +struct ChangeRow { + #[tabled(rename = "TYPE")] + kind: String, + #[tabled(rename = "CHANGE")] + change: String, + #[tabled(rename = "ITEM")] + item: String, +} + +// --------------------------------------------------------------------------- +// Execute +// --------------------------------------------------------------------------- + +pub async fn execute(args: &Args, robot_mode: bool) -> Result<(), SwaggerCliError> { + let start = Instant::now(); + + // Validate --fail-on value + if let Some(ref level) = args.fail_on + && level != "breaking" + { + return Err(SwaggerCliError::Usage(format!( + "Invalid --fail-on level '{level}': only 'breaking' is supported" + ))); + } + + let cm = CacheManager::new(cache_dir()); + let (left_index, _left_meta) = cm.load_index(&args.left)?; + let (right_index, _right_meta) = cm.load_index(&args.right)?; + + let result = diff::diff_indexes(&left_index, &right_index); + let duration = start.elapsed(); + + let has_breaking = result.summary.has_breaking; + + if robot_mode { + let output = DiffOutput { + left: args.left.clone(), + right: args.right.clone(), + changes: result, + }; + robot::robot_success(output, "diff", duration); + } else { + println!("Diff: {} vs {}", args.left, args.right); + println!(); + + let mut rows: Vec = Vec::new(); + + for ep in &result.endpoints.added { + 
rows.push(ChangeRow { + kind: "endpoint".into(), + change: "added".into(), + item: format!("{} {}", ep[0], ep[1]), + }); + } + for ep in &result.endpoints.removed { + rows.push(ChangeRow { + kind: "endpoint".into(), + change: "removed".into(), + item: format!("{} {}", ep[0], ep[1]), + }); + } + for ep in &result.endpoints.modified { + rows.push(ChangeRow { + kind: "endpoint".into(), + change: "modified".into(), + item: format!("{} {}", ep[0], ep[1]), + }); + } + for name in &result.schemas.added { + rows.push(ChangeRow { + kind: "schema".into(), + change: "added".into(), + item: name.clone(), + }); + } + for name in &result.schemas.removed { + rows.push(ChangeRow { + kind: "schema".into(), + change: "removed".into(), + item: name.clone(), + }); + } + + let table = render_table_or_empty(&rows, "No differences found."); + println!("{table}"); + + if result.summary.total_changes > 0 { + println!(); + println!( + "{} total change(s){}", + result.summary.total_changes, + if has_breaking { + " (includes breaking changes)" + } else { + "" + } + ); + } + } + + // CI gate: exit non-zero on breaking changes when requested + if args.fail_on.as_deref() == Some("breaking") && has_breaking { + return Err(SwaggerCliError::Usage( + "Breaking changes detected (use --fail-on to control this check)".into(), + )); + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::core::diff::diff_indexes; + use crate::core::spec::{IndexInfo, IndexedEndpoint, IndexedSchema, SpecIndex}; + + fn make_endpoint(path: &str, method: &str, summary: &str) -> IndexedEndpoint { + IndexedEndpoint { + path: path.to_string(), + method: method.to_string(), + summary: Some(summary.to_string()), + description: None, + operation_id: None, + tags: vec![], + deprecated: false, + parameters: vec![], + request_body_required: 
false, + request_body_content_types: vec![], + security_schemes: vec![], + security_required: false, + operation_ptr: String::new(), + } + } + + fn make_index(endpoints: Vec, schemas: Vec) -> SpecIndex { + SpecIndex { + index_version: 1, + generation: 1, + content_hash: "sha256:test".into(), + openapi: "3.0.3".into(), + info: IndexInfo { + title: "Test".into(), + version: "1.0.0".into(), + }, + endpoints, + schemas, + tags: vec![], + } + } + + #[test] + fn test_diff_output_serialization() { + let left = make_index(vec![], vec![]); + let right = make_index(vec![make_endpoint("/pets", "GET", "List pets")], vec![]); + let changes = diff_indexes(&left, &right); + + let output = DiffOutput { + left: "v1".into(), + right: "v2".into(), + changes, + }; + + let json = serde_json::to_string(&output).unwrap(); + assert!(json.contains("\"left\":\"v1\"")); + assert!(json.contains("\"right\":\"v2\"")); + assert!(json.contains("\"added\"")); + assert!(json.contains("\"total_changes\"")); + } + + #[test] + fn test_change_row_for_added_endpoint() { + let row = ChangeRow { + kind: "endpoint".into(), + change: "added".into(), + item: "POST /pets".into(), + }; + assert_eq!(row.kind, "endpoint"); + assert_eq!(row.change, "added"); + assert_eq!(row.item, "POST /pets"); + } + + #[test] + fn test_fail_on_invalid_level() { + let args = Args { + left: "v1".into(), + right: "v2".into(), + fail_on: Some("minor".into()), + details: false, + }; + + // Validate --fail-on logic inline + if let Some(ref level) = args.fail_on { + assert_ne!(level, "breaking"); + } + } } diff --git a/src/cli/doctor.rs b/src/cli/doctor.rs index 8e62ac4..0b9e3b2 100644 --- a/src/cli/doctor.rs +++ b/src/cli/doctor.rs @@ -12,6 +12,7 @@ use crate::core::indexer::{build_index, resolve_pointer}; use crate::core::spec::SpecIndex; use crate::errors::SwaggerCliError; use crate::output::robot; +use crate::utils::dir_size; // --------------------------------------------------------------------------- // CLI arguments @@ -103,22 
+104,6 @@ struct AliasCheckResult { // Helpers // --------------------------------------------------------------------------- -/// Compute total size of a directory (non-recursive into symlinks). -fn dir_size(path: &PathBuf) -> u64 { - let Ok(entries) = fs::read_dir(path) else { - return 0; - }; - let mut total: u64 = 0; - for entry in entries.flatten() { - if let Ok(md) = entry.metadata() - && md.is_file() - { - total += md.len(); - } - } - total -} - /// Discover all alias directory names in the cache dir, including those /// without a valid meta.json (which list_aliases would skip). fn discover_alias_dirs(cache_root: &PathBuf) -> Vec { diff --git a/src/cli/fetch.rs b/src/cli/fetch.rs index e08bf05..5e5154f 100644 --- a/src/cli/fetch.rs +++ b/src/cli/fetch.rs @@ -7,7 +7,7 @@ use serde::Serialize; use tokio::io::AsyncReadExt; use crate::core::cache::{CacheManager, compute_hash, validate_alias}; -use crate::core::config::{AuthType, Config, CredentialSource, cache_dir, config_path}; +use crate::core::config::{AuthType, Config, cache_dir, config_path, resolve_credential}; use crate::core::http::AsyncHttpClient; use crate::core::indexer::{Format, build_index, detect_format, normalize_to_json}; use crate::core::network::{NetworkPolicy, resolve_policy}; @@ -121,21 +121,6 @@ fn classify_source(url: &str) -> SourceKind { // Auth header resolution // --------------------------------------------------------------------------- -/// Resolve a credential source to its string value. 
-fn resolve_credential(source: &CredentialSource) -> Result { - match source { - CredentialSource::Literal { value } => Ok(value.clone()), - CredentialSource::EnvVar { name } => std::env::var(name).map_err(|_| { - SwaggerCliError::Auth(format!( - "environment variable '{name}' not set (required by auth profile)" - )) - }), - CredentialSource::Keyring { service, account } => Err(SwaggerCliError::Auth(format!( - "keyring credential lookup not yet implemented (service={service}, account={account})" - ))), - } -} - /// Build the list of auth headers from CLI flags and config auth profile. /// /// Precedence: --bearer and --header flags override auth profile values. @@ -211,6 +196,7 @@ async fn fetch_inner( cache_path: PathBuf, robot_mode: bool, network_policy: NetworkPolicy, + config_override: Option<&std::path::Path>, ) -> Result<(), SwaggerCliError> { let start = Instant::now(); @@ -224,7 +210,7 @@ async fn fetch_inner( } // 3. Load config and resolve auth headers - let cfg = Config::load(&config_path(None))?; + let cfg = Config::load(&config_path(config_override))?; let auth_headers = resolve_auth_headers(args, &cfg)?; // 4. 
Fetch raw bytes based on source kind @@ -353,10 +339,15 @@ async fn fetch_inner( // Public entry point // --------------------------------------------------------------------------- -pub async fn execute(args: &Args, robot_mode: bool) -> Result<(), SwaggerCliError> { +pub async fn execute( + args: &Args, + robot_mode: bool, + network_flag: &str, + config_override: Option<&std::path::Path>, +) -> Result<(), SwaggerCliError> { let cache = cache_dir(); - let policy = resolve_policy("auto")?; - fetch_inner(args, cache, robot_mode, policy).await + let policy = resolve_policy(network_flag)?; + fetch_inner(args, cache, robot_mode, policy, config_override).await } // --------------------------------------------------------------------------- @@ -366,6 +357,7 @@ pub async fn execute(args: &Args, robot_mode: bool) -> Result<(), SwaggerCliErro #[cfg(test)] mod tests { use super::*; + use crate::core::config::CredentialSource; // -- Source classification ----------------------------------------------- @@ -650,7 +642,7 @@ mod tests { let args = make_test_args(spec_path.to_str().unwrap(), "localtest"); - let result = fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto, None).await; assert!(result.is_ok(), "execute failed: {result:?}"); let cm = CacheManager::new(cache_path); @@ -693,7 +685,7 @@ paths: let args = make_test_args(spec_path.to_str().unwrap(), "yamltest"); - let result = fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto, None).await; assert!(result.is_ok(), "execute failed: {result:?}"); let cm = CacheManager::new(cache_path); @@ -722,12 +714,12 @@ paths: let args = make_test_args(spec_path.to_str().unwrap(), "dupetest"); assert!( - fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto) + fetch_inner(&args, cache_path.clone(), false, 
NetworkPolicy::Auto, None) .await .is_ok() ); - let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto, None).await; assert!(result.is_err()); match result.unwrap_err() { SwaggerCliError::AliasExists(alias) => assert_eq!(alias, "dupetest"), @@ -752,9 +744,15 @@ paths: let args_v1 = make_test_args(spec_path.to_str().unwrap(), "forcetest"); assert!( - fetch_inner(&args_v1, cache_path.clone(), false, NetworkPolicy::Auto) - .await - .is_ok() + fetch_inner( + &args_v1, + cache_path.clone(), + false, + NetworkPolicy::Auto, + None + ) + .await + .is_ok() ); let spec_v2 = serde_json::json!({ @@ -767,9 +765,15 @@ paths: let mut args_v2 = make_test_args(spec_path.to_str().unwrap(), "forcetest"); args_v2.force = true; assert!( - fetch_inner(&args_v2, cache_path.clone(), false, NetworkPolicy::Auto) - .await - .is_ok() + fetch_inner( + &args_v2, + cache_path.clone(), + false, + NetworkPolicy::Auto, + None + ) + .await + .is_ok() ); let cm = CacheManager::new(cache_path); @@ -795,7 +799,7 @@ paths: let args = make_test_args(spec_path.to_str().unwrap(), "robottest"); - let result = fetch_inner(&args, cache_path.clone(), true, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path.clone(), true, NetworkPolicy::Auto, None).await; assert!(result.is_ok(), "robot mode execute failed: {result:?}"); let cm = CacheManager::new(cache_path); @@ -819,7 +823,7 @@ paths: let args = make_test_args(spec_path.to_str().unwrap(), "../bad-alias"); - let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto, None).await; assert!(result.is_err()); match result.unwrap_err() { SwaggerCliError::Usage(msg) => { @@ -847,7 +851,7 @@ paths: let url = format!("file://{}", spec_path.to_str().unwrap()); let args = make_test_args(&url, "fileprefixtest"); - let result = fetch_inner(&args, 
cache_path.clone(), false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path.clone(), false, NetworkPolicy::Auto, None).await; assert!(result.is_ok(), "file:// prefix failed: {result:?}"); let cm = CacheManager::new(cache_path); @@ -863,7 +867,7 @@ paths: let args = make_test_args("file:///nonexistent/path/spec.json", "nofile"); - let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto).await; + let result = fetch_inner(&args, cache_path, false, NetworkPolicy::Auto, None).await; assert!(result.is_err()); assert!( matches!(result.unwrap_err(), SwaggerCliError::Io(_)), diff --git a/src/cli/list.rs b/src/cli/list.rs index db0cea2..391e1e4 100644 --- a/src/cli/list.rs +++ b/src/cli/list.rs @@ -8,6 +8,7 @@ use tabled::Tabled; use crate::core::cache::CacheManager; use crate::core::config::cache_dir; +use crate::core::indexer::method_rank; use crate::errors::SwaggerCliError; use crate::output::robot; use crate::output::table::render_table_or_empty; @@ -88,23 +89,6 @@ struct EndpointRow { summary: String, } -// --------------------------------------------------------------------------- -// Helpers -// --------------------------------------------------------------------------- - -/// Map an HTTP method string to a sort rank. -/// GET=0, POST=1, PUT=2, PATCH=3, DELETE=4, everything else=5. 
-fn method_rank(method: &str) -> u8 { - match method.to_uppercase().as_str() { - "GET" => 0, - "POST" => 1, - "PUT" => 2, - "PATCH" => 3, - "DELETE" => 4, - _ => 5, - } -} - // --------------------------------------------------------------------------- // Execute // --------------------------------------------------------------------------- @@ -602,7 +586,9 @@ mod tests { assert_eq!(method_rank("PATCH"), 3); assert_eq!(method_rank("DELETE"), 4); assert_eq!(method_rank("OPTIONS"), 5); - assert_eq!(method_rank("HEAD"), 5); + assert_eq!(method_rank("HEAD"), 6); + assert_eq!(method_rank("TRACE"), 7); + assert_eq!(method_rank("FOOBAR"), 99); } #[test] diff --git a/src/cli/show.rs b/src/cli/show.rs index a138eed..fad97b6 100644 --- a/src/cli/show.rs +++ b/src/cli/show.rs @@ -6,7 +6,7 @@ use serde_json::Value; use crate::core::cache::CacheManager; use crate::core::config::cache_dir; -use crate::core::refs::expand_refs; +use crate::core::refs::{expand_refs, resolve_json_pointer}; use crate::errors::SwaggerCliError; use crate::output::robot::robot_success; @@ -47,34 +47,6 @@ pub struct ShowOutput { pub security: Value, } -/// Navigate a JSON value using a JSON Pointer (RFC 6901). -/// -/// Unescapes `~1` -> `/` and `~0` -> `~` (decode ~1 first per spec). 
-fn navigate_pointer(root: &Value, pointer: &str) -> Option { - if pointer.is_empty() { - return None; - } - - let stripped = pointer.strip_prefix('/')?; - - let mut current = root; - for token in stripped.split('/') { - let unescaped = token.replace("~1", "/").replace("~0", "~"); - match current { - Value::Object(map) => { - current = map.get(&unescaped)?; - } - Value::Array(arr) => { - let idx: usize = unescaped.parse().ok()?; - current = arr.get(idx)?; - } - _ => return None, - } - } - - Some(current.clone()) -} - pub async fn execute(args: &Args, robot: bool) -> Result<(), SwaggerCliError> { let start = Instant::now(); @@ -125,12 +97,14 @@ pub async fn execute(args: &Args, robot: bool) -> Result<(), SwaggerCliError> { let raw = cm.load_raw(&args.alias, &meta)?; // Navigate to operation subtree - let operation = navigate_pointer(&raw, &endpoint.operation_ptr).ok_or_else(|| { - SwaggerCliError::Cache(format!( - "Failed to navigate to operation at pointer '{}' in raw spec for alias '{}'", - endpoint.operation_ptr, args.alias - )) - })?; + let operation = resolve_json_pointer(&raw, &endpoint.operation_ptr) + .cloned() + .ok_or_else(|| { + SwaggerCliError::Cache(format!( + "Failed to navigate to operation at pointer '{}' in raw spec for alias '{}'", + endpoint.operation_ptr, args.alias + )) + })?; let mut operation = operation; @@ -374,7 +348,7 @@ mod tests { }); // Navigate to GET /pets/{petId} - let result = navigate_pointer(&raw, "/paths/~1pets~1{petId}/get"); + let result = resolve_json_pointer(&raw, "/paths/~1pets~1{petId}/get"); assert!(result.is_some()); let op = result.unwrap(); assert_eq!(op["summary"], "Get a pet"); @@ -382,23 +356,23 @@ mod tests { assert_eq!(op["parameters"][0]["name"], "petId"); // Navigate to DELETE /pets/{petId} - let result = navigate_pointer(&raw, "/paths/~1pets~1{petId}/delete"); + let result = resolve_json_pointer(&raw, "/paths/~1pets~1{petId}/delete"); assert!(result.is_some()); let op = result.unwrap(); assert_eq!(op["summary"], 
"Delete a pet"); // Navigate to GET /pets - let result = navigate_pointer(&raw, "/paths/~1pets/get"); + let result = resolve_json_pointer(&raw, "/paths/~1pets/get"); assert!(result.is_some()); let op = result.unwrap(); assert_eq!(op["summary"], "List pets"); // Invalid pointer - let result = navigate_pointer(&raw, "/paths/~1nonexistent/get"); + let result = resolve_json_pointer(&raw, "/paths/~1nonexistent/get"); assert!(result.is_none()); // Empty pointer - let result = navigate_pointer(&raw, ""); + let result = resolve_json_pointer(&raw, ""); assert!(result.is_none()); } diff --git a/src/cli/sync_cmd.rs b/src/cli/sync_cmd.rs index 2fe6d16..c36ed63 100644 --- a/src/cli/sync_cmd.rs +++ b/src/cli/sync_cmd.rs @@ -7,9 +7,10 @@ use clap::Args as ClapArgs; use serde::Serialize; use crate::core::cache::{CacheManager, CacheMetadata, compute_hash, validate_alias}; -use crate::core::config::{AuthType, Config, CredentialSource, config_path}; +use crate::core::config::{AuthType, Config, config_path, resolve_credential}; use crate::core::http::{AsyncHttpClient, ConditionalFetchResult}; use crate::core::indexer::{Format, build_index, detect_format, normalize_to_json}; +use crate::core::network::{NetworkPolicy, resolve_policy}; use crate::core::spec::SpecIndex; use crate::errors::SwaggerCliError; use crate::output::robot; @@ -203,24 +204,6 @@ fn compute_diff(old: &SpecIndex, new: &SpecIndex) -> (ChangeSummary, ChangeDetai (summary, details) } -// --------------------------------------------------------------------------- -// Auth credential resolution -// --------------------------------------------------------------------------- - -fn resolve_credential(source: &CredentialSource) -> Result { - match source { - CredentialSource::Literal { value } => Ok(value.clone()), - CredentialSource::EnvVar { name } => std::env::var(name).map_err(|_| { - SwaggerCliError::Auth(format!( - "environment variable '{name}' not set (required by auth profile)" - )) - }), - CredentialSource::Keyring { 
service, account } => Err(SwaggerCliError::Auth(format!( - "keyring credential lookup not yet implemented (service={service}, account={account})" - ))), - } -} - // --------------------------------------------------------------------------- // Core sync logic (testable with explicit cache path) // --------------------------------------------------------------------------- @@ -229,6 +212,8 @@ async fn sync_inner( args: &Args, cache_path: PathBuf, robot_mode: bool, + network_policy: NetworkPolicy, + config_override: Option<&std::path::Path>, ) -> Result<(), SwaggerCliError> { let start = Instant::now(); @@ -246,8 +231,10 @@ async fn sync_inner( })?; // 2. Build HTTP client - let cfg = Config::load(&config_path(None))?; - let mut builder = AsyncHttpClient::builder().allow_insecure_http(url.starts_with("http://")); + let cfg = Config::load(&config_path(config_override))?; + let mut builder = AsyncHttpClient::builder() + .allow_insecure_http(url.starts_with("http://")) + .network_policy(network_policy); if let Some(profile_name) = &args.auth { let profile = cfg.auth_profiles.get(profile_name).ok_or_else(|| { @@ -434,7 +421,12 @@ fn output_changes( // Public entry point // --------------------------------------------------------------------------- -pub async fn execute(args: &Args, robot: bool) -> Result<(), SwaggerCliError> { +pub async fn execute( + args: &Args, + robot: bool, + network_flag: &str, + config_override: Option<&std::path::Path>, +) -> Result<(), SwaggerCliError> { if args.all { return Err(SwaggerCliError::Usage( "sync --all is not yet implemented".into(), @@ -442,7 +434,8 @@ pub async fn execute(args: &Args, robot: bool) -> Result<(), SwaggerCliError> { } let cache = crate::core::config::cache_dir(); - sync_inner(args, cache, robot).await + let policy = resolve_policy(network_flag)?; + sync_inner(args, cache, robot, policy, config_override).await } // --------------------------------------------------------------------------- diff --git 
a/src/core/cache.rs b/src/core/cache.rs index 93a3ea0..ff9d6d4 100644 --- a/src/core/cache.rs +++ b/src/core/cache.rs @@ -9,6 +9,7 @@ use fs2::FileExt; use regex::Regex; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; +use std::sync::LazyLock; use crate::errors::SwaggerCliError; @@ -46,8 +47,11 @@ impl CacheMetadata { /// /// Accepts: alphanumeric start, then alphanumeric/dot/dash/underscore, 1-64 chars. /// Rejects: path separators, `..`, leading dots, Windows reserved device names. +static ALIAS_PATTERN: LazyLock = + LazyLock::new(|| Regex::new(r"^[A-Za-z0-9][A-Za-z0-9._\-]{0,63}$").expect("valid regex")); + pub fn validate_alias(alias: &str) -> Result<(), SwaggerCliError> { - let pattern = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9._\-]{0,63}$").expect("valid regex"); + let pattern = &*ALIAS_PATTERN; if !pattern.is_match(alias) { return Err(SwaggerCliError::Usage(format!( diff --git a/src/core/config.rs b/src/core/config.rs index 9c0a298..8fce527 100644 --- a/src/core/config.rs +++ b/src/core/config.rs @@ -134,12 +134,45 @@ impl Config { let contents = toml::to_string_pretty(self).map_err(|e| SwaggerCliError::Config(e.to_string()))?; - std::fs::write(path, contents).map_err(|e| { - SwaggerCliError::Config(format!("failed to write {}: {e}", path.display())) + // Atomic write: write to .tmp, fsync, then rename to avoid corruption on crash + let tmp_path = path.with_extension("toml.tmp"); + + std::fs::write(&tmp_path, &contents).map_err(|e| { + SwaggerCliError::Config(format!("failed to write {}: {e}", tmp_path.display())) + })?; + + // Best-effort fsync + if let Ok(file) = std::fs::File::open(&tmp_path) { + let _ = file.sync_all(); + } + + std::fs::rename(&tmp_path, path).map_err(|e| { + SwaggerCliError::Config(format!( + "failed to rename {} -> {}: {e}", + tmp_path.display(), + path.display() + )) }) } } +/// Resolve a credential source to its string value. +/// +/// Used by fetch and sync commands to obtain auth tokens from config profiles. 
+pub fn resolve_credential(source: &CredentialSource) -> Result { + match source { + CredentialSource::Literal { value } => Ok(value.clone()), + CredentialSource::EnvVar { name } => std::env::var(name).map_err(|_| { + SwaggerCliError::Auth(format!( + "environment variable '{name}' not set (required by auth profile)" + )) + }), + CredentialSource::Keyring { service, account } => Err(SwaggerCliError::Auth(format!( + "keyring credential lookup not yet implemented (service={service}, account={account})" + ))), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/core/diff.rs b/src/core/diff.rs new file mode 100644 index 0000000..fef65fd --- /dev/null +++ b/src/core/diff.rs @@ -0,0 +1,353 @@ +use std::collections::BTreeSet; + +use serde::Serialize; + +use super::spec::{IndexedEndpoint, SpecIndex}; + +/// Key that uniquely identifies an endpoint: (METHOD, path). +type EndpointKey = (String, String); + +fn endpoint_key(ep: &IndexedEndpoint) -> EndpointKey { + (ep.method.to_uppercase(), ep.path.clone()) +} + +/// Checks whether two endpoints differ structurally (beyond just path+method). 
+fn endpoints_differ(left: &IndexedEndpoint, right: &IndexedEndpoint) -> bool { + left.summary != right.summary + || left.deprecated != right.deprecated + || left.parameters.len() != right.parameters.len() + || left.request_body_required != right.request_body_required + || left.request_body_content_types != right.request_body_content_types + || left.security_schemes != right.security_schemes + || left.security_required != right.security_required + || left.tags != right.tags + || left.operation_id != right.operation_id + || left.description != right.description + || params_differ(left, right) +} + +fn params_differ(left: &IndexedEndpoint, right: &IndexedEndpoint) -> bool { + if left.parameters.len() != right.parameters.len() { + return true; + } + for (lp, rp) in left.parameters.iter().zip(right.parameters.iter()) { + if lp.name != rp.name || lp.location != rp.location || lp.required != rp.required { + return true; + } + } + false +} + +#[derive(Debug, Serialize)] +pub struct DiffResult { + pub endpoints: EndpointDiff, + pub schemas: SchemaDiff, + pub summary: DiffSummary, +} + +#[derive(Debug, Serialize)] +pub struct EndpointDiff { + /// Each entry: [method, path] + pub added: Vec<[String; 2]>, + /// Each entry: [method, path] + pub removed: Vec<[String; 2]>, + /// Each entry: [method, path] + pub modified: Vec<[String; 2]>, +} + +#[derive(Debug, Serialize)] +pub struct SchemaDiff { + pub added: Vec, + pub removed: Vec, +} + +#[derive(Debug, Serialize)] +pub struct DiffSummary { + pub total_changes: usize, + pub has_breaking: bool, +} + +/// Compare two spec indexes and produce a structural diff. +/// +/// Breaking changes: removed endpoints. 
+pub fn diff_indexes(left: &SpecIndex, right: &SpecIndex) -> DiffResult { + // Build lookup maps by endpoint key + let left_map: std::collections::BTreeMap = left + .endpoints + .iter() + .map(|ep| (endpoint_key(ep), ep)) + .collect(); + let right_map: std::collections::BTreeMap = right + .endpoints + .iter() + .map(|ep| (endpoint_key(ep), ep)) + .collect(); + + let left_keys: BTreeSet<_> = left_map.keys().cloned().collect(); + let right_keys: BTreeSet<_> = right_map.keys().cloned().collect(); + + let added: Vec<[String; 2]> = right_keys + .difference(&left_keys) + .map(|(method, path)| [method.clone(), path.clone()]) + .collect(); + + let removed: Vec<[String; 2]> = left_keys + .difference(&right_keys) + .map(|(method, path)| [method.clone(), path.clone()]) + .collect(); + + let modified: Vec<[String; 2]> = left_keys + .intersection(&right_keys) + .filter(|key| { + let l = left_map[key]; + let r = right_map[key]; + endpoints_differ(l, r) + }) + .map(|(method, path)| [method.clone(), path.clone()]) + .collect(); + + // Schema diff by name + let left_schemas: BTreeSet<&str> = left.schemas.iter().map(|s| s.name.as_str()).collect(); + let right_schemas: BTreeSet<&str> = right.schemas.iter().map(|s| s.name.as_str()).collect(); + + let schemas_added: Vec = right_schemas + .difference(&left_schemas) + .map(|s| s.to_string()) + .collect(); + let schemas_removed: Vec = left_schemas + .difference(&right_schemas) + .map(|s| s.to_string()) + .collect(); + + let has_breaking = !removed.is_empty(); + let total_changes = + added.len() + removed.len() + modified.len() + schemas_added.len() + schemas_removed.len(); + + DiffResult { + endpoints: EndpointDiff { + added, + removed, + modified, + }, + schemas: SchemaDiff { + added: schemas_added, + removed: schemas_removed, + }, + summary: DiffSummary { + total_changes, + has_breaking, + }, + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::core::spec::{IndexInfo, IndexedEndpoint, IndexedParam, IndexedSchema, SpecIndex}; 
+ + fn make_endpoint(path: &str, method: &str, summary: &str) -> IndexedEndpoint { + IndexedEndpoint { + path: path.to_string(), + method: method.to_string(), + summary: Some(summary.to_string()), + description: None, + operation_id: None, + tags: vec![], + deprecated: false, + parameters: vec![], + request_body_required: false, + request_body_content_types: vec![], + security_schemes: vec![], + security_required: false, + operation_ptr: String::new(), + } + } + + fn make_index(endpoints: Vec, schemas: Vec) -> SpecIndex { + SpecIndex { + index_version: 1, + generation: 1, + content_hash: "sha256:test".into(), + openapi: "3.0.3".into(), + info: IndexInfo { + title: "Test".into(), + version: "1.0.0".into(), + }, + endpoints, + schemas, + tags: vec![], + } + } + + #[test] + fn test_identical_indexes_no_changes() { + let eps = vec![ + make_endpoint("/pets", "GET", "List pets"), + make_endpoint("/pets", "POST", "Create pet"), + ]; + let schemas = vec![IndexedSchema { + name: "Pet".into(), + schema_ptr: "#/components/schemas/Pet".into(), + }]; + let left = make_index(eps.clone(), schemas.clone()); + let right = make_index(eps, schemas); + + let result = diff_indexes(&left, &right); + assert_eq!(result.summary.total_changes, 0); + assert!(!result.summary.has_breaking); + assert!(result.endpoints.added.is_empty()); + assert!(result.endpoints.removed.is_empty()); + assert!(result.endpoints.modified.is_empty()); + assert!(result.schemas.added.is_empty()); + assert!(result.schemas.removed.is_empty()); + } + + #[test] + fn test_added_endpoint() { + let left = make_index(vec![make_endpoint("/pets", "GET", "List pets")], vec![]); + let right = make_index( + vec![ + make_endpoint("/pets", "GET", "List pets"), + make_endpoint("/pets", "POST", "Create pet"), + ], + vec![], + ); + + let result = diff_indexes(&left, &right); + assert_eq!(result.endpoints.added.len(), 1); + assert_eq!(result.endpoints.added[0], ["POST", "/pets"]); + assert_eq!(result.endpoints.removed.len(), 0); + 
assert!(!result.summary.has_breaking); + } + + #[test] + fn test_removed_endpoint_is_breaking() { + let left = make_index( + vec![ + make_endpoint("/pets", "GET", "List pets"), + make_endpoint("/pets", "POST", "Create pet"), + ], + vec![], + ); + let right = make_index(vec![make_endpoint("/pets", "GET", "List pets")], vec![]); + + let result = diff_indexes(&left, &right); + assert_eq!(result.endpoints.removed.len(), 1); + assert_eq!(result.endpoints.removed[0], ["POST", "/pets"]); + assert!(result.summary.has_breaking); + } + + #[test] + fn test_modified_endpoint() { + let left = make_index(vec![make_endpoint("/pets", "GET", "List pets")], vec![]); + let mut modified_ep = make_endpoint("/pets", "GET", "List all pets"); + modified_ep.deprecated = true; + let right = make_index(vec![modified_ep], vec![]); + + let result = diff_indexes(&left, &right); + assert_eq!(result.endpoints.modified.len(), 1); + assert_eq!(result.endpoints.modified[0], ["GET", "/pets"]); + assert_eq!(result.endpoints.added.len(), 0); + assert_eq!(result.endpoints.removed.len(), 0); + } + + #[test] + fn test_schema_added() { + let left = make_index(vec![], vec![]); + let right = make_index( + vec![], + vec![IndexedSchema { + name: "Pet".into(), + schema_ptr: "#/components/schemas/Pet".into(), + }], + ); + + let result = diff_indexes(&left, &right); + assert_eq!(result.schemas.added, vec!["Pet"]); + assert!(result.schemas.removed.is_empty()); + } + + #[test] + fn test_schema_removed() { + let left = make_index( + vec![], + vec![IndexedSchema { + name: "Pet".into(), + schema_ptr: "#/components/schemas/Pet".into(), + }], + ); + let right = make_index(vec![], vec![]); + + let result = diff_indexes(&left, &right); + assert!(result.schemas.added.is_empty()); + assert_eq!(result.schemas.removed, vec!["Pet"]); + } + + #[test] + fn test_total_changes_count() { + let left = make_index( + vec![ + make_endpoint("/pets", "GET", "List pets"), + make_endpoint("/pets", "DELETE", "Delete pet"), + ], + 
vec![IndexedSchema { + name: "Pet".into(), + schema_ptr: "#/components/schemas/Pet".into(), + }], + ); + let right = make_index( + vec![ + make_endpoint("/pets", "GET", "List all pets"), // modified + make_endpoint("/pets", "POST", "Create pet"), // added + // DELETE removed + ], + vec![ + // Pet removed + IndexedSchema { + name: "NewPet".into(), + schema_ptr: "#/components/schemas/NewPet".into(), + }, // added + ], + ); + + let result = diff_indexes(&left, &right); + // 1 added ep + 1 removed ep + 1 modified ep + 1 added schema + 1 removed schema = 5 + assert_eq!(result.summary.total_changes, 5); + assert!(result.summary.has_breaking); // DELETE was removed + } + + #[test] + fn test_parameter_change_detected() { + let mut left_ep = make_endpoint("/pets", "GET", "List pets"); + left_ep.parameters = vec![IndexedParam { + name: "limit".into(), + location: "query".into(), + required: false, + description: None, + }]; + + let mut right_ep = make_endpoint("/pets", "GET", "List pets"); + right_ep.parameters = vec![IndexedParam { + name: "limit".into(), + location: "query".into(), + required: true, // changed from false to true + description: None, + }]; + + let left = make_index(vec![left_ep], vec![]); + let right = make_index(vec![right_ep], vec![]); + + let result = diff_indexes(&left, &right); + assert_eq!(result.endpoints.modified.len(), 1); + } + + #[test] + fn test_empty_indexes() { + let left = make_index(vec![], vec![]); + let right = make_index(vec![], vec![]); + + let result = diff_indexes(&left, &right); + assert_eq!(result.summary.total_changes, 0); + assert!(!result.summary.has_breaking); + } +} diff --git a/src/core/http.rs b/src/core/http.rs index 491a3cc..ba9edca 100644 --- a/src/core/http.rs +++ b/src/core/http.rs @@ -196,6 +196,9 @@ impl AsyncHttpClient { etag: Option<&str>, last_modified: Option<&str>, ) -> Result { + // Check network policy before any HTTP request + check_remote_fetch(self.network_policy)?; + let parsed = validate_url(url, 
self.allow_insecure_http)?; let host = parsed @@ -245,9 +248,10 @@ impl AsyncHttpClient { s if s == StatusCode::TOO_MANY_REQUESTS || s.is_server_error() => { attempts += 1; if attempts > self.max_retries { - return Err(SwaggerCliError::Network( - client.get(url).send().await.unwrap_err(), - )); + return Err(SwaggerCliError::InvalidSpec(format!( + "request to '{url}' failed after {} retries (last status: {status})", + self.max_retries, + ))); } let delay = self.retry_delay(&response, attempts); tokio::time::sleep(delay).await; @@ -303,9 +307,10 @@ impl AsyncHttpClient { s if s == StatusCode::TOO_MANY_REQUESTS || s.is_server_error() => { attempts += 1; if attempts > self.max_retries { - return Err(SwaggerCliError::Network( - client.get(url).send().await.unwrap_err(), - )); + return Err(SwaggerCliError::InvalidSpec(format!( + "request to '{url}' failed after {} retries (last status: {status})", + self.max_retries, + ))); } let delay = self.retry_delay(&response, attempts); tokio::time::sleep(delay).await; diff --git a/src/core/mod.rs b/src/core/mod.rs index e68d679..2952f0e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,5 +1,6 @@ pub mod cache; pub mod config; +pub mod diff; pub mod http; pub mod indexer; pub mod network; diff --git a/src/main.rs b/src/main.rs index bc88157..da3063c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -63,15 +63,22 @@ async fn main() -> ExitCode { let cmd = command_name(&cli); let robot = cli.robot; + let network_flag = cli.network.as_str(); + let config_override = cli.config.as_deref(); + let result = match &cli.command { - Commands::Fetch(args) => swagger_cli::cli::fetch::execute(args, robot).await, + Commands::Fetch(args) => { + swagger_cli::cli::fetch::execute(args, robot, network_flag, config_override).await + } Commands::List(args) => swagger_cli::cli::list::execute(args, robot).await, Commands::Show(args) => swagger_cli::cli::show::execute(args, robot).await, Commands::Search(args) => swagger_cli::cli::search::execute(args, 
robot).await, Commands::Schemas(args) => swagger_cli::cli::schemas::execute(args, robot).await, Commands::Tags(args) => swagger_cli::cli::tags::execute(args, robot).await, Commands::Aliases(args) => swagger_cli::cli::aliases::execute(args, robot).await, - Commands::Sync(args) => swagger_cli::cli::sync_cmd::execute(args, robot).await, + Commands::Sync(args) => { + swagger_cli::cli::sync_cmd::execute(args, robot, network_flag, config_override).await + } Commands::Doctor(args) => swagger_cli::cli::doctor::execute(args, robot).await, Commands::Cache(args) => swagger_cli::cli::cache_cmd::execute(args, robot).await, Commands::Diff(args) => swagger_cli::cli::diff::execute(args, robot).await, diff --git a/src/output/human.rs b/src/output/human.rs index aac1ee7..20c0d67 100644 --- a/src/output/human.rs +++ b/src/output/human.rs @@ -1,11 +1,5 @@ -use std::io::IsTerminal; - use crate::errors::SwaggerCliError; -pub fn is_tty() -> bool { - std::io::stdout().is_terminal() -} - pub fn print_error(err: &SwaggerCliError) { eprintln!("error: {err}"); if let Some(suggestion) = err.suggestion() { diff --git a/src/utils.rs b/src/utils.rs index 638e9b7..4b969d2 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1 +1,16 @@ -// Utility functions +use std::path::Path; + +/// Compute total size of files in a directory (non-recursive, skips symlinks). +/// +/// Used by doctor and cache commands to report disk usage per alias. 
+pub fn dir_size(path: &Path) -> u64 { + let Ok(entries) = std::fs::read_dir(path) else { + return 0; + }; + entries + .filter_map(Result::ok) + .filter_map(|e| e.metadata().ok()) + .filter(|m| m.is_file()) + .map(|m| m.len()) + .sum() +} diff --git a/tests/fixtures/golden/list.json b/tests/fixtures/golden/list.json new file mode 100644 index 0000000..9d705ad --- /dev/null +++ b/tests/fixtures/golden/list.json @@ -0,0 +1,72 @@ +{ + "data": { + "applied_filters": {}, + "endpoints": [ + { + "deprecated": false, + "method": "GET", + "operation_id": "listPets", + "path": "/pets", + "summary": "List all pets", + "tags": [ + "pets" + ] + }, + { + "deprecated": false, + "method": "POST", + "operation_id": "createPet", + "path": "/pets", + "summary": "Create a pet", + "tags": [ + "pets" + ] + }, + { + "deprecated": false, + "method": "GET", + "operation_id": "showPetById", + "path": "/pets/{petId}", + "summary": "Get a pet by ID", + "tags": [ + "pets" + ] + }, + { + "deprecated": true, + "method": "DELETE", + "operation_id": "deletePet", + "path": "/pets/{petId}", + "summary": "Delete a pet", + "tags": [ + "pets" + ] + }, + { + "deprecated": false, + "method": "GET", + "operation_id": "getInventory", + "path": "/store/inventory", + "summary": "Get store inventory", + "tags": [ + "store" + ] + } + ], + "filtered": 5, + "meta": { + "alias": "petstore", + "cached_at": "MASKED_TIMESTAMP", + "duration_ms": 0, + "spec_version": "1.0.0" + }, + "total": 5 + }, + "meta": { + "command": "list", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/fixtures/golden/schemas_list.json b/tests/fixtures/golden/schemas_list.json new file mode 100644 index 0000000..b7408c2 --- /dev/null +++ b/tests/fixtures/golden/schemas_list.json @@ -0,0 +1,23 @@ +{ + "data": { + "schemas": [ + { + "name": "Error" + }, + { + "name": "NewPet" + }, + { + "name": "Pet" + } + ], + "total": 3 + }, + "meta": { + "command": 
"schemas", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/fixtures/golden/schemas_show.json b/tests/fixtures/golden/schemas_show.json new file mode 100644 index 0000000..268968b --- /dev/null +++ b/tests/fixtures/golden/schemas_show.json @@ -0,0 +1,31 @@ +{ + "data": { + "name": "Pet", + "schema": { + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + }, + "tag": { + "type": "string" + } + }, + "required": [ + "id", + "name" + ], + "type": "object" + } + }, + "meta": { + "command": "schemas", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/fixtures/golden/search.json b/tests/fixtures/golden/search.json new file mode 100644 index 0000000..8a3448f --- /dev/null +++ b/tests/fixtures/golden/search.json @@ -0,0 +1,110 @@ +{ + "data": { + "results": [ + { + "matches": [ + { + "field": "path", + "snippet": "/pets" + }, + { + "field": "summary", + "snippet": "List all pets" + } + ], + "method": "GET", + "name": "/pets", + "rank": 1, + "score": 3000, + "summary": "List all pets", + "type": "endpoint" + }, + { + "matches": [ + { + "field": "path", + "snippet": "/pets" + }, + { + "field": "summary", + "snippet": "Create a pet" + } + ], + "method": "POST", + "name": "/pets", + "rank": 2, + "score": 3000, + "summary": "Create a pet", + "type": "endpoint" + }, + { + "matches": [ + { + "field": "path", + "snippet": "/pets/{petId}" + }, + { + "field": "summary", + "snippet": "Get a pet by ID" + } + ], + "method": "GET", + "name": "/pets/{petId}", + "rank": 3, + "score": 3000, + "summary": "Get a pet by ID", + "type": "endpoint" + }, + { + "matches": [ + { + "field": "path", + "snippet": "/pets/{petId}" + }, + { + "field": "summary", + "snippet": "Delete a pet" + } + ], + "method": "DELETE", + "name": "/pets/{petId}", + "rank": 4, + "score": 3000, + 
"summary": "Delete a pet", + "type": "endpoint" + }, + { + "matches": [ + { + "field": "schema_name", + "snippet": "NewPet" + } + ], + "name": "NewPet", + "rank": 5, + "score": 1600, + "type": "schema" + }, + { + "matches": [ + { + "field": "schema_name", + "snippet": "Pet" + } + ], + "name": "Pet", + "rank": 6, + "score": 1600, + "type": "schema" + } + ], + "total": 6 + }, + "meta": { + "command": "search", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/fixtures/golden/show.json b/tests/fixtures/golden/show.json new file mode 100644 index 0000000..175d13a --- /dev/null +++ b/tests/fixtures/golden/show.json @@ -0,0 +1,51 @@ +{ + "data": { + "deprecated": false, + "description": null, + "method": "GET", + "operation_id": "listPets", + "parameters": [ + { + "description": "Maximum number of items to return", + "in": "query", + "name": "limit", + "required": false + }, + { + "description": "Pagination offset", + "in": "query", + "name": "offset", + "required": false + } + ], + "path": "/pets", + "request_body": null, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/Pet" + }, + "type": "array" + } + } + }, + "description": "A list of pets" + } + }, + "security": [], + "summary": "List all pets", + "tags": [ + "pets" + ] + }, + "meta": { + "command": "show", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/fixtures/golden/tags.json b/tests/fixtures/golden/tags.json new file mode 100644 index 0000000..84eadc2 --- /dev/null +++ b/tests/fixtures/golden/tags.json @@ -0,0 +1,24 @@ +{ + "data": { + "tags": [ + { + "description": "Pet operations", + "endpoint_count": 4, + "name": "pets" + }, + { + "description": "Store operations", + "endpoint_count": 1, + "name": "store" + } + ], + "total": 2 + }, + "meta": { + "command": 
"tags", + "duration_ms": 0, + "schema_version": 1, + "tool_version": "MASKED" + }, + "ok": true +} \ No newline at end of file diff --git a/tests/golden_test.rs b/tests/golden_test.rs new file mode 100644 index 0000000..2624976 --- /dev/null +++ b/tests/golden_test.rs @@ -0,0 +1,321 @@ +mod helpers; + +use serde_json::Value; + +/// Recursively mask dynamic fields (timestamps, durations, versions) for stable comparison. +fn normalize_value(value: &mut Value) { + match value { + Value::Object(map) => { + for (key, val) in map.iter_mut() { + match key.as_str() { + "duration_ms" if val.is_number() => *val = Value::Number(0.into()), + "tool_version" if val.is_string() => *val = Value::String("MASKED".into()), + "cached_at" | "fetched_at" | "last_accessed" if val.is_string() => { + *val = Value::String("MASKED_TIMESTAMP".into()) + } + _ => normalize_value(val), + } + } + } + Value::Array(arr) => { + for item in arr.iter_mut() { + normalize_value(item); + } + } + _ => {} + } +} + +/// Normalize robot JSON for golden comparison: mask all dynamic fields. +fn normalize_robot(mut json: Value) -> Value { + normalize_value(&mut json); + json +} + +/// Assert that a robot success envelope has the correct structural invariants. 
+fn assert_robot_success_structure(json: &Value, expected_command: &str) { + // "ok" must be bool true + assert_eq!( + json.get("ok").and_then(|v| v.as_bool()), + Some(true), + "ok field must be true" + ); + + // "data" must be an object + assert!( + json.get("data").is_some_and(|v| v.is_object()), + "data field must be an object" + ); + + // "meta" must be an object with required fields + let meta = json.get("meta").expect("meta field is required"); + assert!(meta.is_object(), "meta must be an object"); + + // meta.schema_version must be number == 1 + let sv = meta + .get("schema_version") + .expect("meta.schema_version required"); + assert!(sv.is_number(), "meta.schema_version must be a number"); + assert_eq!(sv.as_u64(), Some(1), "meta.schema_version must equal 1"); + + // meta.tool_version must be a string + let tv = meta + .get("tool_version") + .expect("meta.tool_version required"); + assert!(tv.is_string(), "meta.tool_version must be a string"); + + // meta.command must be a string matching expected + let cmd = meta.get("command").expect("meta.command required"); + assert!(cmd.is_string(), "meta.command must be a string"); + assert_eq!( + cmd.as_str().unwrap(), + expected_command, + "meta.command must match the command run" + ); + + // meta.duration_ms must be a number + let dur = meta.get("duration_ms").expect("meta.duration_ms required"); + assert!(dur.is_number(), "meta.duration_ms must be a number"); +} + +/// Assert that a robot error envelope has the correct structural invariants. 
+fn assert_robot_error_structure(json: &Value) { + // "ok" must be bool false + assert_eq!( + json.get("ok").and_then(|v| v.as_bool()), + Some(false), + "ok field must be false on error" + ); + + // "data" should be absent or null + let data = json.get("data"); + assert!( + data.is_none() || data.unwrap().is_null(), + "data field must be null or absent on error" + ); + + // "error" must be an object with code and message + let error = json.get("error").expect("error field is required"); + assert!(error.is_object(), "error must be an object"); + assert!( + error.get("code").is_some_and(|v| v.is_string()), + "error.code must be a string" + ); + assert!( + error.get("message").is_some_and(|v| v.is_string()), + "error.message must be a string" + ); + + // "meta" structural invariants (same as success) + let meta = json.get("meta").expect("meta field is required"); + assert!(meta.is_object(), "meta must be an object"); + assert!( + meta.get("schema_version").is_some_and(|v| v.is_number()), + "meta.schema_version must be a number" + ); + assert!( + meta.get("tool_version").is_some_and(|v| v.is_string()), + "meta.tool_version must be a string" + ); + assert!( + meta.get("command").is_some_and(|v| v.is_string()), + "meta.command must be a string" + ); + assert!( + meta.get("duration_ms").is_some_and(|v| v.is_number()), + "meta.duration_ms must be a number" + ); +} + +/// Load a golden snapshot file, parse as JSON. +fn load_golden(name: &str) -> Value { + let path = helpers::fixture_path(&format!("golden/{name}")); + let content = std::fs::read_to_string(&path) + .unwrap_or_else(|e| panic!("failed to read golden file {}: {e}", path.display())); + serde_json::from_str(&content) + .unwrap_or_else(|e| panic!("failed to parse golden file {}: {e}", path.display())) +} + +/// Write a golden snapshot file if it does not exist (first-run bootstrap). 
+fn write_golden_if_missing(name: &str, value: &Value) { + let path = helpers::fixture_path(&format!("golden/{name}")); + if !path.exists() { + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent).expect("failed to create golden dir"); + } + let pretty = + serde_json::to_string_pretty(value).expect("failed to serialize golden snapshot"); + std::fs::write(&path, pretty).expect("failed to write golden snapshot"); + } +} + +// --------------------------------------------------------------------------- +// Golden structure tests +// --------------------------------------------------------------------------- + +#[test] +fn test_golden_list_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd(&env, &["list", "petstore", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "list"); + + // Snapshot comparison + let normalized = normalize_robot(json); + write_golden_if_missing("list.json", &normalized); + let golden = load_golden("list.json"); + assert_eq!( + normalized, golden, + "list robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_show_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd( + &env, + &["show", "petstore", "/pets", "--method", "GET", "--robot"], + ) + .success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "show"); + + let normalized = normalize_robot(json); + write_golden_if_missing("show.json", &normalized); + let golden = load_golden("show.json"); + assert_eq!( + normalized, golden, + "show robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_search_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", 
"petstore"); + + let a = helpers::run_cmd(&env, &["search", "petstore", "pet", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "search"); + + let normalized = normalize_robot(json); + write_golden_if_missing("search.json", &normalized); + let golden = load_golden("search.json"); + assert_eq!( + normalized, golden, + "search robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_schemas_list_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd(&env, &["schemas", "petstore", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "schemas"); + + let normalized = normalize_robot(json); + write_golden_if_missing("schemas_list.json", &normalized); + let golden = load_golden("schemas_list.json"); + assert_eq!( + normalized, golden, + "schemas list robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_schemas_show_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd(&env, &["schemas", "petstore", "--show", "Pet", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "schemas"); + + let normalized = normalize_robot(json); + write_golden_if_missing("schemas_show.json", &normalized); + let golden = load_golden("schemas_show.json"); + assert_eq!( + normalized, golden, + "schemas show robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_tags_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd(&env, &["tags", "petstore", "--robot"]).success(); + let json = 
helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "tags"); + + let normalized = normalize_robot(json); + write_golden_if_missing("tags.json", &normalized); + let golden = load_golden("tags.json"); + assert_eq!( + normalized, golden, + "tags robot output does not match golden snapshot" + ); +} + +#[test] +fn test_golden_aliases_robot_structure() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let a = helpers::run_cmd(&env, &["aliases", "--list", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_robot_success_structure(&json, "aliases"); + + // Aliases output has dynamic timestamps so we only check structure, + // not golden snapshot equality. Normalize and check key shape. + let data = json.get("data").unwrap(); + assert!(data.get("aliases").is_some_and(|v| v.is_array())); + assert!(data.get("count").is_some_and(|v| v.is_number())); +} + +#[test] +fn test_golden_error_structure() { + let env = helpers::TestEnv::new(); + // Do NOT fetch -- alias does not exist + + let a = helpers::run_cmd(&env, &["list", "nonexistent", "--robot"]); + + // Command should fail with non-zero exit + let a = a.failure(); + + // Error JSON goes to stderr + let stderr = std::str::from_utf8(&a.get_output().stderr).expect("stderr not UTF-8"); + let json: Value = serde_json::from_str(stderr.trim()).expect("stderr is not valid robot JSON"); + + assert_robot_error_structure(&json); + + // Verify error code + assert_eq!( + json["error"]["code"].as_str(), + Some("ALIAS_NOT_FOUND"), + "error code should be ALIAS_NOT_FOUND" + ); + + // Verify meta.command + assert_eq!( + json["meta"]["command"].as_str(), + Some("list"), + "meta.command should be list" + ); +} diff --git a/tests/index_invariant_test.rs b/tests/index_invariant_test.rs new file mode 100644 index 0000000..ead714d --- /dev/null +++ b/tests/index_invariant_test.rs @@ -0,0 +1,84 @@ +mod helpers; 
+ +/// Delete the raw.json file from a cached alias to prove index-only reads. +fn delete_raw_json(env: &helpers::TestEnv, alias: &str) { + let raw_path = env.home_dir.join("cache").join(alias).join("raw.json"); + assert!( + raw_path.exists(), + "raw.json must exist before deletion: {}", + raw_path.display() + ); + std::fs::remove_file(&raw_path).expect("failed to delete raw.json"); + assert!(!raw_path.exists(), "raw.json should be gone after deletion"); +} + +// --------------------------------------------------------------------------- +// Index-only reads: these commands must work WITHOUT raw.json +// --------------------------------------------------------------------------- + +#[test] +fn test_list_does_not_read_raw_json() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + delete_raw_json(&env, "petstore"); + + let a = helpers::run_cmd(&env, &["list", "petstore", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_eq!(json["ok"], true); + assert!(json["data"]["total"].as_u64().unwrap() > 0); +} + +#[test] +fn test_search_does_not_read_raw_json() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + delete_raw_json(&env, "petstore"); + + let a = helpers::run_cmd(&env, &["search", "petstore", "pet", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_eq!(json["ok"], true); +} + +#[test] +fn test_tags_does_not_read_raw_json() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + delete_raw_json(&env, "petstore"); + + let a = helpers::run_cmd(&env, &["tags", "petstore", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_eq!(json["ok"], true); +} + +#[test] +fn test_schemas_list_does_not_read_raw_json() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, 
"petstore.json", "petstore"); + delete_raw_json(&env, "petstore"); + + let a = helpers::run_cmd(&env, &["schemas", "petstore", "--robot"]).success(); + let json = helpers::parse_robot_json(&a.get_output().stdout); + + assert_eq!(json["ok"], true); +} + +// --------------------------------------------------------------------------- +// Negative case: show REQUIRES raw.json +// --------------------------------------------------------------------------- + +#[test] +fn test_show_requires_raw_json() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + delete_raw_json(&env, "petstore"); + + // show needs raw.json to resolve the operation pointer + helpers::run_cmd( + &env, + &["show", "petstore", "/pets", "--method", "GET", "--robot"], + ) + .failure(); +} diff --git a/tests/integration_test.rs b/tests/integration_test.rs index 64f6f33..66746fb 100644 --- a/tests/integration_test.rs +++ b/tests/integration_test.rs @@ -104,3 +104,521 @@ fn test_fetch_minimal_fixture() { assert_eq!(json["ok"], true); assert_eq!(json["data"]["total"], 3); } + +// =========================================================================== +// Fetch tests +// =========================================================================== + +#[test] +fn test_fetch_success_robot() { + let env = helpers::TestEnv::new(); + let fixture = helpers::fixture_path("petstore.json"); + + let assert = helpers::run_cmd( + &env, + &[ + "fetch", + fixture.to_str().unwrap(), + "--alias", + "ps", + "--robot", + ], + ) + .success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + assert_eq!(json["data"]["alias"], "ps"); + assert!(json["data"]["endpoint_count"].as_u64().unwrap() > 0); + assert!(json["data"]["schema_count"].as_u64().unwrap() > 0); + assert!( + json["data"]["content_hash"] + .as_str() + .unwrap() + .starts_with("sha256:") + ); + assert_eq!(json["data"]["source_format"], "json"); + 
assert_eq!(json["meta"]["command"], "fetch"); + assert!(json["meta"]["schema_version"].as_u64().is_some()); + assert!(json["meta"]["duration_ms"].as_u64().is_some()); +} + +#[test] +fn test_fetch_invalid_json() { + let env = helpers::TestEnv::new(); + + // Create a non-JSON, non-YAML file that is not a valid OpenAPI spec + let bad_file = env.home_dir.join("not-a-spec.json"); + std::fs::write(&bad_file, b"this is not json or yaml").unwrap(); + + let assert = helpers::run_cmd( + &env, + &[ + "fetch", + bad_file.to_str().unwrap(), + "--alias", + "bad", + "--robot", + ], + ); + + // Should fail -- the file is not valid JSON or YAML + let output = assert.get_output(); + assert!(!output.status.success()); +} + +#[test] +fn test_fetch_alias_exists_error() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "dupe"); + + let fixture = helpers::fixture_path("petstore.json"); + let assert = helpers::run_cmd( + &env, + &[ + "fetch", + fixture.to_str().unwrap(), + "--alias", + "dupe", + "--robot", + ], + ); + + let output = assert.get_output(); + assert!(!output.status.success()); + + // Exit code 6 = ALIAS_EXISTS + let stderr_json = helpers::parse_robot_json(&output.stderr); + assert_eq!(stderr_json["ok"], false); + assert_eq!(stderr_json["error"]["code"], "ALIAS_EXISTS"); +} + +#[test] +fn test_fetch_force_overwrite() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "overwrite-me"); + + let fixture = helpers::fixture_path("minimal.json"); + let assert = helpers::run_cmd( + &env, + &[ + "fetch", + fixture.to_str().unwrap(), + "--alias", + "overwrite-me", + "--force", + "--robot", + ], + ) + .success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + assert_eq!(json["data"]["alias"], "overwrite-me"); + // After force overwrite, the title should be from minimal.json + assert_eq!(json["data"]["title"], "Minimal API"); +} + +#[test] +fn 
test_fetch_yaml_success() { + let env = helpers::TestEnv::new(); + let fixture = helpers::fixture_path("petstore.yaml"); + + let assert = helpers::run_cmd( + &env, + &[ + "fetch", + fixture.to_str().unwrap(), + "--alias", + "yaml-test", + "--robot", + ], + ) + .success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + assert_eq!(json["data"]["source_format"], "yaml"); + assert!(json["data"]["endpoint_count"].as_u64().unwrap() > 0); +} + +#[test] +fn test_fetch_stdin() { + let env = helpers::TestEnv::new(); + let fixture = helpers::fixture_path("petstore.json"); + let content = std::fs::read(&fixture).unwrap(); + + #[allow(deprecated)] + let assert = assert_cmd::Command::cargo_bin("swagger-cli") + .expect("binary not found") + .env("SWAGGER_CLI_HOME", &env.home_dir) + .args(["fetch", "-", "--alias", "stdin-test", "--robot"]) + .write_stdin(content) + .assert() + .success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + assert_eq!(json["data"]["alias"], "stdin-test"); +} + +// =========================================================================== +// List tests +// =========================================================================== + +#[test] +fn test_list_filter_by_method() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = + helpers::run_cmd(&env, &["list", "petstore", "--method", "GET", "--robot"]).success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + + let endpoints = json["data"]["endpoints"].as_array().unwrap(); + assert!(!endpoints.is_empty()); + for ep in endpoints { + assert_eq!(ep["method"], "GET"); + } +} + +#[test] +fn test_list_filter_by_tag() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = + helpers::run_cmd(&env, &["list", 
"petstore", "--tag", "store", "--robot"]).success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + + let endpoints = json["data"]["endpoints"].as_array().unwrap(); + assert!(!endpoints.is_empty()); + for ep in endpoints { + let tags = ep["tags"].as_array().unwrap(); + let tag_strings: Vec<&str> = tags.iter().filter_map(|t| t.as_str()).collect(); + assert!( + tag_strings + .iter() + .any(|t| t.to_lowercase().contains("store")), + "Expected 'store' tag in {tag_strings:?}" + ); + } +} + +#[test] +fn test_list_path_regex() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = + helpers::run_cmd(&env, &["list", "petstore", "--path", "pet.*", "--robot"]).success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + + let endpoints = json["data"]["endpoints"].as_array().unwrap(); + assert!(!endpoints.is_empty()); + for ep in endpoints { + let path = ep["path"].as_str().unwrap(); + assert!( + path.contains("pet"), + "Expected path to match 'pet.*', got: {path}" + ); + } +} + +#[test] +fn test_list_invalid_regex_error() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = helpers::run_cmd(&env, &["list", "petstore", "--path", "[invalid", "--robot"]); + + let output = assert.get_output(); + assert!(!output.status.success()); + + let stderr_json = helpers::parse_robot_json(&output.stderr); + assert_eq!(stderr_json["ok"], false); + assert_eq!(stderr_json["error"]["code"], "USAGE_ERROR"); +} + +#[test] +fn test_list_limit() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = helpers::run_cmd(&env, &["list", "petstore", "--limit", "2", "--robot"]).success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + + let endpoints = 
json["data"]["endpoints"].as_array().unwrap(); + assert!(endpoints.len() <= 2); +} + +#[test] +fn test_list_all_flag() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = helpers::run_cmd(&env, &["list", "petstore", "--all", "--robot"]).success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + + let endpoints = json["data"]["endpoints"].as_array().unwrap(); + let total = json["data"]["total"].as_u64().unwrap() as usize; + // With --all, endpoints shown should equal total (no truncation) + assert_eq!(endpoints.len(), total); +} + +// =========================================================================== +// Show tests +// =========================================================================== + +#[test] +fn test_show_endpoint_details() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + let assert = helpers::run_cmd( + &env, + &["show", "petstore", "/pets", "--method", "GET", "--robot"], + ) + .success(); + + let json = helpers::parse_robot_json(&assert.get_output().stdout); + assert_eq!(json["ok"], true); + assert_eq!(json["data"]["path"], "/pets"); + assert_eq!(json["data"]["method"], "GET"); + assert!(json["data"]["summary"].as_str().is_some()); +} + +#[test] +fn test_show_method_disambiguation() { + let env = helpers::TestEnv::new(); + helpers::fetch_fixture(&env, "petstore.json", "petstore"); + + // /pets has both GET and POST -- omitting --method should error + let assert = helpers::run_cmd(&env, &["show", "petstore", "/pets", "--robot"]); + + let output = assert.get_output(); + assert!(!output.status.success()); + + let stderr_json = helpers::parse_robot_json(&output.stderr); + assert_eq!(stderr_json["ok"], false); + assert_eq!(stderr_json["error"]["code"], "USAGE_ERROR"); +} + +// =========================================================================== +// Search tests +// 
// ===========================================================================
// Search tests
// ===========================================================================

#[test]
fn test_search_basic() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    let run = helpers::run_cmd(&env, &["search", "petstore", "pet", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    // The petstore fixture contains pet-related endpoints, so "pet" must hit.
    let total = json["data"]["total"].as_u64().unwrap();
    assert!(total > 0, "Expected search results for 'pet'");
}

#[test]
fn test_search_no_results() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    // A nonsense query must still succeed, just with an empty result set.
    let run = helpers::run_cmd(
        &env,
        &["search", "petstore", "zzzznonexistent99999", "--robot"],
    )
    .success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);
    assert_eq!(json["data"]["total"], 0);
    assert!(json["data"]["results"].as_array().unwrap().is_empty());
}

#[test]
fn test_search_case_insensitive() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    // Default search is case-insensitive: upper-case "PET" should match the
    // lower-case "pet" paths in the fixture.
    let run = helpers::run_cmd(&env, &["search", "petstore", "PET", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    let total = json["data"]["total"].as_u64().unwrap();
    assert!(
        total > 0,
        "Expected case-insensitive search for 'PET' to find results"
    );
}

// ===========================================================================
// Schemas tests
// ===========================================================================

#[test]
fn test_schemas_list() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    let run = helpers::run_cmd(&env, &["schemas", "petstore", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    let schemas = json["data"]["schemas"].as_array().unwrap();
    assert!(!schemas.is_empty());

    // Both fixture component schemas must be listed by name.
    let names: Vec<&str> = schemas.iter().filter_map(|s| s["name"].as_str()).collect();
    assert!(
        names.contains(&"Pet"),
        "Expected 'Pet' schema, got: {:?}",
        names
    );
    assert!(
        names.contains(&"Error"),
        "Expected 'Error' schema, got: {:?}",
        names
    );
}

#[test]
fn test_schemas_show() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    let run =
        helpers::run_cmd(&env, &["schemas", "petstore", "--show", "Pet", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);
    assert_eq!(json["data"]["name"], "Pet");
    assert!(json["data"]["schema"].is_object());
}

// ===========================================================================
// Tags tests
// ===========================================================================

#[test]
fn test_tags_list() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "petstore");

    let run = helpers::run_cmd(&env, &["tags", "petstore", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    let tags = json["data"]["tags"].as_array().unwrap();
    assert!(!tags.is_empty());

    let names: Vec<&str> = tags.iter().filter_map(|t| t["name"].as_str()).collect();
    assert!(
        names.contains(&"pets"),
        "Expected 'pets' tag, got: {:?}",
        names
    );
    assert!(
        names.contains(&"store"),
        "Expected 'store' tag, got: {:?}",
        names
    );

    // Every tag entry carries a numeric endpoint_count.
    for tag in tags {
        assert!(
            tag["endpoint_count"].as_u64().is_some(),
            "Expected endpoint_count on tag: {tag}"
        );
    }
}
// ===========================================================================
// Aliases tests
// ===========================================================================

#[test]
fn test_aliases_list() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "alias-one");
    helpers::fetch_fixture(&env, "minimal.json", "alias-two");

    let run = helpers::run_cmd(&env, &["aliases", "--list", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    // Both fetched aliases appear, and the count matches exactly since the
    // test env starts empty.
    let aliases = json["data"]["aliases"].as_array().unwrap();
    let names: Vec<&str> = aliases.iter().filter_map(|a| a["name"].as_str()).collect();
    assert!(
        names.contains(&"alias-one"),
        "Expected 'alias-one', got: {:?}",
        names
    );
    assert!(
        names.contains(&"alias-two"),
        "Expected 'alias-two', got: {:?}",
        names
    );
    assert_eq!(json["data"]["count"].as_u64().unwrap(), 2);
}

#[test]
fn test_aliases_set_default() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "my-default");

    let run =
        helpers::run_cmd(&env, &["aliases", "--set-default", "my-default", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);
    assert_eq!(json["data"]["name"], "my-default");

    // Round-trip: the default must be visible in a subsequent --list.
    let list_run = helpers::run_cmd(&env, &["aliases", "--list", "--robot"]).success();
    let list_json = helpers::parse_robot_json(&list_run.get_output().stdout);
    assert_eq!(list_json["data"]["default_alias"], "my-default");
}

// ===========================================================================
// Doctor tests
// ===========================================================================

#[test]
fn test_doctor_healthy() {
    let env = helpers::TestEnv::new();
    helpers::fetch_fixture(&env, "petstore.json", "healthy-spec");

    let run = helpers::run_cmd(&env, &["doctor", "--robot"]).success();

    let json = helpers::parse_robot_json(&run.get_output().stdout);
    assert_eq!(json["ok"], true);

    // Overall health may be "warning" rather than "healthy" when the
    // staleness threshold is configured very low, so accept either.
    let health = json["data"]["health"].as_str().unwrap();
    assert!(
        health == "healthy" || health == "warning",
        "Expected healthy or warning, got: {}",
        health
    );

    let aliases = json["data"]["aliases"].as_array().unwrap();
    assert!(!aliases.is_empty());

    // The freshly fetched spec itself must report as healthy with endpoints.
    let spec_report = aliases
        .iter()
        .find(|a| a["name"] == "healthy-spec")
        .unwrap();
    assert_eq!(spec_report["status"], "healthy");
    assert!(spec_report["endpoint_count"].as_u64().unwrap() > 0);
}