diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index 10f9cec..2e67dde 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -161,7 +161,7 @@ {"id":"bd-2g50","title":"Audit and fill data gaps: lore detail view vs glab","description":"## Background\nFor lore to be the definitive read path, its single-entity detail view must return everything glab returns PLUS lore-exclusive enrichments.\n\n## Current Issue Detail Output (lore -J issues N)\nFields returned: assignees, author_username, closing_merge_requests, created_at, description, discussions, due_date, id, iid, labels, milestone, project_path, state, status_color, status_icon_name, status_name, status_synced_at, title, updated_at, web_url\n\n## Gap Analysis (Verified 2026-02-12)\n\n### Raw Payload Audit\nIssue raw_payloads store exactly 15 fields: assignees, author, closed_at, created_at, description, due_date, id, iid, labels, milestone, project_id, state, title, updated_at, web_url.\n\nFields NOT in raw payloads (require ingestion pipeline update to capture from GitLab API):\n- closed_by, confidential, upvotes, downvotes, weight, issue_type, time_stats, health_status, references\n\n### Phase 1 — Computed fields (NO schema change, NO ingestion change)\nThese can be derived from existing data:\n1. `references_full`: format!(\"{path_with_namespace}#{iid}\") — project_path already in show.rs:IssueDetail\n2. `user_notes_count`: SELECT COUNT(*) FROM notes n JOIN discussions d ON n.discussion_id = d.id WHERE d.noteable_type = 'Issue' AND d.noteable_id = ? AND n.is_system = 0\n3. `merge_requests_count`: COUNT from closing_merge_requests vec already loaded in show.rs (just .len())\n\n### Phase 2 — Extract from existing raw payloads (schema change, NO ingestion change)\n`closed_at` IS in raw_payloads for closed issues. Can be backfilled:\n1. Add `closed_at TEXT` column to issues table (migration 023)\n2. 
Backfill: UPDATE issues SET closed_at = json_extract((SELECT payload FROM raw_payloads WHERE id = issues.raw_payload_id), '$.closed_at') WHERE state = 'closed'\n3. Capture during ingestion going forward\n\n### Phase 3 — Requires ingestion pipeline update (schema change + API capture)\nThese fields are in the GitLab Issues API response but NOT captured by lore's ingestion:\n1. `closed_by` (object with username) — add closed_by_username TEXT to issues\n2. `confidential` (boolean) — add confidential INTEGER DEFAULT 0 to issues\n3. Both require updating src/ingestion/ to extract these fields during sync\n\n### Phase 4 — Same audit for MR detail view\nMR detail (src/cli/commands/show.rs MrDetail struct lines 14-33) already includes: closed_at, merged_at, draft, source/target branch, reviewers. Missing: approvers_count, pipeline_status.\n\n## Implementation: show.rs Modifications\n\n### IssueDetail struct (src/cli/commands/show.rs:69-91)\nAdd fields:\n```rust\npub references_full: String, // Phase 1: computed\npub user_notes_count: i64, // Phase 1: computed\npub merge_requests_count: usize, // Phase 1: computed (closing_merge_requests.len())\npub closed_at: Option, // Phase 2: from DB after migration\npub confidential: bool, // Phase 3: from DB after ingestion update\n```\n\n### SQL for computed fields\n```sql\n-- user_notes_count\nSELECT COUNT(*) FROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = 'Issue' AND d.noteable_id = ?1 AND n.is_system = 0\n\n-- references_full (in Rust)\nformat!(\"{}#{}\", project_path, iid)\n\n-- merge_requests_count (in Rust)\nclosing_merge_requests.len()\n```\n\n## Migration 023 (after bd-2l3s takes 022)\n```sql\n-- migrations/023_issue_detail_fields.sql\nALTER TABLE issues ADD COLUMN closed_at TEXT;\nALTER TABLE issues ADD COLUMN confidential INTEGER NOT NULL DEFAULT 0;\n\n-- Backfill closed_at from raw_payloads\nUPDATE issues SET closed_at = (\n SELECT json_extract(rp.payload, '$.closed_at')\n FROM 
raw_payloads rp\n WHERE rp.id = issues.raw_payload_id\n) WHERE state = 'closed' AND raw_payload_id IS NOT NULL;\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (23, strftime('%s', 'now') * 1000, 'Issue detail fields: closed_at, confidential');\n```\n\nNOTE: raw_payload_id column on issues — verify this exists. If issues don't have a direct FK to raw_payloads, the backfill SQL needs adjustment (may need to join through another path).\n\n## TDD Loop\nRED: Tests in src/cli/commands/show.rs:\n- test_show_issue_has_references_full: insert issue with known project_path, assert JSON output contains \"project/path#123\"\n- test_show_issue_has_notes_count: insert issue + 3 user notes + 1 system note, assert user_notes_count = 3\n- test_show_issue_closed_has_closed_at: insert closed issue with closed_at in raw_payload, run migration, verify closed_at appears\n\nGREEN: Add computed fields to IssueDetail, add migration 023 for closed_at + confidential columns\n\nVERIFY:\n```bash\ncargo test show:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J issues 3864 | jq '{references_full, user_notes_count, merge_requests_count}'\n```\n\n## Acceptance Criteria\n- [ ] lore -J issues N includes references_full (string, e.g., \"vs/typescript-code#3864\")\n- [ ] lore -J issues N includes user_notes_count (integer, excludes system notes)\n- [ ] lore -J issues N includes merge_requests_count (integer)\n- [ ] lore -J issues N includes closed_at (ISO string for closed issues, null for open)\n- [ ] lore -J issues N includes confidential (boolean, after Phase 3)\n- [ ] --fields minimal preset updated to include references_full\n- [ ] Migration 023 adds closed_at and confidential columns to issues table\n- [ ] Backfill SQL populates closed_at from existing raw_payloads\n- [ ] cargo test passes with new show:: tests\n\n## Edge Cases\n- Issue with zero notes: user_notes_count = 0 (not null)\n- Issue with no closing MRs: merge_requests_count = 0\n- 
Open issue: closed_at = null (serialized as JSON null, not omitted)\n- confidential before Phase 3: default false (safe default)\n- MR detail: different computed fields (approvers_count, pipeline_status if available)\n- Raw payload missing for very old issues (raw_payload_id = NULL): closed_at stays NULL\n- raw_payload_id column: verify it exists on the issues table before writing backfill SQL\n\n## Files to Modify\n- src/cli/commands/show.rs (IssueDetail struct + query logic)\n- src/core/db.rs (migration 023: wire into MIGRATIONS array)\n- NEW: migrations/023_issue_detail_fields.sql\n- src/ingestion/ (Phase 3: capture closed_by, confidential during sync — specify exact file after reviewing ingestion pipeline)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T15:45:16.512418Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:01.580183Z","closed_at":"2026-02-12T16:49:01.580133Z","close_reason":"Data gaps filled: references_full, user_notes_count, merge_requests_count, closed_at, confidential via migration 023","compaction_level":0,"original_size":0,"labels":["cli","cli-imp","robot-mode"],"dependencies":[{"issue_id":"bd-2g50","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-03-04T20:00:21Z","created_by":"import"}]} {"id":"bd-2h0","title":"[CP1] gi list issues command","description":"List issues from the database.\n\n## Module\nsrc/cli/commands/list.rs\n\n## Clap Definition\nList {\n #[arg(value_parser = [\"issues\", \"mrs\"])]\n entity: String,\n \n #[arg(long, default_value = \"20\")]\n limit: usize,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long, value_parser = [\"opened\", \"closed\", \"all\"])]\n state: Option,\n}\n\n## Output Format\nIssues (showing 20 of 3,801)\n\n #1234 Authentication redesign opened @johndoe 3 days ago\n #1233 Fix memory leak in cache closed @janedoe 5 days ago\n #1232 Add dark mode support opened @bobsmith 1 week ago\n ...\n\n## Implementation\n- Query issues table with 
filters\n- Join with projects table for display\n- Format updated_at as relative time (\"3 days ago\")\n- Truncate title if too long\n\nFiles: src/cli/commands/list.rs\nDone when: List displays issues with proper filtering and formatting","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:23.809829Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.898106Z","closed_at":"2026-01-25T17:02:01.898106Z","deleted_at":"2026-01-25T17:02:01.898102Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2i10","title":"OBSERV: Add log file diagnostics to lore doctor","description":"## Background\nlore doctor is the diagnostic entry point. Adding log file info lets users verify logging is working and check disk usage. The existing DoctorChecks struct (src/cli/commands/doctor.rs:43-51) has checks for config, database, gitlab, projects, ollama.\n\n## Approach\nAdd a new LoggingCheck struct and field to DoctorChecks:\n\n```rust\n#[derive(Debug, Serialize)]\npub struct LoggingCheck {\n pub result: CheckResult,\n pub log_dir: String,\n pub file_count: usize,\n pub total_bytes: u64,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub oldest_file: Option,\n}\n```\n\nAdd to DoctorChecks (src/cli/commands/doctor.rs:43-51):\n```rust\npub logging: LoggingCheck,\n```\n\nImplement check_logging() function:\n```rust\nfn check_logging() -> LoggingCheck {\n let log_dir = get_log_dir(None); // TODO: accept config override\n let mut file_count = 0;\n let mut total_bytes = 0u64;\n let mut oldest: Option = None;\n\n if let Ok(entries) = std::fs::read_dir(&log_dir) {\n for entry in entries.flatten() {\n let name = entry.file_name().to_string_lossy().to_string();\n if name.starts_with(\"lore.\") && name.ends_with(\".log\") {\n file_count += 1;\n if let Ok(meta) = entry.metadata() {\n total_bytes += meta.len();\n }\n if oldest.as_ref().map_or(true, 
|o| name < *o) {\n oldest = Some(name);\n }\n }\n }\n }\n\n LoggingCheck {\n result: CheckResult { status: CheckStatus::Ok, message: None },\n log_dir: log_dir.display().to_string(),\n file_count,\n total_bytes,\n oldest_file: oldest,\n }\n}\n```\n\nCall from run_doctor() (src/cli/commands/doctor.rs:91-126) and add to DoctorChecks construction.\n\nFor interactive output in print_doctor_results(), add a section:\n```\nLogging\n Log directory: ~/.local/share/lore/logs/\n Log files: 7 (2.3 MB)\n Oldest: lore.2026-01-28.log\n```\n\n## Acceptance Criteria\n- [ ] lore doctor shows log directory path, file count, total size\n- [ ] lore --robot doctor JSON includes logging field with log_dir, file_count, total_bytes, oldest_file\n- [ ] When no log files exist: file_count=0, total_bytes=0, oldest_file=null\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/doctor.rs (add LoggingCheck struct, check_logging fn, wire into DoctorChecks)\n\n## TDD Loop\nRED: test_check_logging_with_files, test_check_logging_empty_dir\nGREEN: Implement LoggingCheck struct and check_logging function\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Log directory doesn't exist yet (first run before any sync): report file_count=0, status Ok\n- Permission errors on read_dir: report status Warning with message","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.682986Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.520915Z","closed_at":"2026-02-04T17:15:04.520868Z","close_reason":"Added LoggingCheck to DoctorChecks with log_dir, file_count, total_bytes; shows in both interactive and robot 
output","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2i10","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-03-04T20:00:21Z","created_by":"import"},{"issue_id":"bd-2i10","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-03-04T20:00:21Z","created_by":"import"}]} -{"id":"bd-2i3z","title":"Task 1: Register explain command and wire dispatch","description":"## Background\n\nAdd `lore explain issues N` / `lore explain mrs N` command to auto-generate structured narratives of what happened on an issue or MR. This task sets up the CLI registration, handler dispatch, parameter parsing, and skeleton run_explain() function.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md\n**Phase:** 1 — Setup & Registration\n\n## Why This Matters\n\nThe explain command is the entry point for the entire feature. Without CLI registration and parameter parsing, none of the core logic (key decisions, open threads, timeline) can be invoked. This task creates the skeleton that all other explain tasks build on top of.\n\n## Architecture Decisions\n\n### Command Registration Pattern\nUses inline args on the enum variant (like Drift, Related pattern). 
No separate ExplainArgs struct needed.\n\nEntity type validated by value_parser = [\"issues\", \"mrs\", \"issue\", \"mr\"].\nHandler normalizes singular forms: \"issue\" -> \"issues\", \"mr\" -> \"mrs\".\n\n### ExplainParams Struct\nControls explain behavior:\n\\`\\`\\`rust\npub struct ExplainParams {\n pub entity_type: String, // \"issues\" or \"mrs\" (already normalized)\n pub iid: i64,\n pub project: Option,\n pub sections: Option>, // None = all sections\n pub no_timeline: bool,\n pub max_decisions: usize, // default 10\n pub since: Option, // ms epoch threshold from --since parsing\n}\n\\`\\`\\`\n\n### ExplainResult and Sub-Types (full definitions)\n\\`\\`\\`rust\n#[derive(Debug, Serialize)]\npub struct ExplainResult {\n pub entity: EntitySummary,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description_excerpt: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub key_decisions: Option>,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub activity: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub open_threads: Option>,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub related: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub timeline_excerpt: Option>,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySummary {\n #[serde(rename = \"type\")]\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub author: String,\n pub assignees: Vec,\n pub labels: Vec,\n pub created_at: String, // ISO 8601\n pub updated_at: String, // ISO 8601\n pub url: Option,\n pub status_name: Option,\n}\n\n#[derive(Debug, Serialize)]\npub struct KeyDecision {\n pub timestamp: String, // ISO 8601\n pub actor: String,\n pub action: String, // \"state: closed\" or \"label: +bug\"\n pub context_note: String, // truncated to 500 chars\n}\n\n#[derive(Debug, Serialize)]\npub struct ActivitySummary {\n pub state_changes: usize,\n pub 
label_changes: usize,\n pub notes: usize, // non-system only\n pub first_event: Option, // ISO 8601\n pub last_event: Option, // ISO 8601\n}\n\n#[derive(Debug, Serialize)]\npub struct OpenThread {\n pub discussion_id: String,\n pub started_by: String,\n pub started_at: String, // ISO 8601\n pub note_count: usize,\n pub last_note_at: String, // ISO 8601\n}\n\n#[derive(Debug, Serialize)]\npub struct RelatedEntities {\n pub closing_mrs: Vec,\n pub related_issues: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct ClosingMrInfo {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n}\n\n#[derive(Debug, Serialize)]\npub struct RelatedEntityInfo {\n pub entity_type: String,\n pub iid: i64,\n pub title: Option,\n pub reference_type: String,\n}\n\n#[derive(Debug, Serialize)]\npub struct TimelineEventSummary {\n pub timestamp: String, // ISO 8601\n pub event_type: String,\n pub actor: Option,\n pub summary: String,\n}\n\\`\\`\\`\n\n### Clap Registration\n\\`\\`\\`rust\n/// Auto-generate a structured narrative of an issue or MR\n#[command(after_help = \"\\x1b[1mExamples:\\x1b[0m\n lore explain issues 42 # Narrative for issue #42\n lore explain mrs 99 -p group/repo # Narrative for MR !99 in specific project\n lore -J explain issues 42 # JSON output for automation\n lore explain issues 42 --sections key_decisions,open_threads # Specific sections only\n lore explain issues 42 --since 30d # Narrative scoped to last 30 days\n lore explain issues 42 --no-timeline # Skip timeline (faster)\")]\nExplain {\n /// Entity type: \"issues\" or \"mrs\" (singular forms also accepted)\n #[arg(value_parser = [\"issues\", \"mrs\", \"issue\", \"mr\"])]\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n /// Select specific sections (comma-separated)\n /// Valid: entity, description, key_decisions, activity, open_threads, related, timeline\n #[arg(long, value_delimiter = ',', help_heading 
= \"Output\")]\n sections: Option>,\n /// Skip timeline excerpt (faster execution)\n #[arg(long, help_heading = \"Output\")]\n no_timeline: bool,\n /// Maximum key decisions to include\n #[arg(long, default_value = \"10\", help_heading = \"Output\")]\n max_decisions: usize,\n /// Time scope for events/notes (e.g. 7d, 2w, 1m, or YYYY-MM-DD)\n #[arg(long, help_heading = \"Filters\")]\n since: Option,\n},\n\\`\\`\\`\n\n## Files to Create/Modify\n\n- NEW: src/cli/commands/explain.rs — command module with types, run_explain(), handle_explain(), and tests\n- EDIT: src/cli/mod.rs — add Explain variant to Commands enum with inline args (code above)\n- EDIT: src/cli/commands/mod.rs — add pub mod explain;\n- EDIT: src/main.rs — add Explain match arm calling handle_explain (like Drift/Related pattern)\n\n**Handler location:** Put handle_explain() in src/cli/commands/explain.rs. main.rs imports and calls it. This keeps the handler next to its logic.\n\n## run_explain() Signature and Skeleton\n\n\\`\\`\\`rust\npub fn run_explain(conn: &Connection, params: &ExplainParams) -> Result {\n // 1. Resolve project (if provided)\n let project_id = params.project.as_deref().map(|p| resolve_project(conn, p)).transpose()?;\n\n // 2. Find entity\n let (entity_summary, entity_local_id, project_path) = if params.entity_type == \"issues\" {\n find_explain_issue(conn, params.iid, project_id)?\n } else {\n find_explain_mr(conn, params.iid, project_id)?\n };\n\n // 3. Description excerpt\n let description_excerpt = if should_include(¶ms.sections, \"description\") {\n Some(/* first 500 chars or \"(no description)\" */)\n } else { None };\n\n // 4-7. 
Stub remaining sections as None (Tasks 2-4 fill these in)\n Ok(ExplainResult {\n entity: entity_summary,\n description_excerpt,\n key_decisions: if should_include(¶ms.sections, \"key_decisions\") { Some(vec![]) } else { None },\n activity: if should_include(¶ms.sections, \"activity\") { Some(ActivitySummary { state_changes: 0, label_changes: 0, notes: 0, first_event: None, last_event: None }) } else { None },\n open_threads: if should_include(¶ms.sections, \"open_threads\") { Some(vec![]) } else { None },\n related: if should_include(¶ms.sections, \"related\") { Some(RelatedEntities { closing_mrs: vec![], related_issues: vec![] }) } else { None },\n timeline_excerpt: None, // Task 4\n })\n}\n\nfn should_include(sections: &Option>, name: &str) -> bool {\n sections.as_ref().map_or(true, |s| s.iter().any(|sec| sec == name))\n}\n\\`\\`\\`\n\n## Key Implementation Notes\n\n- Copy find_issue/find_mr query patterns from show/issue.rs and show/mr.rs — they are private functions (inside include!() files) so cannot be imported\n- Use resolve_project() from crate::core::project for project resolution\n- Use ms_to_iso() from crate::core::time for timestamp conversion\n- Parse --since using crate::core::time::parse_since() — returns Option ms epoch threshold\n- Validate --sections values against allowed set in handle_explain before building ExplainParams\n- Entity section is always included (needed for identification) even if not in --sections list\n\n### Entity Resolution\nThe find_issue pattern in show/issue.rs:\n- fn find_issue(conn: &Connection, iid: i64, project_filter: Option<&str>) -> Result\n- Two SQL paths: with project_id (resolve_project first), or multi-project query\n- Returns NotFound (exit 17) or Ambiguous (exit 18) errors\n- Same pattern for find_mr in show/mr.rs\n\n### IssueRow / MrRow fields (from show/)\nIssue: id, iid, title, description, state, author_username, created_at, updated_at, closed_at, confidential, web_url, project_path, labels (comma-join), 
assignees (comma-join), status_name, status_category\nMR: id, iid, title, description, state, draft, author_username, source_branch, target_branch, created_at, updated_at, merged_at, closed_at, web_url, project_path, labels, assignees, reviewers\n\n## Test Helper (shared across all explain tests)\n\nCreate a setup_explain_db() helper that creates in-memory DB with migrations, inserts a test project, and returns (conn, project_id). Pattern from src/test_support.rs and existing test modules:\n\n\\`\\`\\`rust\nfn setup_explain_db() -> (Connection, i64) {\n let conn = crate::core::db::create_connection(std::path::Path::new(\":memory:\")).unwrap();\n crate::core::db::run_migrations(&conn).unwrap();\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url) VALUES (100, 'test/project', 'https://gitlab.example.com/test/project')\",\n [],\n ).unwrap();\n let project_id = conn.last_insert_rowid();\n (conn, project_id)\n}\n\\`\\`\\`\n\nInsert test issues/MRs with required columns:\n\\`\\`\\`rust\nfn insert_test_issue(conn: &Connection, project_id: i64, iid: i64, desc: Option<&str>) -> i64 {\n conn.execute(\n \"INSERT INTO issues (gitlab_id, iid, project_id, title, state, author_username, created_at, updated_at, last_seen_at, description) VALUES (?1, ?2, ?3, 'Test Issue', 'opened', 'testuser', 1704067200000, 1704153600000, 1704153600000, ?4)\",\n rusqlite::params![iid * 10, iid, project_id, desc],\n ).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\n## Boundaries\n\nAutonomous: Register command in CLI, wire dispatch, create all types, implement entity resolution and description_excerpt, write test helpers.\n\nAsk first: Adding new dependencies to Cargo.toml, modifying existing show/ or timeline/ modules.\n\nNever: No LLM calls, no API/network calls, no new DB migrations, do not modify show/ or timeline/ (copy patterns instead).\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. 
test_explain_issue_basic: Insert minimal issue + project + 1 discussion + 1 note + 1 state event into in-memory DB, call run_explain() with default ExplainParams, assert all 7 sections present in result (entity is always populated, rest are Some(...))\n2. test_explain_mr: Insert MR with merged_at, call run_explain(), assert entity.entity_type == \"merge_request\"\n3. test_explain_singular_entity_type: Call with entity_type: \"issue\", assert it normalizes to \"issues\" and resolves correctly\n\n### GREEN — Implement:\n\n- Explain variant in Commands enum (with all flags — use exact Clap code above)\n- Match arm in main.rs calling handle_explain\n- handle_explain() that normalizes entity_type, parses --since, validates --sections, builds ExplainParams, calls run_explain()\n- Skeleton run_explain() as shown above\n- find_explain_issue() and find_explain_mr() copied from show/ patterns\n- should_include() helper\n- Test helpers: setup_explain_db(), insert_test_issue(), insert_test_mr()\n\n### Verify:\ncargo test explain::tests:: && cargo clippy --all-targets -- -D warnings && cargo fmt --check\n\n## Acceptance Criteria\n\n- [ ] test_explain_issue_basic passes\n- [ ] test_explain_mr passes\n- [ ] test_explain_singular_entity_type passes\n- [ ] Command appears in lore --help with after_help examples\n- [ ] All ExplainResult types defined with serde Serialize + skip_serializing_if\n- [ ] run_explain() skeleton returns valid ExplainResult with entity populated and stubs for other sections\n- [ ] cargo clippy and cargo fmt clean","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-03-10T17:36:51.671656Z","created_by":"tayloreernisse","updated_at":"2026-03-10T18:20:02.901037Z","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-2i3z","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:36:51.673556Z","created_by":"tayloreernisse"}]} +{"id":"bd-2i3z","title":"Task 1: Register 
explain command and wire dispatch","description":"## Background\n\nAdd `lore explain issues N` / `lore explain mrs N` command to auto-generate structured narratives of what happened on an issue or MR. This task sets up the CLI registration, handler dispatch, parameter parsing, and skeleton run_explain() function.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md\n**Phase:** 1 — Setup & Registration\n\n## Why This Matters\n\nThe explain command is the entry point for the entire feature. Without CLI registration and parameter parsing, none of the core logic (key decisions, open threads, timeline) can be invoked. This task creates the skeleton that all other explain tasks build on top of.\n\n## Architecture Decisions\n\n### Command Registration Pattern\nUses inline args on the enum variant (like Drift, Related pattern). No separate ExplainArgs struct needed.\n\nEntity type validated by value_parser = [\"issues\", \"mrs\", \"issue\", \"mr\"].\nHandler normalizes singular forms: \"issue\" -> \"issues\", \"mr\" -> \"mrs\".\n\n### ExplainParams Struct\nControls explain behavior:\n\\`\\`\\`rust\npub struct ExplainParams {\n pub entity_type: String, // \"issues\" or \"mrs\" (already normalized)\n pub iid: i64,\n pub project: Option,\n pub sections: Option>, // None = all sections\n pub no_timeline: bool,\n pub max_decisions: usize, // default 10\n pub since: Option, // ms epoch threshold from --since parsing\n}\n\\`\\`\\`\n\n### ExplainResult and Sub-Types (full definitions)\n\\`\\`\\`rust\n#[derive(Debug, Serialize)]\npub struct ExplainResult {\n pub entity: EntitySummary,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub description_excerpt: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub key_decisions: Option>,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub activity: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub open_threads: Option>,\n #[serde(skip_serializing_if = 
\"Option::is_none\")]\n pub related: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub timeline_excerpt: Option>,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySummary {\n #[serde(rename = \"type\")]\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub author: String,\n pub assignees: Vec,\n pub labels: Vec,\n pub created_at: String, // ISO 8601\n pub updated_at: String, // ISO 8601\n pub url: Option,\n pub status_name: Option,\n}\n\n#[derive(Debug, Serialize)]\npub struct KeyDecision {\n pub timestamp: String, // ISO 8601\n pub actor: String,\n pub action: String, // \"state: closed\" or \"label: +bug\"\n pub context_note: String, // truncated to 500 chars\n}\n\n#[derive(Debug, Serialize)]\npub struct ActivitySummary {\n pub state_changes: usize,\n pub label_changes: usize,\n pub notes: usize, // non-system only\n pub first_event: Option, // ISO 8601\n pub last_event: Option, // ISO 8601\n}\n\n#[derive(Debug, Serialize)]\npub struct OpenThread {\n pub discussion_id: String,\n pub started_by: String,\n pub started_at: String, // ISO 8601\n pub note_count: usize,\n pub last_note_at: String, // ISO 8601\n}\n\n#[derive(Debug, Serialize)]\npub struct RelatedEntities {\n pub closing_mrs: Vec,\n pub related_issues: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct ClosingMrInfo {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n}\n\n#[derive(Debug, Serialize)]\npub struct RelatedEntityInfo {\n pub entity_type: String,\n pub iid: i64,\n pub title: Option,\n pub reference_type: String,\n}\n\n#[derive(Debug, Serialize)]\npub struct TimelineEventSummary {\n pub timestamp: String, // ISO 8601\n pub event_type: String,\n pub actor: Option,\n pub summary: String,\n}\n\\`\\`\\`\n\n### Clap Registration\n\\`\\`\\`rust\n/// Auto-generate a structured narrative of an issue or MR\n#[command(after_help = \"\\x1b[1mExamples:\\x1b[0m\n lore explain issues 42 
# Narrative for issue #42\n lore explain mrs 99 -p group/repo # Narrative for MR !99 in specific project\n lore -J explain issues 42 # JSON output for automation\n lore explain issues 42 --sections key_decisions,open_threads # Specific sections only\n lore explain issues 42 --since 30d # Narrative scoped to last 30 days\n lore explain issues 42 --no-timeline # Skip timeline (faster)\")]\nExplain {\n /// Entity type: \"issues\" or \"mrs\" (singular forms also accepted)\n #[arg(value_parser = [\"issues\", \"mrs\", \"issue\", \"mr\"])]\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n /// Select specific sections (comma-separated)\n /// Valid: entity, description, key_decisions, activity, open_threads, related, timeline\n #[arg(long, value_delimiter = ',', help_heading = \"Output\")]\n sections: Option>,\n /// Skip timeline excerpt (faster execution)\n #[arg(long, help_heading = \"Output\")]\n no_timeline: bool,\n /// Maximum key decisions to include\n #[arg(long, default_value = \"10\", help_heading = \"Output\")]\n max_decisions: usize,\n /// Time scope for events/notes (e.g. 7d, 2w, 1m, or YYYY-MM-DD)\n #[arg(long, help_heading = \"Filters\")]\n since: Option,\n},\n\\`\\`\\`\n\n## Files to Create/Modify\n\n- NEW: src/cli/commands/explain.rs — command module with types, run_explain(), handle_explain(), and tests\n- EDIT: src/cli/mod.rs — add Explain variant to Commands enum with inline args (code above)\n- EDIT: src/cli/commands/mod.rs — add pub mod explain;\n- EDIT: src/main.rs — add Explain match arm calling handle_explain (like Drift/Related pattern)\n\n**Handler location:** Put handle_explain() in src/cli/commands/explain.rs. main.rs imports and calls it. This keeps the handler next to its logic.\n\n## run_explain() Signature and Skeleton\n\n\\`\\`\\`rust\npub fn run_explain(conn: &Connection, params: &ExplainParams) -> Result {\n // 1. 
Resolve project (if provided)\n let project_id = params.project.as_deref().map(|p| resolve_project(conn, p)).transpose()?;\n\n // 2. Find entity\n let (entity_summary, entity_local_id, project_path) = if params.entity_type == \"issues\" {\n find_explain_issue(conn, params.iid, project_id)?\n } else {\n find_explain_mr(conn, params.iid, project_id)?\n };\n\n // 3. Description excerpt\n let description_excerpt = if should_include(¶ms.sections, \"description\") {\n Some(/* first 500 chars or \"(no description)\" */)\n } else { None };\n\n // 4-7. Stub remaining sections as None (Tasks 2-4 fill these in)\n Ok(ExplainResult {\n entity: entity_summary,\n description_excerpt,\n key_decisions: if should_include(¶ms.sections, \"key_decisions\") { Some(vec![]) } else { None },\n activity: if should_include(¶ms.sections, \"activity\") { Some(ActivitySummary { state_changes: 0, label_changes: 0, notes: 0, first_event: None, last_event: None }) } else { None },\n open_threads: if should_include(¶ms.sections, \"open_threads\") { Some(vec![]) } else { None },\n related: if should_include(¶ms.sections, \"related\") { Some(RelatedEntities { closing_mrs: vec![], related_issues: vec![] }) } else { None },\n timeline_excerpt: None, // Task 4\n })\n}\n\nfn should_include(sections: &Option>, name: &str) -> bool {\n sections.as_ref().map_or(true, |s| s.iter().any(|sec| sec == name))\n}\n\\`\\`\\`\n\n## Key Implementation Notes\n\n- Copy find_issue/find_mr query patterns from show/issue.rs and show/mr.rs — they are private functions (inside include!() files) so cannot be imported\n- Use resolve_project() from crate::core::project for project resolution\n- Use ms_to_iso() from crate::core::time for timestamp conversion\n- Parse --since using crate::core::time::parse_since() — returns Option ms epoch threshold\n- Validate --sections values against allowed set in handle_explain before building ExplainParams\n- Entity section is always included (needed for identification) even if not in 
--sections list\n\n### Entity Resolution\nThe find_issue pattern in show/issue.rs:\n- fn find_issue(conn: &Connection, iid: i64, project_filter: Option<&str>) -> Result\n- Two SQL paths: with project_id (resolve_project first), or multi-project query\n- Returns NotFound (exit 17) or Ambiguous (exit 18) errors\n- Same pattern for find_mr in show/mr.rs\n\n### IssueRow / MrRow fields (from show/)\nIssue: id, iid, title, description, state, author_username, created_at, updated_at, closed_at, confidential, web_url, project_path, labels (comma-join), assignees (comma-join), status_name, status_category\nMR: id, iid, title, description, state, draft, author_username, source_branch, target_branch, created_at, updated_at, merged_at, closed_at, web_url, project_path, labels, assignees, reviewers\n\n## Test Helper (shared across all explain tests)\n\nCreate a setup_explain_db() helper that creates in-memory DB with migrations, inserts a test project, and returns (conn, project_id). Pattern from src/test_support.rs and existing test modules:\n\n\\`\\`\\`rust\nfn setup_explain_db() -> (Connection, i64) {\n let conn = crate::core::db::create_connection(std::path::Path::new(\":memory:\")).unwrap();\n crate::core::db::run_migrations(&conn).unwrap();\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url) VALUES (100, 'test/project', 'https://gitlab.example.com/test/project')\",\n [],\n ).unwrap();\n let project_id = conn.last_insert_rowid();\n (conn, project_id)\n}\n\\`\\`\\`\n\nInsert test issues/MRs with required columns:\n\\`\\`\\`rust\nfn insert_test_issue(conn: &Connection, project_id: i64, iid: i64, desc: Option<&str>) -> i64 {\n conn.execute(\n \"INSERT INTO issues (gitlab_id, iid, project_id, title, state, author_username, created_at, updated_at, last_seen_at, description) VALUES (?1, ?2, ?3, 'Test Issue', 'opened', 'testuser', 1704067200000, 1704153600000, 1704153600000, ?4)\",\n rusqlite::params![iid * 10, iid, project_id, desc],\n 
).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\n## Boundaries\n\nAutonomous: Register command in CLI, wire dispatch, create all types, implement entity resolution and description_excerpt, write test helpers.\n\nAsk first: Adding new dependencies to Cargo.toml, modifying existing show/ or timeline/ modules.\n\nNever: No LLM calls, no API/network calls, no new DB migrations, do not modify show/ or timeline/ (copy patterns instead).\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. test_explain_issue_basic: Insert minimal issue + project + 1 discussion + 1 note + 1 state event into in-memory DB, call run_explain() with default ExplainParams, assert all 7 sections present in result (entity is always populated, rest are Some(...))\n2. test_explain_mr: Insert MR with merged_at, call run_explain(), assert entity.entity_type == \"merge_request\"\n3. test_explain_singular_entity_type: Call with entity_type: \"issue\", assert it normalizes to \"issues\" and resolves correctly\n\n### GREEN — Implement:\n\n- Explain variant in Commands enum (with all flags — use exact Clap code above)\n- Match arm in main.rs calling handle_explain\n- handle_explain() that normalizes entity_type, parses --since, validates --sections, builds ExplainParams, calls run_explain()\n- Skeleton run_explain() as shown above\n- find_explain_issue() and find_explain_mr() copied from show/ patterns\n- should_include() helper\n- Test helpers: setup_explain_db(), insert_test_issue(), insert_test_mr()\n\n### Verify:\ncargo test explain::tests:: && cargo clippy --all-targets -- -D warnings && cargo fmt --check\n\n## Acceptance Criteria\n\n- [ ] test_explain_issue_basic passes\n- [ ] test_explain_mr passes\n- [ ] test_explain_singular_entity_type passes\n- [ ] Command appears in lore --help with after_help examples\n- [ ] All ExplainResult types defined with serde Serialize + skip_serializing_if\n- [ ] run_explain() skeleton returns valid ExplainResult with 
entity populated and stubs for other sections\n- [ ] cargo clippy and cargo fmt clean","status":"closed","priority":2,"issue_type":"task","created_at":"2026-03-10T17:36:51.671656Z","created_by":"tayloreernisse","updated_at":"2026-03-10T18:37:44.327202Z","closed_at":"2026-03-10T18:37:44.327144Z","close_reason":"Explain command registered, CLI wired, entity resolution + description working, 6 tests passing","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-2i3z","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:36:51.673556Z","created_by":"tayloreernisse"}]} {"id":"bd-2ilv","title":"Implement robot JSON output for me command","description":"## Background\n`lore me` robot output must follow the existing lore envelope pattern and expose a stable machine contract. This bead is the canonical serializer for that contract.\n\nCritical schema decision: use `project_path` consistently across work items and activity payloads (matching `Me*` structs), not mixed `project`/`project_path` naming.\n\n## Approach\nImplement `render_robot` in `src/cli/commands/me/render_robot.rs` with a typed envelope:\n\n```rust\n#[derive(Serialize)]\nstruct MeJsonEnvelope {\n ok: bool,\n data: MeDashboard,\n meta: RobotMeta,\n}\n\npub fn render_robot(dashboard: &MeDashboard, elapsed_ms: u64) -> Result\n```\n\nRules:\n- Serialize `MeDashboard` directly under `data`.\n- Keep all section arrays present, including empty arrays.\n- Keep nullable fields as JSON `null`.\n- Emit compact JSON (single-line) for parity with existing robot commands.\n\nDo not apply `--fields` filtering in this bead; filtering behavior is layered by `bd-3jiq`.\n\n## Acceptance Criteria\n- [ ] Output is `{ok,data,meta}` envelope with `ok=true`\n- [ ] `meta.elapsed_ms` is present and numeric\n- [ ] `data` contains: `username`, `since_iso`, `summary`, `open_issues`, `open_mrs_authored`, `reviewing_mrs`, `activity`\n- [ ] Work item payloads use 
`project_path` field name (no `project` alias)\n- [ ] Activity payload uses `project_path` field name\n- [ ] `attention_state` serializes in snake_case\n- [ ] Issues include nullable `status_name`\n- [ ] Authored MRs include nullable `detailed_merge_status` and `draft`\n- [ ] Reviewing MRs include `author_username` and `draft`\n- [ ] Empty arrays serialize as `[]` (not omitted)\n- [ ] Serializer never panics in normal flow (returns Result and caller handles)\n\n## Files\n- CREATE: `src/cli/commands/me/render_robot.rs`\n- MODIFY: `src/cli/commands/me/mod.rs` (wire renderer)\n\n## TDD Anchor\nRED:\n- `test_robot_envelope_structure`\n- `test_robot_uses_project_path_field`\n- `test_robot_attention_state_snake_case`\n- `test_robot_empty_arrays_present`\n- `test_robot_nullable_fields_serialize_null`\n\nGREEN:\n- Implement typed envelope serializer and wire into handler.\n\nVERIFY:\n- `cargo test robot_envelope`\n- `cargo test me_robot`\n\n## Edge Cases\n- If JSON serialization fails unexpectedly, propagate error into command error path rather than falling back to ad-hoc JSON strings.\n- Keep key order deterministic only where serde guarantees struct order; tests should assert presence/shape, not strict key ordering.\n\n## Dependency Context\nConsumes data structs from `bd-3bwh` and `RobotMeta` from `src/cli/robot.rs`.\nFeeds `bd-3jiq` which applies section-array field projection on top of this full payload.\n\nDependencies:\n -> bd-3bwh (blocks) - Define dashboard data structs for me command\n\nDependents:\n <- bd-3jiq (blocks) - Implement --fields minimal preset for me robot mode","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:40:07.681875Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.063421Z","closed_at":"2026-02-20T16:09:13.063381Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ilv","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-03-04T20:00:21Z","created_by":"import"}]} {"id":"bd-2iq","title":"[CP1] Database migration 002_issues.sql","description":"## Background\n\nThe 002_issues.sql migration creates tables for issues, labels, issue_labels, discussions, and notes. This is the data foundation for Checkpoint 1, enabling issue ingestion with cursor-based sync, label tracking, and discussion storage.\n\n## Approach\n\nCreate `migrations/002_issues.sql` with complete SQL statements.\n\n### Full Migration SQL\n\n```sql\n-- Migration 002: Issue Ingestion Tables\n-- Applies on top of 001_initial.sql\n\n-- Issues table\nCREATE TABLE issues (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n iid INTEGER NOT NULL,\n title TEXT,\n description TEXT,\n state TEXT NOT NULL CHECK (state IN ('opened', 'closed')),\n author_username TEXT,\n created_at INTEGER NOT NULL, -- ms epoch UTC\n updated_at INTEGER NOT NULL, -- ms epoch UTC\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n discussions_synced_for_updated_at INTEGER, -- watermark for dependent sync\n web_url TEXT,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_issues_project_updated ON issues(project_id, updated_at);\nCREATE INDEX idx_issues_author ON issues(author_username);\nCREATE UNIQUE INDEX uq_issues_project_iid ON issues(project_id, iid);\n\n-- Labels table (name-only for CP1)\nCREATE TABLE labels (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER, -- optional, for future Labels API\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n name TEXT NOT NULL,\n color TEXT,\n description TEXT\n);\n\nCREATE UNIQUE INDEX uq_labels_project_name ON labels(project_id, name);\nCREATE INDEX idx_labels_name ON labels(name);\n\n-- Issue-label junction (DELETE before 
INSERT for stale removal)\nCREATE TABLE issue_labels (\n issue_id INTEGER NOT NULL REFERENCES issues(id) ON DELETE CASCADE,\n label_id INTEGER NOT NULL REFERENCES labels(id) ON DELETE CASCADE,\n PRIMARY KEY(issue_id, label_id)\n);\n\nCREATE INDEX idx_issue_labels_label ON issue_labels(label_id);\n\n-- Discussion threads for issues (MR discussions added in CP2)\nCREATE TABLE discussions (\n id INTEGER PRIMARY KEY,\n gitlab_discussion_id TEXT NOT NULL, -- GitLab string ID (e.g., \"6a9c1750b37d...\")\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n issue_id INTEGER REFERENCES issues(id) ON DELETE CASCADE,\n merge_request_id INTEGER, -- FK added in CP2 via ALTER TABLE\n noteable_type TEXT NOT NULL CHECK (noteable_type IN ('Issue', 'MergeRequest')),\n individual_note INTEGER NOT NULL DEFAULT 0, -- 0=threaded, 1=standalone\n first_note_at INTEGER, -- min(note.created_at) for ordering\n last_note_at INTEGER, -- max(note.created_at) for \"recently active\"\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n resolvable INTEGER NOT NULL DEFAULT 0, -- MR discussions can be resolved\n resolved INTEGER NOT NULL DEFAULT 0,\n CHECK (\n (noteable_type = 'Issue' AND issue_id IS NOT NULL AND merge_request_id IS NULL) OR\n (noteable_type = 'MergeRequest' AND merge_request_id IS NOT NULL AND issue_id IS NULL)\n )\n);\n\nCREATE UNIQUE INDEX uq_discussions_project_discussion_id ON discussions(project_id, gitlab_discussion_id);\nCREATE INDEX idx_discussions_issue ON discussions(issue_id);\nCREATE INDEX idx_discussions_mr ON discussions(merge_request_id);\nCREATE INDEX idx_discussions_last_note ON discussions(last_note_at);\n\n-- Notes belong to discussions\nCREATE TABLE notes (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n discussion_id INTEGER NOT NULL REFERENCES discussions(id) ON DELETE CASCADE,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n note_type TEXT, -- 'DiscussionNote' | 'DiffNote' | null\n 
is_system INTEGER NOT NULL DEFAULT 0, -- 1 for system-generated notes\n author_username TEXT,\n body TEXT,\n created_at INTEGER NOT NULL, -- ms epoch\n updated_at INTEGER NOT NULL, -- ms epoch\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n position INTEGER, -- 0-indexed array order from API\n resolvable INTEGER NOT NULL DEFAULT 0,\n resolved INTEGER NOT NULL DEFAULT 0,\n resolved_by TEXT,\n resolved_at INTEGER,\n -- DiffNote position metadata (populated for MR DiffNotes in CP2)\n position_old_path TEXT,\n position_new_path TEXT,\n position_old_line INTEGER,\n position_new_line INTEGER,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_notes_discussion ON notes(discussion_id);\nCREATE INDEX idx_notes_author ON notes(author_username);\nCREATE INDEX idx_notes_system ON notes(is_system);\n\n-- Update schema version\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (2, strftime('%s', 'now') * 1000, 'Issue ingestion tables');\n```\n\n## Acceptance Criteria\n\n- [ ] Migration file exists at `migrations/002_issues.sql`\n- [ ] All tables created: issues, labels, issue_labels, discussions, notes\n- [ ] All indexes created as specified\n- [ ] CHECK constraints on state and noteable_type work correctly\n- [ ] CASCADE deletes work (project deletion cascades)\n- [ ] Migration applies cleanly on fresh DB after 001_initial.sql\n- [ ] schema_version updated to 2 after migration\n- [ ] `gi doctor` shows schema_version = 2\n\n## Files\n\n- migrations/002_issues.sql (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/migration_tests.rs\n#[test] fn migration_002_creates_issues_table()\n#[test] fn migration_002_creates_labels_table()\n#[test] fn migration_002_creates_discussions_table()\n#[test] fn migration_002_creates_notes_table()\n#[test] fn migration_002_enforces_state_check()\n#[test] fn migration_002_enforces_noteable_type_check()\n#[test] fn migration_002_cascades_on_project_delete()\n```\n\nGREEN: Create migration 
file with all SQL\n\nVERIFY:\n```bash\n# Apply migration to test DB\nsqlite3 :memory: < migrations/001_initial.sql\nsqlite3 :memory: < migrations/002_issues.sql\n\n# Verify schema_version\nsqlite3 test.db \"SELECT version FROM schema_version ORDER BY version DESC LIMIT 1\"\n# Expected: 2\n\ncargo test migration_002\n```\n\n## Edge Cases\n\n- Applying twice - should fail on UNIQUE constraint (idempotency via version check)\n- Missing 001 - foreign key to projects fails\n- Long label names - TEXT handles any length\n- NULL description - allowed by schema\n- Empty discussions_synced_for_updated_at - NULL means never synced","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.128594Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:25:10.309900Z","closed_at":"2026-01-25T22:25:10.309852Z","close_reason":"Created 002_issues.sql with issues/labels/issue_labels/discussions/notes tables, 8 passing tests verify schema, constraints, and cascades","compaction_level":0,"original_size":0} {"id":"bd-2iqk","title":"Implement Doctor + Stats screens","description":"## Background\nDoctor shows environment health checks (config, auth, DB, Ollama). Stats shows database statistics (entity counts, index sizes, FTS coverage). 
Both are informational screens using ftui JsonView or simple table layouts.\n\n## Approach\nState:\n- DoctorState: checks (Vec), overall_status (Healthy|Warning|Error)\n- StatsState: entity_stats (EntityStats), index_stats (IndexStats), fts_stats (FtsStats)\n\nAction:\n- run_doctor(config, conn) -> Vec: reuses existing lore doctor logic\n- fetch_stats(conn) -> StatsData: reuses existing lore stats logic\n\nView:\n- Doctor: vertical list of health checks with pass/fail/warn indicators\n- Stats: table of entity counts, index sizes, FTS document count, embedding coverage\n\n## Acceptance Criteria\n- [ ] Doctor shows config, auth, DB, and Ollama health status\n- [ ] Stats shows entity counts matching lore --robot stats output\n- [ ] Both screens accessible via navigation (gd for Doctor)\n- [ ] Health check results color-coded: green pass, yellow warn, red fail\n\n## Files\n- CREATE: crates/lore-tui/src/state/doctor.rs\n- CREATE: crates/lore-tui/src/state/stats.rs\n- CREATE: crates/lore-tui/src/view/doctor.rs\n- CREATE: crates/lore-tui/src/view/stats.rs\n- MODIFY: crates/lore-tui/src/action.rs (add run_doctor, fetch_stats)\n\n## TDD Anchor\nRED: Write test_fetch_stats_counts that creates DB with known data, asserts fetch_stats returns correct counts.\nGREEN: Implement fetch_stats with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_stats\n\n## Edge Cases\n- Ollama not running: Doctor shows warning, not error (optional dependency)\n- Very large databases: stats queries should be fast (use shadow tables for FTS count)\n\n## Dependency Context\nUses existing doctor and stats logic from lore CLI commands.\nUses DbManager from \"Implement DbManager\" 
task.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T17:02:21.744226Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.357165Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2iqk","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-03-04T20:00:21Z","created_by":"import"},{"issue_id":"bd-2iqk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-03-04T20:00:21Z","created_by":"import"}]} @@ -273,7 +273,7 @@ {"id":"bd-3pxe","title":"Epic: TUI Phase 2.5 — Vertical Slice Gate","description":"## Background\nPhase 2.5 validates that the core screens work together end-to-end: Dashboard -> Issue List -> Issue Detail -> Sync flows correctly, performance SLOs are met, and there are no stuck-input bugs or cancel latency issues. This is a quality gate before investing in power features.\n\n## Acceptance Criteria\n- [ ] Dashboard + IssueList + IssueDetail + Sync screens integrated and navigable\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input-mode bugs across full flow\n- [ ] Cancel latency p95 < 2s\n- [ ] Bootstrap screen handles empty/incompatible databases gracefully","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:59:47.016586Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.211922Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3pxe","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-3pz","title":"OBSERV Epic: Phase 4 - Sync History Enrichment","description":"Wire up sync_runs INSERT/UPDATE lifecycle (table exists but nothing writes to it), schema migration 014, enhanced sync-status with recent runs and metrics.\n\nDepends on: Phase 3 (needs Vec to store in metrics_json)\nUnblocks: nothing (terminal phase)\n\nFiles: migrations/014_sync_runs_enrichment.sql (new), src/core/sync_run.rs (new), 
src/cli/commands/sync.rs, src/cli/commands/ingest.rs, src/cli/commands/sync_status.rs\n\nAcceptance criteria (PRD Section 6.4):\n- lore sync creates sync_runs row with status=running, updated to succeeded/failed\n- sync_runs.run_id matches log files and robot JSON\n- metrics_json contains serialized Vec\n- lore sync-status shows last 10 runs with metrics\n- Failed syncs record error and partial metrics\n- Migration 014 applies cleanly","status":"closed","priority":2,"issue_type":"epic","created_at":"2026-02-04T15:53:27.469149Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:43:07.375047Z","closed_at":"2026-02-04T17:43:07.375Z","close_reason":"Phase 4 complete: migration 014, SyncRunRecorder, wiring, sync-status enhancement","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-3pz","depends_on_id":"bd-3er","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-3q2","title":"Implement search filters module","description":"## Background\nSearch filters are applied post-retrieval to narrow results by source type, author, project, date, labels, and file paths. The filter module must preserve ranking order from the search pipeline (FTS/RRF scores). It uses SQLite's JSON1 extension (json_each) to pass ranked document IDs efficiently and maintain their original order.\n\n## Approach\nCreate `src/search/filters.rs` per PRD Section 3.3. The full implementation is specified in the PRD including the SQL query.\n\n**Key types:**\n- `SearchFilters` struct with all filter fields + `has_any_filter()` + `clamp_limit()`\n- `PathFilter` enum: `Prefix(String)` (trailing `/`) or `Exact(String)`\n\n**Core function:**\n```rust\npub fn apply_filters(\n conn: &Connection,\n document_ids: &[i64],\n filters: &SearchFilters,\n) -> Result>\n```\n\n**SQL pattern (JSON1 for ordered ID passing):**\n```sql\nSELECT d.id\nFROM json_each(?) 
AS j\nJOIN documents d ON d.id = j.value\nWHERE 1=1\n AND d.source_type = ? -- if source_type filter set\n AND d.author_username = ? -- if author filter set\n -- ... dynamic WHERE clauses\nORDER BY j.key -- preserves ranking order\nLIMIT ?\n```\n\n**Filter logic:**\n- Labels: AND logic via `EXISTS (SELECT 1 FROM document_labels dl WHERE dl.document_id = d.id AND dl.label_name = ?)`\n- Path prefix: `LIKE ? ESCAPE '\\\\'` with escaped wildcards\n- Path exact: `= ?`\n- Limit: clamped to [1, 100], default 20\n\n## Acceptance Criteria\n- [ ] source_type filter works (issue, merge_request, discussion)\n- [ ] author filter: exact username match\n- [ ] project_id filter: restricts to single project\n- [ ] after filter: created_at >= value\n- [ ] updated_after filter: updated_at >= value\n- [ ] labels filter: AND logic (all specified labels must be present)\n- [ ] path exact filter: matches exact path string\n- [ ] path prefix filter: trailing `/` triggers LIKE with escaped wildcards\n- [ ] Ranking order preserved (ORDER BY j.key from json_each)\n- [ ] Limit clamped: 0 -> 20 (default), 200 -> 100 (max)\n- [ ] Empty document_ids returns empty Vec\n- [ ] Multiple filters compose correctly (all applied via AND)\n- [ ] `cargo test filters` passes\n\n## Files\n- `src/search/filters.rs` — new file\n- `src/search/mod.rs` — add `pub use filters::{SearchFilters, PathFilter, apply_filters};`\n\n## TDD Loop\nRED: Tests in `filters.rs` `#[cfg(test)] mod tests`:\n- `test_no_filters` — all docs returned up to limit\n- `test_source_type_filter` — only issues returned\n- `test_author_filter` — exact match\n- `test_labels_and_logic` — must have ALL specified labels\n- `test_path_exact` — matches exact path\n- `test_path_prefix` — trailing slash matches prefix\n- `test_limit_clamping` — 0 -> 20, 200 -> 100\n- `test_ranking_preserved` — output order matches input order\n- `test_has_any_filter` — true when any filter set, false when default\nGREEN: Implement apply_filters with dynamic 
SQL\nVERIFY: `cargo test filters`\n\n## Edge Cases\n- Path containing SQL LIKE wildcards (`%`, `_`): must be escaped before LIKE\n- Empty labels list: no label filter applied (not \"must have zero labels\")\n- `has_any_filter()` returns false for default SearchFilters (no filters set)\n- Large document_ids array (1000+): JSON1 handles efficiently","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:13.042512Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:24:38.402483Z","closed_at":"2026-01-30T17:24:38.402302Z","close_reason":"Completed: SearchFilters with has_any_filter/clamp_limit, PathFilter enum, apply_filters with dynamic SQL + json_each ordering, escape_like, 8 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3q2","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} -{"id":"bd-3q5e","title":"Task 4: Wire timeline excerpt using existing pipeline","description":"## Background\n\nThe timeline excerpt section reuses the existing 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER) to provide a chronological event summary. 
This avoids reimplementing timeline logic.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 4\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers\n\n## Architecture\n\nImport existing pipeline:\n\\`\\`\\`rust\nuse crate::timeline::seed::seed_timeline_direct;\nuse crate::timeline::collect::collect_events;\nuse crate::timeline::types::{EntityRef, TimelineEvent, TimelineEventType, ExpandedEntityRef};\n\\`\\`\\`\n\n### Exact Function Signatures (verified from codebase):\n\n\\`\\`\\`rust\npub fn seed_timeline_direct(\n conn: &Connection,\n entity_type: &str, // \"issue\" or \"merge_request\" (NOT \"issues\"/\"mrs\")\n iid: i64,\n project_id: Option,\n) -> Result\n\npub struct SeedResult {\n pub seed_entities: Vec,\n pub evidence_notes: Vec,\n pub matched_discussions: Vec,\n pub search_mode: String,\n}\n\npub fn collect_events(\n conn: &Connection,\n seed_entities: &[EntityRef],\n expanded_entities: &[ExpandedEntityRef],\n evidence_notes: &[TimelineEvent],\n matched_discussions: &[MatchedDiscussion],\n since_ms: Option,\n limit: usize,\n) -> Result<(Vec, usize)>\n// IMPORTANT: Returns TUPLE. 
Second element is total_before_limit.\n\\`\\`\\`\n\n### Key Types\n\n\\`\\`\\`rust\npub struct EntityRef {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub entity_id: i64, // LOCAL SQLite row id\n pub entity_iid: i64,\n pub project_path: String,\n}\n\npub struct TimelineEvent {\n pub timestamp: i64, // ms epoch\n pub entity_type: String,\n pub entity_id: i64,\n pub entity_iid: i64,\n pub project_path: String,\n pub event_type: TimelineEventType,\n pub summary: String,\n pub actor: Option,\n pub url: Option,\n pub is_seed: bool,\n}\n\n// Tagged enum with snake_case variant names\n#[serde(tag = \"kind\", rename_all = \"snake_case\")]\npub enum TimelineEventType {\n Created,\n StateChanged { state: String },\n LabelAdded { label: String },\n LabelRemoved { label: String },\n MilestoneSet { milestone: String },\n MilestoneRemoved { milestone: String },\n Merged,\n NoteEvidence { note_id: i64, snippet: String, discussion_id: Option },\n DiscussionThread { discussion_id: i64, notes: Vec },\n CrossReferenced { target: String },\n}\n\\`\\`\\`\n\n### Simplified Pipeline for Explain\n\nWe skip HYDRATE and EXPAND (those are for cross-entity expansion). Simplified flow:\n1. seed_timeline_direct() -> SeedResult\n2. collect_events() with empty expanded_entities slice\n3. Pass limit=20 directly (collect_events caps internally)\n\n### entity_type Mapping\n\nseed_timeline_direct expects \"issue\" or \"merge_request\" (singular, no \"s\").\nExplainParams has \"issues\" or \"mrs\". Map:\n- \"issues\" -> \"issue\"\n- \"mrs\" -> \"merge_request\"\n\n### TimelineEventType to String Mapping\n\nFor TimelineEventSummary.event_type, match on the enum variant:\n\\`\\`\\`rust\nfn event_type_string(et: &TimelineEventType) -> &'static str {\n match et {\n TimelineEventType::Created => \"created\",\n TimelineEventType::StateChanged { .. } => \"state_changed\",\n TimelineEventType::LabelAdded { .. } => \"label_added\",\n TimelineEventType::LabelRemoved { .. 
} => \"label_removed\",\n TimelineEventType::MilestoneSet { .. } => \"milestone_set\",\n TimelineEventType::MilestoneRemoved { .. } => \"milestone_removed\",\n TimelineEventType::Merged => \"merged\",\n TimelineEventType::NoteEvidence { .. } => \"note\",\n TimelineEventType::DiscussionThread { .. } => \"discussion\",\n TimelineEventType::CrossReferenced { .. } => \"cross_referenced\",\n }\n}\n\\`\\`\\`\n\n## Logic\n\n\\`\\`\\`rust\nfn fetch_timeline_excerpt(\n conn: &Connection,\n params: &ExplainParams,\n entity_local_id: i64,\n project_path: &str,\n project_id: Option,\n) -> Result>> {\n if params.no_timeline || !should_include(¶ms.sections, \"timeline\") {\n return Ok(None);\n }\n\n // Map entity_type for timeline API\n let timeline_entity_type = if params.entity_type == \"issues\" { \"issue\" } else { \"merge_request\" };\n\n let seed = seed_timeline_direct(conn, timeline_entity_type, params.iid, project_id)?;\n let empty_expanded: Vec = vec![];\n let (events, _total) = collect_events(\n conn,\n &seed.seed_entities,\n &empty_expanded,\n &seed.evidence_notes,\n &seed.matched_discussions,\n params.since,\n 20, // cap at 20 events\n )?;\n\n let summaries: Vec = events.iter().map(|e| TimelineEventSummary {\n timestamp: ms_to_iso(e.timestamp),\n event_type: event_type_string(&e.event_type).to_owned(),\n actor: e.actor.clone(),\n summary: e.summary.clone(),\n }).collect();\n\n Ok(Some(summaries))\n}\n\\`\\`\\`\n\n--no-timeline takes precedence over --sections timeline (if both specified, skip timeline).\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add fetch_timeline_excerpt(), event_type_string(), wire into run_explain()\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. test_explain_timeline_excerpt: Insert issue + at least 1 state event + 1 note (to generate timeline events), call run_explain() with no_timeline: false and sections: None, assert timeline_excerpt is Some and non-empty and len <= 20\n2. 
test_explain_no_timeline_flag: Call run_explain() with no_timeline: true, assert timeline_excerpt is None\n\nNote: Timeline tests depend on the full pipeline working, so they need a realistic DB state with project, issue/MR, events, discussions, and notes.\n\n### GREEN — Implement fetch_timeline_excerpt() and event_type_string() as shown above. Wire into run_explain().\n\n### Verify:\ncargo test explain::tests::test_explain_timeline && cargo test explain::tests::test_explain_no_timeline_flag && cargo clippy --all-targets -- -D warnings\n\n## Implementation Notes\n\n- seed_timeline_direct builds seeds directly from entity (no search/embedding/Ollama needed)\n- EntityRef needs: entity_type (\"issue\"/\"merge_request\"), entity_id (LOCAL SQLite row id from find_explain_issue/mr), entity_iid, project_path\n- collect_events filters by since_ms internally — pass params.since directly\n- The timeline is the heaviest part of explain — --no-timeline exists for faster execution\n\n## Acceptance Criteria\n\n- [ ] test_explain_timeline_excerpt passes\n- [ ] test_explain_no_timeline_flag passes\n- [ ] Timeline excerpt present with max 20 events when enabled\n- [ ] Skipped entirely when --no-timeline\n- [ ] Uses existing timeline pipeline (no reimplementation)\n- [ ] event_type field uses snake_case variant names\n- [ ] cargo clippy and cargo fmt clean","status":"open","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:51.617962Z","created_by":"tayloreernisse","updated_at":"2026-03-10T17:52:01.517006Z","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-3q5e","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.792136Z","created_by":"tayloreernisse"},{"issue_id":"bd-3q5e","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:51.619764Z","created_by":"tayloreernisse"}]} +{"id":"bd-3q5e","title":"Task 4: Wire timeline excerpt using existing pipeline","description":"## 
Background\n\nThe timeline excerpt section reuses the existing 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER) to provide a chronological event summary. This avoids reimplementing timeline logic.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 4\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers\n\n## Architecture\n\nImport existing pipeline:\n\\`\\`\\`rust\nuse crate::timeline::seed::seed_timeline_direct;\nuse crate::timeline::collect::collect_events;\nuse crate::timeline::types::{EntityRef, TimelineEvent, TimelineEventType, ExpandedEntityRef};\n\\`\\`\\`\n\n### Exact Function Signatures (verified from codebase):\n\n\\`\\`\\`rust\npub fn seed_timeline_direct(\n conn: &Connection,\n entity_type: &str, // \"issue\" or \"merge_request\" (NOT \"issues\"/\"mrs\")\n iid: i64,\n project_id: Option,\n) -> Result\n\npub struct SeedResult {\n pub seed_entities: Vec,\n pub evidence_notes: Vec,\n pub matched_discussions: Vec,\n pub search_mode: String,\n}\n\npub fn collect_events(\n conn: &Connection,\n seed_entities: &[EntityRef],\n expanded_entities: &[ExpandedEntityRef],\n evidence_notes: &[TimelineEvent],\n matched_discussions: &[MatchedDiscussion],\n since_ms: Option,\n limit: usize,\n) -> Result<(Vec, usize)>\n// IMPORTANT: Returns TUPLE. 
Second element is total_before_limit.\n\\`\\`\\`\n\n### Key Types\n\n\\`\\`\\`rust\npub struct EntityRef {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub entity_id: i64, // LOCAL SQLite row id\n pub entity_iid: i64,\n pub project_path: String,\n}\n\npub struct TimelineEvent {\n pub timestamp: i64, // ms epoch\n pub entity_type: String,\n pub entity_id: i64,\n pub entity_iid: i64,\n pub project_path: String,\n pub event_type: TimelineEventType,\n pub summary: String,\n pub actor: Option,\n pub url: Option,\n pub is_seed: bool,\n}\n\n// Tagged enum with snake_case variant names\n#[serde(tag = \"kind\", rename_all = \"snake_case\")]\npub enum TimelineEventType {\n Created,\n StateChanged { state: String },\n LabelAdded { label: String },\n LabelRemoved { label: String },\n MilestoneSet { milestone: String },\n MilestoneRemoved { milestone: String },\n Merged,\n NoteEvidence { note_id: i64, snippet: String, discussion_id: Option },\n DiscussionThread { discussion_id: i64, notes: Vec },\n CrossReferenced { target: String },\n}\n\\`\\`\\`\n\n### Simplified Pipeline for Explain\n\nWe skip HYDRATE and EXPAND (those are for cross-entity expansion). Simplified flow:\n1. seed_timeline_direct() -> SeedResult\n2. collect_events() with empty expanded_entities slice\n3. Pass limit=20 directly (collect_events caps internally)\n\n### entity_type Mapping\n\nseed_timeline_direct expects \"issue\" or \"merge_request\" (singular, no \"s\").\nExplainParams has \"issues\" or \"mrs\". Map:\n- \"issues\" -> \"issue\"\n- \"mrs\" -> \"merge_request\"\n\n### TimelineEventType to String Mapping\n\nFor TimelineEventSummary.event_type, match on the enum variant:\n\\`\\`\\`rust\nfn event_type_string(et: &TimelineEventType) -> &'static str {\n match et {\n TimelineEventType::Created => \"created\",\n TimelineEventType::StateChanged { .. } => \"state_changed\",\n TimelineEventType::LabelAdded { .. } => \"label_added\",\n TimelineEventType::LabelRemoved { .. 
} => \"label_removed\",\n TimelineEventType::MilestoneSet { .. } => \"milestone_set\",\n TimelineEventType::MilestoneRemoved { .. } => \"milestone_removed\",\n TimelineEventType::Merged => \"merged\",\n TimelineEventType::NoteEvidence { .. } => \"note\",\n TimelineEventType::DiscussionThread { .. } => \"discussion\",\n TimelineEventType::CrossReferenced { .. } => \"cross_referenced\",\n }\n}\n\\`\\`\\`\n\n## Logic\n\n\\`\\`\\`rust\nfn fetch_timeline_excerpt(\n conn: &Connection,\n params: &ExplainParams,\n entity_local_id: i64,\n project_path: &str,\n project_id: Option,\n) -> Result>> {\n if params.no_timeline || !should_include(¶ms.sections, \"timeline\") {\n return Ok(None);\n }\n\n // Map entity_type for timeline API\n let timeline_entity_type = if params.entity_type == \"issues\" { \"issue\" } else { \"merge_request\" };\n\n let seed = seed_timeline_direct(conn, timeline_entity_type, params.iid, project_id)?;\n let empty_expanded: Vec = vec![];\n let (events, _total) = collect_events(\n conn,\n &seed.seed_entities,\n &empty_expanded,\n &seed.evidence_notes,\n &seed.matched_discussions,\n params.since,\n 20, // cap at 20 events\n )?;\n\n let summaries: Vec = events.iter().map(|e| TimelineEventSummary {\n timestamp: ms_to_iso(e.timestamp),\n event_type: event_type_string(&e.event_type).to_owned(),\n actor: e.actor.clone(),\n summary: e.summary.clone(),\n }).collect();\n\n Ok(Some(summaries))\n}\n\\`\\`\\`\n\n--no-timeline takes precedence over --sections timeline (if both specified, skip timeline).\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add fetch_timeline_excerpt(), event_type_string(), wire into run_explain()\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. test_explain_timeline_excerpt: Insert issue + at least 1 state event + 1 note (to generate timeline events), call run_explain() with no_timeline: false and sections: None, assert timeline_excerpt is Some and non-empty and len <= 20\n2. 
test_explain_no_timeline_flag: Call run_explain() with no_timeline: true, assert timeline_excerpt is None\n\nNote: Timeline tests depend on the full pipeline working, so they need a realistic DB state with project, issue/MR, events, discussions, and notes.\n\n### GREEN — Implement fetch_timeline_excerpt() and event_type_string() as shown above. Wire into run_explain().\n\n### Verify:\ncargo test explain::tests::test_explain_timeline && cargo test explain::tests::test_explain_no_timeline_flag && cargo clippy --all-targets -- -D warnings\n\n## Implementation Notes\n\n- seed_timeline_direct builds seeds directly from entity (no search/embedding/Ollama needed)\n- EntityRef needs: entity_type (\"issue\"/\"merge_request\"), entity_id (LOCAL SQLite row id from find_explain_issue/mr), entity_iid, project_path\n- collect_events filters by since_ms internally — pass params.since directly\n- The timeline is the heaviest part of explain — --no-timeline exists for faster execution\n\n## Acceptance Criteria\n\n- [ ] test_explain_timeline_excerpt passes\n- [ ] test_explain_no_timeline_flag passes\n- [ ] Timeline excerpt present with max 20 events when enabled\n- [ ] Skipped entirely when --no-timeline\n- [ ] Uses existing timeline pipeline (no reimplementation)\n- [ ] event_type field uses snake_case variant names\n- [ ] cargo clippy and cargo fmt clean","status":"closed","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:51.617962Z","created_by":"tayloreernisse","updated_at":"2026-03-10T19:03:29.060847Z","closed_at":"2026-03-10T19:03:29.060702Z","close_reason":"Implemented: all explain command sections (activity, open_threads, related, timeline_excerpt, human/robot renderers) merged into main explain.rs, robot_docs updated, autocorrect registered, 1046 tests 
passing","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-3q5e","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.792136Z","created_by":"tayloreernisse"},{"issue_id":"bd-3q5e","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:51.619764Z","created_by":"tayloreernisse"}]} {"id":"bd-3qm","title":"[CP1] Final validation - tests, smoke tests, integrity checks","description":"Run all tests and perform data integrity checks.\n\nValidation steps:\n1. Run all unit tests (vitest)\n2. Run all integration tests\n3. Run ESLint\n4. Run TypeScript strict check\n5. Manual smoke tests per PRD table\n6. Data integrity SQL checks:\n - Issue count matches GitLab\n - Every issue has raw_payload\n - Labels in junction exist in labels table\n - sync_cursors has entry per project\n - Re-run fetches 0 new items\n - Discussion count > 0\n - Every discussion has >= 1 note\n - individual_note=true has exactly 1 note\n\nFiles: All CP1 files\nDone when: All gate criteria from Definition of Done pass","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:20:51.994183Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.152852Z","closed_at":"2026-01-25T15:21:35.152852Z","deleted_at":"2026-01-25T15:21:35.152849Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-3qn6","title":"Rewrite who --path to use mr_file_changes for authorship signal","description":"## Problem\n\nwho --path currently only queries DiffNote records (notes.position_new_path), so it only finds people who left inline review comments on that exact file. 
This is highly misleading -- it reports 'no experts' for files that have been actively authored and reviewed, just without inline comments on that specific path.\n\n## Solution\n\nRewrite query_expert() to incorporate mr_file_changes as a primary signal source:\n\n1. MR authorship signal: JOIN mr_file_changes to find MR authors who touched the file (strongest signal)\n2. MR reviewer signal: JOIN mr_file_changes + merge_request_reviewers to find reviewers of MRs that touched the file (even without DiffNotes on that file)\n3. DiffNote signal: Keep existing DiffNote query as a supplementary signal (inline comments show deep familiarity)\n\n### Scoring weights (to tune):\n- MR author who touched the file: 15 points per MR\n- MR reviewer of MR touching the file: 10 points per MR\n- DiffNote reviewer on that file: 20 points per MR + 1 per note (existing)\n- DiffNote MR author: 12 points per MR (existing)\n\n### Path matching:\n- Reuse build_path_query() but extend DB probes to also check mr_file_changes.new_path\n- For prefix matching, LIKE on mr_file_changes.new_path\n\n### Also fix:\n- build_path_query() probes should check mr_file_changes in addition to notes, so path resolution works even when no DiffNotes exist\n\n## Acceptance Criteria\n- who --path returns results for files touched in MRs even without DiffNotes\n- Existing DiffNote-based scoring still contributes\n- build_path_query probes mr_file_changes for path existence\n- Tests cover: MR-only authorship, DiffNote-only, combined scoring\n- Robot mode JSON output unchanged (same schema)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T18:16:41.991344Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:34:25.704024Z","closed_at":"2026-02-08T18:34:25.703965Z","close_reason":"Rewrote query_expert() and query_overlap() in who.rs to incorporate mr_file_changes + mr_reviewers as signal sources alongside existing DiffNote data. 
Uses 4-branch UNION ALL with COUNT(DISTINCT CASE) for proper deduplication across signal types. 8 new tests, all 397 pass.","compaction_level":0,"original_size":0,"labels":["cli","phase-b","who"],"dependencies":[{"issue_id":"bd-3qn6","depends_on_id":"bd-2yo","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-3qs","title":"Implement lore generate-docs CLI command","description":"## Background\nThe generate-docs CLI command is the user-facing wrapper around the document regeneration pipeline. It has two modes: incremental (default, processes dirty_sources queue only) and full (seeds dirty_sources with ALL entities, then drains). Both modes use the same regenerator codepath to avoid logic divergence. Full mode uses keyset pagination (WHERE id > last_id) for seeding to avoid O(n^2) OFFSET degradation on large tables.\n\n## Approach\nCreate `src/cli/commands/generate_docs.rs` per PRD Section 2.4.\n\n**Core function:**\n```rust\npub fn run_generate_docs(\n config: &Config,\n full: bool,\n project_filter: Option<&str>,\n) -> Result\n```\n\n**Full mode seeding (keyset pagination):**\n```rust\nconst FULL_MODE_CHUNK_SIZE: usize = 2000;\n\n// For each source type (issues, MRs, discussions):\nlet mut last_id: i64 = 0;\nloop {\n let tx = conn.transaction()?;\n let inserted = tx.execute(\n \"INSERT INTO dirty_sources (source_type, source_id, queued_at, ...)\n SELECT 'issue', id, ?, 0, NULL, NULL, NULL\n FROM issues WHERE id > ? 
ORDER BY id LIMIT ?\n ON CONFLICT(source_type, source_id) DO NOTHING\",\n params![now_ms(), last_id, FULL_MODE_CHUNK_SIZE],\n )?;\n if inserted == 0 { tx.commit()?; break; }\n // Advance keyset cursor...\n tx.commit()?;\n}\n```\n\n**After draining (full mode only):**\n```sql\nINSERT INTO documents_fts(documents_fts) VALUES('optimize')\n```\n\n**CLI args:**\n```rust\n#[derive(Args)]\npub struct GenerateDocsArgs {\n #[arg(long)]\n full: bool,\n #[arg(long)]\n project: Option,\n}\n```\n\n**Output:** Human-readable table + JSON robot mode.\n\n## Acceptance Criteria\n- [ ] Default mode (no --full): processes only existing dirty_sources entries\n- [ ] --full mode: seeds dirty_sources with ALL issues, MRs, and discussions\n- [ ] Full mode uses keyset pagination (WHERE id > last_id, not OFFSET)\n- [ ] Full mode chunk size is 2000\n- [ ] Full mode does FTS optimize after completion\n- [ ] Both modes use regenerate_dirty_documents() (same codepath)\n- [ ] Progress bar shown in human mode (via indicatif)\n- [ ] JSON output in robot mode with GenerateDocsResult\n- [ ] GenerateDocsResult has issues/mrs/discussions/total/truncated/skipped counts\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/generate_docs.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod generate_docs;`\n- `src/cli/mod.rs` — add GenerateDocsArgs, wire up generate-docs subcommand\n- `src/main.rs` — add generate-docs command handler\n\n## TDD Loop\nRED: Integration test with seeded DB\nGREEN: Implement run_generate_docs with seeding + drain\nVERIFY: `cargo build && cargo test generate_docs`\n\n## Edge Cases\n- Empty database (no issues/MRs/discussions): full mode seeds nothing, returns all-zero counts\n- --project filter in full mode: only seed dirty_sources for entities in that project\n- Interrupted full mode: dirty_sources entries persist (ON CONFLICT DO NOTHING), resume by re-running\n- FTS optimize on empty FTS table: no-op 
(safe)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:55.226666Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:49:23.397157Z","closed_at":"2026-01-30T17:49:23.397098Z","close_reason":"Implemented generate-docs command with incremental + full mode, keyset pagination seeding, FTS optimize, project filter, human + JSON output. Builds clean.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3qs","depends_on_id":"bd-1u1","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-3qs","depends_on_id":"bd-221","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} @@ -296,7 +296,7 @@ {"id":"bd-9av","title":"[CP1] gi sync-status enhancement","description":"Enhance sync-status from CP0 stub to show issue cursors.\n\n## Changes to src/cli/commands/sync_status.rs\n\nUpdate the existing stub to show:\n- Last run timestamp and duration\n- Cursor positions per project (issues resource_type)\n- Entity counts (issues, discussions, notes)\n\n## Output Format\nLast sync: 2026-01-25 10:30:00 (succeeded, 45s)\n\nCursors:\n group/project-one\n issues: 2026-01-25T10:25:00Z (gitlab_id: 12345678)\n\nCounts:\n Issues: 1,234\n Discussions: 5,678\n Notes: 23,456 (4,567 system)\n\nFiles: src/cli/commands/sync_status.rs\nDone when: Shows cursor positions and counts after ingestion","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:27.246825Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.968507Z","closed_at":"2026-01-25T17:02:01.968507Z","deleted_at":"2026-01-25T17:02:01.968503Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-9cob","title":"Render MR sections (authored + reviewing) in human mode","description":"## Background\nRender the two MR sections in `lore me` human mode: authored MRs and reviewing MRs. 
Both share layout primitives but differ in section-specific fields.\n\n## Approach\nImplement in `src/cli/commands/me/render_human.rs`:\n```rust\npub fn render_authored_mrs_section(mrs: &[MeMrAuthored], single_project: bool) -> String\npub fn render_reviewing_mrs_section(mrs: &[MeMrReviewing], single_project: bool) -> String\n```\n\nShared row layout:\n- Line 1: attention icon + `!iid` + title + section-specific fields + relative time.\n- Line 2: dimmed `project_path` unless `single_project == true`.\n\nSection-specific fields:\n- Authored: optional `detailed_merge_status` and optional `DRAFT` indicator.\n- Reviewing: `@author_username` and optional `DRAFT` indicator.\n\nStyling:\n- `Theme::mr_ref()` for `!iid`\n- `Theme::username()` for `@author`\n- `Theme::state_draft()` for `DRAFT`\n- `Theme::dim()` for recency and project path\n- `section_divider()` for headers\n\nTimestamp conversion rule:\n- Convert `updated_at_iso` via `crate::core::time::iso_to_ms` before `format_relative_time`.\n- On parse failure, render `updated_at_iso` text fallback; do not panic.\n\nAttention rule:\n- `NotReady` renders no attention icon; `DRAFT` remains visible.\n\n## Acceptance Criteria\n- [ ] Headers are `Authored MRs (N)` and `Reviewing MRs (N)` via `section_divider`\n- [ ] `!iid` uses MR ref styling\n- [ ] `DRAFT` appears only when `draft=true`\n- [ ] Authored rows show `detailed_merge_status` only when `Some`\n- [ ] Reviewing rows show `@author_username`\n- [ ] Attention icon rendering matches state/tier rules\n- [ ] `NotReady` shows no icon but keeps draft indicator\n- [ ] Project path hidden when `single_project=true`\n- [ ] Empty authored section -> `No authored MRs`\n- [ ] Empty reviewing section -> `No MRs to review`\n- [ ] Invalid `updated_at_iso` does not panic and renders fallback text\n\n## Files\n- MODIFY: `src/cli/commands/me/render_human.rs`\n\n## TDD Anchor\nRED:\n- `test_authored_mr_shows_merge_status`\n- `test_authored_mr_shows_draft_indicator`\n- 
`test_authored_mr_hides_draft_when_false`\n- `test_reviewing_mr_shows_author_username`\n- `test_reviewing_mr_empty_section`\n- `test_authored_mr_omits_status_when_none`\n- `test_mr_section_invalid_iso_fallback`\n\nGREEN:\n- Implement both section renderers with shared helper + safe timestamp conversion.\n\nVERIFY:\n- `cargo test mr_section`\n\n## Edge Cases\n- Same MR can appear in both sections (author and reviewer); render independently.\n- `detailed_merge_status` may be absent; omit cleanly.\n- Sorting remains handler-owned; renderer does no resorting.\n\n## Dependency Context\nConsumes `MeMrAuthored`/`MeMrReviewing` from `bd-3bwh`, icon/summary context from `bd-1vxq`, and is called by `bd-1vv8`.\n\nDependencies:\n -> bd-1vxq (blocks) - Render summary header and attention legend\n\nDependents:\n <- bd-1vv8 (blocks) - Implement me command handler: wire queries to renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:39:25.770680Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.061026Z","closed_at":"2026-02-20T16:09:13.060991Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-9cob","depends_on_id":"bd-1vxq","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-9dd","title":"Implement 'lore trace' command with human and robot output","description":"## Background\n\nThe trace command is Gate 5's capstone CLI. It answers 'Why was this code introduced?' 
by building file -> MR -> issue -> discussion chains.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.3.\n\n## Codebase Context\n\n- CLI pattern: same as file-history (Commands enum, handler in main.rs)\n- trace.rs (bd-2n4): run_trace() returns TraceResult with chains\n- Path parsing: support 'src/foo.rs:45' syntax (line number for future Tier 2)\n- merge_requests.merged_at exists (migration 006) — use COALESCE(merged_at, updated_at) for ordering\n\n## Approach\n\n### 1. TraceArgs (`src/cli/mod.rs`):\n```rust\n#[derive(Parser)]\npub struct TraceArgs {\n pub path: String, // supports :line suffix\n #[arg(short = 'p', long)] pub project: Option,\n #[arg(long)] pub discussions: bool,\n #[arg(long = \"no-follow-renames\")] pub no_follow_renames: bool,\n #[arg(short = 'n', long = \"limit\", default_value = \"20\")] pub limit: usize,\n}\n```\n\n### 2. Path parsing:\n```rust\nfn parse_trace_path(input: &str) -> (String, Option) {\n if let Some((path, line)) = input.rsplit_once(':') {\n if let Ok(n) = line.parse::() { return (path.to_string(), Some(n)); }\n }\n (input.to_string(), None)\n}\n```\nIf line present: warn 'Line-level tracing requires Tier 2. Showing file-level results.'\n\n### 3. Human output shows chains with MR -> issue -> discussion context\n\n### 4. 
Robot JSON:\n```json\n{\"ok\": true, \"data\": {\"path\": \"...\", \"resolved_paths\": [...], \"trace_chains\": [...]}, \"meta\": {\"tier\": \"api_only\", \"line_requested\": null}}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore trace src/foo.rs` with human output\n- [ ] `lore --robot trace src/foo.rs` with JSON\n- [ ] :line suffix parses and emits Tier 2 warning\n- [ ] -p, --discussions, --no-follow-renames, -n all work\n- [ ] Rename-aware via resolve_rename_chain\n- [ ] meta.tier = 'api_only'\n- [ ] Added to VALID_COMMANDS and robot-docs\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/mod.rs` (TraceArgs + Commands::Trace)\n- `src/cli/commands/trace.rs` (NEW)\n- `src/cli/commands/mod.rs` (re-export)\n- `src/main.rs` (handler + VALID_COMMANDS + robot-docs)\n\n## TDD Loop\n\nRED:\n- `test_parse_trace_path_simple` - \"src/foo.rs\" -> (path, None)\n- `test_parse_trace_path_with_line` - \"src/foo.rs:42\" -> (path, Some(42))\n- `test_parse_trace_path_windows` - \"C:/foo.rs\" -> (path, None) — don't misparse drive letter\n\nGREEN: Implement CLI wiring and handlers.\n\nVERIFY: `cargo check --all-targets`\n\n## Edge Cases\n\n- Windows paths: don't misparse C: as line number\n- No MR data: friendly message with suggestion to sync\n- Very deep rename chain: bounded by resolve_rename_chain","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.788530Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:10:55.708488Z","closed_at":"2026-02-18T21:10:55.708445Z","close_reason":"Trace CLI implemented","compaction_level":0,"original_size":0,"labels":["cli","gate-5","phase-b"],"dependencies":[{"issue_id":"bd-9dd","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-9dd","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} -{"id":"bd-9lbr","title":"lore explain: auto-generate issue/MR 
narrative","description":"## Background\nGiven an issue or MR, auto-generate a structured narrative of what happened: who was involved, what decisions were made, what changed, and what is unresolved. Template-based v1 (no LLM dependency), deterministic and reproducible.\n\n## Current Infrastructure (Verified 2026-02-12)\n- show.rs: IssueDetail (line 69) and MrDetail (line 14) — entity detail with discussions\n- timeline.rs: 5-stage pipeline SHIPPED — chronological event reconstruction\n- notes table: 282K rows with body, author, created_at, is_system, discussion_id\n- discussions table: links notes to parent entity (noteable_type, noteable_id), has resolved flag\n- resource_state_events table: state changes with created_at, user_username (src/core/events_db.rs)\n- resource_label_events table: label add/remove with created_at, user_username\n- entity_references table (src/core/references.rs): cross-references between entities (closing MRs, related issues). Column names: `source_entity_type`, `source_entity_id`, `target_entity_type`, `target_entity_id`, `target_project_path`, `target_entity_iid`, `reference_type`, `source_method`\n\n## Approach\nNew command: `lore explain issues N` / `lore explain mrs N`\n\n### Data Assembly (reuse existing internals as library calls)\n1. Entity detail: reuse show.rs query logic for IssueDetail/MrDetail\n2. Timeline events: reuse timeline pipeline with entity-scoped seed\n3. Discussion notes:\n```sql\nSELECT n.id, n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = ? AND d.noteable_id = ?\n AND n.is_system = 0\nORDER BY n.created_at\n```\n4. 
Cross-references:\n```sql\nSELECT target_entity_type, target_entity_id, target_project_path,\n target_entity_iid, reference_type, source_method\nFROM entity_references\nWHERE (source_entity_type = ?1 AND source_entity_id = ?2)\nUNION ALL\nSELECT source_entity_type, source_entity_id, NULL,\n NULL, reference_type, source_method\nFROM entity_references\nWHERE (target_entity_type = ?1 AND target_entity_id = ?2)\n```\n\n### Key Decisions Heuristic\nNotes from assignees/author that follow state or label changes within 1 hour:\n```rust\nstruct StateOrLabelEvent {\n created_at: i64, // ms epoch\n user: String,\n description: String, // e.g. \"state: opened -> closed\" or \"label: +bug\"\n}\n\nfn extract_key_decisions(\n state_events: &[ResourceStateEvent],\n label_events: &[ResourceLabelEvent],\n notes: &[Note],\n) -> Vec {\n // Merge both event types into a unified chronological list\n let mut events: Vec = Vec::new();\n for e in state_events {\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"state: {} -> {}\", e.from_state.as_deref().unwrap_or(\"?\"), e.to_state),\n });\n }\n for e in label_events {\n let action = if e.action == \"add\" { \"+\" } else { \"-\" };\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"label: {}{}\", action, e.label_name.as_deref().unwrap_or(\"?\")),\n });\n }\n events.sort_by_key(|e| e.created_at);\n\n let mut decisions = Vec::new();\n let one_hour_ms: i64 = 60 * 60 * 1000;\n\n for event in &events {\n // Find notes by same actor within 60 min after the event\n for note in notes {\n if note.author_username == event.user\n && note.created_at >= event.created_at\n && note.created_at <= event.created_at + one_hour_ms\n {\n decisions.push(KeyDecision {\n timestamp: event.created_at,\n actor: event.user.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n break; // one 
note per event\n }\n }\n }\n decisions.truncate(10); // Cap at 10 key decisions\n decisions\n}\n```\n\n### Narrative Sections\n1. **Header**: title, author, opened date, state, assignees, labels, status_name\n2. **Description excerpt**: first 500 chars of description (or full if shorter)\n3. **Key decisions**: notes correlated with state/label changes (heuristic above)\n4. **Activity summary**: counts of state changes, label changes, notes, time range\n5. **Open threads**: discussions WHERE resolved = false\n6. **Related entities**: closing MRs (with state), related issues from entity_references\n7. **Timeline excerpt**: first 20 events from timeline pipeline\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": {\n \"type\": \"issue\", \"iid\": 3864, \"title\": \"...\", \"state\": \"opened\",\n \"author\": \"teernisse\", \"assignees\": [\"teernisse\"],\n \"labels\": [\"customer:BNSF\"], \"created_at\": \"...\", \"updated_at\": \"...\",\n \"url\": \"...\", \"status_name\": \"In progress\"\n },\n \"description_excerpt\": \"First 500 chars of description...\",\n \"key_decisions\": [{\n \"timestamp\": \"2026-01-15T...\",\n \"actor\": \"teernisse\",\n \"action\": \"state: opened -> in_progress\",\n \"context_note\": \"Starting work on the BNSF throw time integration...\"\n }],\n \"activity\": {\n \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42,\n \"first_event\": \"2026-01-10T...\", \"last_event\": \"2026-02-12T...\"\n },\n \"open_threads\": [{\n \"discussion_id\": \"abc123\",\n \"started_by\": \"cseiber\",\n \"started_at\": \"2026-02-01T...\",\n \"note_count\": 5,\n \"last_note_at\": \"2026-02-10T...\"\n }],\n \"related\": {\n \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }],\n \"related_issues\": [{ \"iid\": 3800, \"title\": \"Rail Break Card\", \"relation\": \"related\" }]\n },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": \"...\", \"actor\": \"...\", \"summary\": \"...\" 
}]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nExplain {\n /// Entity type: \"issues\" or \"mrs\"\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/explain.rs:\n- test_explain_issue_basic: insert issue + notes + state events, run explain, assert all sections present (entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt)\n- test_explain_key_decision_heuristic: insert state change event + note by same author within 30 min, assert note appears in key_decisions\n- test_explain_key_decision_ignores_unrelated_notes: insert note by different author, assert it does NOT appear in key_decisions\n- test_explain_open_threads: insert 2 discussions (1 resolved, 1 unresolved), assert only unresolved in open_threads\n- test_explain_no_notes: issue with zero notes produces header + description + empty sections\n- test_explain_mr: insert MR with merged_at, assert entity includes type=\"merge_request\"\n- test_explain_activity_counts: insert 3 state events + 2 label events + 10 notes, assert counts match\n\nGREEN: Implement explain command with section assembly\n\nVERIFY:\n```bash\ncargo test explain:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J explain issues 3864 | jq '.data | keys'\n# Should include: entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt\n```\n\n## Acceptance Criteria\n- [ ] lore explain issues N produces structured output for any synced issue\n- [ ] lore explain mrs N produces structured output for any synced MR\n- [ ] Robot mode returns all 7 sections\n- [ ] Human mode renders readable narrative with headers and indentation\n- [ ] Key decisions heuristic: captures notes within 60 min of state/label changes by same actor\n- [ ] Works fully offline 
(no API calls, no LLM)\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] Command registered in main.rs and robot-docs\n- [ ] key_decisions capped at 10, timeline_excerpt capped at 20 events\n\n## Edge Cases\n- Issue with empty description: description_excerpt = \"(no description)\"\n- Issue with 500+ notes: timeline_excerpt capped at 20, key_decisions capped at 10\n- Issue not found in local DB: exit code 17 with suggestion to sync\n- Ambiguous project: exit code 18 with suggestion to use -p flag\n- MR with no review activity: activity section shows zeros\n- Cross-project references: show as unresolved with project path hint\n- Notes that are pure code blocks: include in key_decisions if correlated with events (they may contain implementation decisions)\n- ResourceStateEvent/ResourceLabelEvent field names: check src/core/events_db.rs for exact struct definitions before implementing\n\n## Dependency Context\n- **bd-2g50 (data gaps)**: BLOCKER. Provides `closed_at` field on IssueDetail for the header section. 
Without it, explain can still show state=\"closed\" but won't have the exact close timestamp.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/explain.rs\n- src/cli/commands/mod.rs (add pub mod explain; re-export)\n- src/main.rs (register Explain subcommand in Commands enum, add handle_explain fn)\n- Reuse: show.rs queries, timeline pipeline, notes/discussions/resource_events queries from src/core/events_db.rs","status":"in_progress","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:46:41.386454Z","created_by":"tayloreernisse","updated_at":"2026-03-10T18:13:22.856696Z","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-9lbr","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} +{"id":"bd-9lbr","title":"lore explain: auto-generate issue/MR narrative","description":"## Background\nGiven an issue or MR, auto-generate a structured narrative of what happened: who was involved, what decisions were made, what changed, and what is unresolved. Template-based v1 (no LLM dependency), deterministic and reproducible.\n\n## Current Infrastructure (Verified 2026-02-12)\n- show.rs: IssueDetail (line 69) and MrDetail (line 14) — entity detail with discussions\n- timeline.rs: 5-stage pipeline SHIPPED — chronological event reconstruction\n- notes table: 282K rows with body, author, created_at, is_system, discussion_id\n- discussions table: links notes to parent entity (noteable_type, noteable_id), has resolved flag\n- resource_state_events table: state changes with created_at, user_username (src/core/events_db.rs)\n- resource_label_events table: label add/remove with created_at, user_username\n- entity_references table (src/core/references.rs): cross-references between entities (closing MRs, related issues). 
Column names: `source_entity_type`, `source_entity_id`, `target_entity_type`, `target_entity_id`, `target_project_path`, `target_entity_iid`, `reference_type`, `source_method`\n\n## Approach\nNew command: `lore explain issues N` / `lore explain mrs N`\n\n### Data Assembly (reuse existing internals as library calls)\n1. Entity detail: reuse show.rs query logic for IssueDetail/MrDetail\n2. Timeline events: reuse timeline pipeline with entity-scoped seed\n3. Discussion notes:\n```sql\nSELECT n.id, n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = ? AND d.noteable_id = ?\n AND n.is_system = 0\nORDER BY n.created_at\n```\n4. Cross-references:\n```sql\nSELECT target_entity_type, target_entity_id, target_project_path,\n target_entity_iid, reference_type, source_method\nFROM entity_references\nWHERE (source_entity_type = ?1 AND source_entity_id = ?2)\nUNION ALL\nSELECT source_entity_type, source_entity_id, NULL,\n NULL, reference_type, source_method\nFROM entity_references\nWHERE (target_entity_type = ?1 AND target_entity_id = ?2)\n```\n\n### Key Decisions Heuristic\nNotes from assignees/author that follow state or label changes within 1 hour:\n```rust\nstruct StateOrLabelEvent {\n created_at: i64, // ms epoch\n user: String,\n description: String, // e.g. 
\"state: opened -> closed\" or \"label: +bug\"\n}\n\nfn extract_key_decisions(\n state_events: &[ResourceStateEvent],\n label_events: &[ResourceLabelEvent],\n notes: &[Note],\n) -> Vec {\n // Merge both event types into a unified chronological list\n let mut events: Vec = Vec::new();\n for e in state_events {\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"state: {} -> {}\", e.from_state.as_deref().unwrap_or(\"?\"), e.to_state),\n });\n }\n for e in label_events {\n let action = if e.action == \"add\" { \"+\" } else { \"-\" };\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"label: {}{}\", action, e.label_name.as_deref().unwrap_or(\"?\")),\n });\n }\n events.sort_by_key(|e| e.created_at);\n\n let mut decisions = Vec::new();\n let one_hour_ms: i64 = 60 * 60 * 1000;\n\n for event in &events {\n // Find notes by same actor within 60 min after the event\n for note in notes {\n if note.author_username == event.user\n && note.created_at >= event.created_at\n && note.created_at <= event.created_at + one_hour_ms\n {\n decisions.push(KeyDecision {\n timestamp: event.created_at,\n actor: event.user.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n break; // one note per event\n }\n }\n }\n decisions.truncate(10); // Cap at 10 key decisions\n decisions\n}\n```\n\n### Narrative Sections\n1. **Header**: title, author, opened date, state, assignees, labels, status_name\n2. **Description excerpt**: first 500 chars of description (or full if shorter)\n3. **Key decisions**: notes correlated with state/label changes (heuristic above)\n4. **Activity summary**: counts of state changes, label changes, notes, time range\n5. **Open threads**: discussions WHERE resolved = false\n6. **Related entities**: closing MRs (with state), related issues from entity_references\n7. 
**Timeline excerpt**: first 20 events from timeline pipeline\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": {\n \"type\": \"issue\", \"iid\": 3864, \"title\": \"...\", \"state\": \"opened\",\n \"author\": \"teernisse\", \"assignees\": [\"teernisse\"],\n \"labels\": [\"customer:BNSF\"], \"created_at\": \"...\", \"updated_at\": \"...\",\n \"url\": \"...\", \"status_name\": \"In progress\"\n },\n \"description_excerpt\": \"First 500 chars of description...\",\n \"key_decisions\": [{\n \"timestamp\": \"2026-01-15T...\",\n \"actor\": \"teernisse\",\n \"action\": \"state: opened -> in_progress\",\n \"context_note\": \"Starting work on the BNSF throw time integration...\"\n }],\n \"activity\": {\n \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42,\n \"first_event\": \"2026-01-10T...\", \"last_event\": \"2026-02-12T...\"\n },\n \"open_threads\": [{\n \"discussion_id\": \"abc123\",\n \"started_by\": \"cseiber\",\n \"started_at\": \"2026-02-01T...\",\n \"note_count\": 5,\n \"last_note_at\": \"2026-02-10T...\"\n }],\n \"related\": {\n \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }],\n \"related_issues\": [{ \"iid\": 3800, \"title\": \"Rail Break Card\", \"relation\": \"related\" }]\n },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": \"...\", \"actor\": \"...\", \"summary\": \"...\" }]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nExplain {\n /// Entity type: \"issues\" or \"mrs\"\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/explain.rs:\n- test_explain_issue_basic: insert issue + notes + state events, run explain, assert all sections present (entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt)\n- 
test_explain_key_decision_heuristic: insert state change event + note by same author within 30 min, assert note appears in key_decisions\n- test_explain_key_decision_ignores_unrelated_notes: insert note by different author, assert it does NOT appear in key_decisions\n- test_explain_open_threads: insert 2 discussions (1 resolved, 1 unresolved), assert only unresolved in open_threads\n- test_explain_no_notes: issue with zero notes produces header + description + empty sections\n- test_explain_mr: insert MR with merged_at, assert entity includes type=\"merge_request\"\n- test_explain_activity_counts: insert 3 state events + 2 label events + 10 notes, assert counts match\n\nGREEN: Implement explain command with section assembly\n\nVERIFY:\n```bash\ncargo test explain:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J explain issues 3864 | jq '.data | keys'\n# Should include: entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt\n```\n\n## Acceptance Criteria\n- [ ] lore explain issues N produces structured output for any synced issue\n- [ ] lore explain mrs N produces structured output for any synced MR\n- [ ] Robot mode returns all 7 sections\n- [ ] Human mode renders readable narrative with headers and indentation\n- [ ] Key decisions heuristic: captures notes within 60 min of state/label changes by same actor\n- [ ] Works fully offline (no API calls, no LLM)\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] Command registered in main.rs and robot-docs\n- [ ] key_decisions capped at 10, timeline_excerpt capped at 20 events\n\n## Edge Cases\n- Issue with empty description: description_excerpt = \"(no description)\"\n- Issue with 500+ notes: timeline_excerpt capped at 20, key_decisions capped at 10\n- Issue not found in local DB: exit code 17 with suggestion to sync\n- Ambiguous project: exit code 18 with suggestion to use -p flag\n- MR with no review activity: activity section shows zeros\n- 
Cross-project references: show as unresolved with project path hint\n- Notes that are pure code blocks: include in key_decisions if correlated with events (they may contain implementation decisions)\n- ResourceStateEvent/ResourceLabelEvent field names: check src/core/events_db.rs for exact struct definitions before implementing\n\n## Dependency Context\n- **bd-2g50 (data gaps)**: BLOCKER. Provides `closed_at` field on IssueDetail for the header section. Without it, explain can still show state=\"closed\" but won't have the exact close timestamp.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/explain.rs\n- src/cli/commands/mod.rs (add pub mod explain; re-export)\n- src/main.rs (register Explain subcommand in Commands enum, add handle_explain fn)\n- Reuse: show.rs queries, timeline pipeline, notes/discussions/resource_events queries from src/core/events_db.rs","status":"closed","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:46:41.386454Z","created_by":"tayloreernisse","updated_at":"2026-03-10T19:03:58.948327Z","closed_at":"2026-03-10T19:03:58.948272Z","close_reason":"All 5 subtasks complete: entity resolution, key_decisions, activity/threads/related, timeline_excerpt, human+robot renderers. 1046 tests passing, clippy clean, autocorrect registered.","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-9lbr","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-9wl5","title":"NOTE-2G: Parent metadata change propagation to note documents","description":"## Background\nNote documents inherit labels and title from parent issue/MR. When parent metadata changes, note documents become stale. The existing pipeline already marks discussion documents dirty on parent changes — note documents need the same treatment.\n\n## Approach\nFind where ingestion detects parent entity changes and marks discussion documents dirty. 
The dirty marking for discussions happens in:\n- src/ingestion/discussions.rs line 127: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n- src/ingestion/mr_discussions.rs line 162 and 362: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n\nThese fire when a discussion is upserted (which happens when parent entity is re-ingested). For note documents, we need to additionally mark all non-system notes of that discussion as dirty:\n\nAfter each mark_dirty_tx for Discussion, add:\n // Mark child note documents dirty (they inherit parent metadata)\n let note_ids: Vec = tx.prepare(\"SELECT id FROM notes WHERE discussion_id = ? AND is_system = 0\")?\n .query_map([local_discussion_id], |r| r.get(0))?\n .collect::, _>>()?;\n for note_id in note_ids {\n dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, note_id)?;\n }\n\nAlternative (more efficient, set-based):\n INSERT INTO dirty_sources (source_type, source_id, queued_at)\n SELECT 'note', n.id, ?1\n FROM notes n\n WHERE n.discussion_id = ?2 AND n.is_system = 0\n ON CONFLICT(source_type, source_id) DO UPDATE SET queued_at = excluded.queued_at, attempt_count = 0\n\nUse the set-based approach for better performance with large discussions.\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (add note dirty marking after line 127)\n- MODIFY: src/ingestion/mr_discussions.rs (add note dirty marking after lines 162 and 362)\n\n## TDD Anchor\nRED: test_parent_title_change_marks_notes_dirty — change issue title, re-ingest discussions, assert note documents appear in dirty_sources.\nGREEN: Add set-based INSERT INTO dirty_sources after discussion dirty marking.\nVERIFY: cargo test parent_title_change_marks_notes -- --nocapture\nTests: test_parent_label_change_marks_notes_dirty (modify issue labels, re-ingest, check dirty queue)\n\n## Acceptance Criteria\n- [ ] Discussion upsert for issue marks child non-system note documents dirty\n- [ ] Discussion upsert for MR marks child non-system note documents 
dirty (both call sites)\n- [ ] Only non-system notes marked dirty (is_system = 0 filter)\n- [ ] Set-based SQL (not per-note loop) for performance\n- [ ] Both tests pass\n\n## Dependency Context\n- Depends on NOTE-2D (bd-2ezb): dirty tracking infrastructure for notes must exist (dirty_sources accepts source_type='note', regenerator handles it)\n\n## Edge Cases\n- Discussion with 0 non-system notes: set-based INSERT is a no-op\n- Discussion with 100+ notes: set-based approach handles efficiently in one SQL statement\n- Concurrent discussion ingestion: ON CONFLICT DO UPDATE handles race safely","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:40.292874Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.717576Z","closed_at":"2026-02-12T18:13:15.717528Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-a7ba","title":"Implement project scope resolution for me command","description":"## Background\nThe `lore me` command needs to resolve which project(s) to query. There are three modes: single project (--project), all projects (--all), or default (use default_project if set, else all). 
The existing `Config.effective_project()` method already handles CLI flag > default_project fallback, and `resolve_project()` in `src/core/project.rs` handles fuzzy matching.\n\n## Approach\nCreate in `src/cli/commands/me/mod.rs` (or a submodule):\n```rust\npub enum ProjectScope {\n Single(i64), // internal DB project.id (NOT gitlab_project_id)\n All,\n}\n\npub fn resolve_project_scope(\n project_flag: Option<&str>,\n all_flag: bool,\n config: &Config,\n conn: &Connection,\n) -> Result {\n // AC-8.4: mutual exclusivity\n if project_flag.is_some() && all_flag {\n return Err(LoreError::UsageError(\n \"Cannot use --project and --all together.\".to_string()\n ));\n }\n // AC-8.2: --all overrides everything\n if all_flag {\n return Ok(ProjectScope::All);\n }\n // Use effective_project: CLI flag > config.default_project\n let effective = project_flag.or(config.default_project.as_deref());\n match effective {\n Some(p) => Ok(ProjectScope::Single(resolve_project(conn, p)?)),\n None => Ok(ProjectScope::All), // AC-8.1: no default → all\n }\n}\n```\n\nImports:\n```rust\nuse crate::Config;\nuse crate::core::error::{LoreError, Result};\nuse crate::core::project::resolve_project;\nuse rusqlite::Connection;\n```\n\nNote: `resolve_project()` returns the internal DB `id` (not `gitlab_project_id`). Match strategy: exact → case-insensitive → suffix → substring. 
Returns `LoreError::Ambiguous` (exit 18) on multiple matches, `LoreError::Other` on no match.\n\n## Acceptance Criteria\n- [ ] `--project` and `--all` both passed → `LoreError::UsageError` with exit code 2 (AC-8.4)\n- [ ] `--project \"repo\"` → resolves via `resolve_project()` → `Single(id)` (AC-8.3)\n- [ ] `--all` → `All` (AC-8.2)\n- [ ] No flags + `config.default_project` set → resolve it → `Single(id)` (AC-8.1)\n- [ ] No flags + no default_project → `All` (AC-8.1)\n- [ ] Unknown --project → `LoreError::Other` (from resolve_project)\n- [ ] Ambiguous --project → `LoreError::Ambiguous` exit 18 (from resolve_project)\n\n## Files\n- MODIFY: src/cli/commands/me/mod.rs (ProjectScope enum + resolve function)\n\n## TDD Anchor\nRED: Write `test_project_and_all_mutually_exclusive` in an in-memory DB test. Pass both flags, assert error matches `LoreError::UsageError`.\nGREEN: Implement resolve_project_scope.\nVERIFY: `cargo test project_scope`\n\nAdditional tests:\n- test_project_flag_resolves (insert project, pass --project with matching path)\n- test_all_flag_returns_all\n- test_default_project_used (no CLI flag, config has default_project)\n- test_no_default_no_flags_returns_all\n\n## Edge Cases\n- `resolve_project` returns internal DB `id`, not `gitlab_project_id` — use the returned id for WHERE clauses on `project_id`\n- default_project value in config might not exist in DB (deleted/unsynced) — resolve_project will error\n\n## Dependency Context\nUses `LoreError::UsageError` variant from bd-1f1f.\nUses `resolve_project` from `src/core/project.rs` (existing, no bead needed).\nUses `Config.default_project` field from `src/core/config.rs`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:50.328852Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.047676Z","closed_at":"2026-02-20T16:09:13.047627Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-a7ba","depends_on_id":"bd-utt4","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-am7","title":"Implement embedding pipeline with chunking","description":"## Background\nThe embedding pipeline takes documents, chunks them (paragraph-boundary splitting with overlap), sends chunks to Ollama for embedding via async HTTP, and stores vectors in sqlite-vec + metadata. It uses keyset pagination, concurrent HTTP requests via FuturesUnordered, per-batch transactions, and dimension validation.\n\n## Approach\nCreate \\`src/embedding/pipeline.rs\\` per PRD Section 4.4. **The pipeline is async.**\n\n**Constants (per PRD):**\n```rust\nconst BATCH_SIZE: usize = 32; // texts per Ollama API call\nconst DB_PAGE_SIZE: usize = 500; // keyset pagination page size\nconst EXPECTED_DIMS: usize = 768; // nomic-embed-text dimensions\nconst CHUNK_MAX_CHARS: usize = 32_000; // max chars per chunk\nconst CHUNK_OVERLAP_CHARS: usize = 500; // overlap between chunks\n```\n\n**Core async function:**\n```rust\npub async fn embed_documents(\n conn: &Connection,\n client: &OllamaClient,\n selection: EmbedSelection,\n concurrency: usize, // max in-flight HTTP requests\n progress_callback: Option>,\n) -> Result\n```\n\n**EmbedSelection:** Pending | RetryFailed\n**EmbedResult:** { embedded, failed, skipped }\n\n**Algorithm (per PRD):**\n1. count_pending_documents(conn, selection) for progress total\n2. Keyset pagination loop: find_pending_documents(conn, DB_PAGE_SIZE, last_id, selection)\n3. For each page:\n a. Begin transaction\n b. For each doc: clear_document_embeddings(&tx, doc.id), split_into_chunks(&doc.content)\n c. Build ChunkWork items with doc_hash + chunk_hash\n d. Commit clearing transaction\n4. Batch ChunkWork texts into Ollama calls (BATCH_SIZE=32)\n5. Use **FuturesUnordered** for concurrent HTTP, cap at \\`concurrency\\`\n6. 
collect_writes() in per-batch transactions: validate dims (768), store LE bytes, write metadata\n7. On error: record_embedding_error per chunk (not abort)\n8. Advance keyset cursor\n\n**ChunkWork struct:**\n```rust\nstruct ChunkWork {\n doc_id: i64,\n chunk_index: usize,\n doc_hash: String, // SHA-256 of FULL document (staleness detection)\n chunk_hash: String, // SHA-256 of THIS chunk (provenance)\n text: String,\n}\n```\n\n**Splitting:** split_into_chunks(content) -> Vec<(usize, String)>\n- Documents <= CHUNK_MAX_CHARS: single chunk (index 0)\n- Longer: split at paragraph boundaries (\\\\n\\\\n), fallback to sentence/word, with CHUNK_OVERLAP_CHARS overlap\n\n**Storage:** embeddings as raw LE bytes, rowid = encode_rowid(doc_id, chunk_idx)\n**Staleness detection:** uses document_hash (not chunk_hash) because it's document-level\n\nAlso create \\`src/embedding/change_detector.rs\\` (referenced in PRD module structure):\n```rust\npub fn detect_embedding_changes(conn: &Connection) -> Result>;\n```\n\n## Acceptance Criteria\n- [ ] Pipeline is async (uses FuturesUnordered for concurrent HTTP)\n- [ ] concurrency parameter caps in-flight HTTP requests\n- [ ] progress_callback reports (processed, total)\n- [ ] New documents embedded, changed re-embedded, unchanged skipped\n- [ ] clear_document_embeddings before re-embedding (range delete vec0 + metadata)\n- [ ] Chunking at paragraph boundaries with 500-char overlap\n- [ ] Short documents (<32k chars) produce exactly 1 chunk\n- [ ] Embeddings stored as raw LE bytes in vec0\n- [ ] Rowids encoded via encode_rowid(doc_id, chunk_index)\n- [ ] Dimension validation: 768 floats per embedding (mismatch -> record error, not store)\n- [ ] Per-batch transactions for writes\n- [ ] Errors recorded in embedding_metadata per chunk (last_error, attempt_count)\n- [ ] Keyset pagination (d.id > last_id, not OFFSET)\n- [ ] Pending detection uses document_hash (not chunk_hash)\n- [ ] \\`cargo build\\` succeeds\n\n## Files\n- 
\\`src/embedding/pipeline.rs\\` — new file (async)\n- \\`src/embedding/change_detector.rs\\` — new file\n- \\`src/embedding/mod.rs\\` — add \\`pub mod pipeline; pub mod change_detector;\\` + re-exports\n\n## TDD Loop\nRED: Unit tests for chunking:\n- \\`test_short_document_single_chunk\\` — <32k produces [(0, full_content)]\n- \\`test_long_document_multiple_chunks\\` — >32k splits at paragraph boundaries\n- \\`test_chunk_overlap\\` — adjacent chunks share 500-char overlap\n- \\`test_no_paragraph_boundary\\` — falls back to char boundary\nIntegration tests need Ollama or mock.\nGREEN: Implement split_into_chunks, embed_documents (async)\nVERIFY: \\`cargo test pipeline\\`\n\n## Edge Cases\n- Empty document content_text: skip (don't embed)\n- No paragraph boundaries: split at CHUNK_MAX_CHARS with overlap\n- Ollama error for one batch: record error per chunk, continue with next batch\n- Dimension mismatch (model returns 512 instead of 768): record error, don't store corrupt data\n- Document deleted between pagination and embedding: skip gracefully","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.093701Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:58:58.908585Z","closed_at":"2026-01-30T17:58:58.908525Z","close_reason":"Implemented embedding pipeline: chunking at paragraph boundaries with 500-char overlap, change detector (keyset pagination, hash-based staleness), async embed via Ollama with batch processing, dimension validation, per-chunk error recording, LE byte vector storage. 7 chunking tests pass. 
289 total tests.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-am7","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-am7","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-am7","depends_on_id":"bd-335","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} @@ -340,7 +340,7 @@ {"id":"bd-m7k1","title":"WHO: Active mode query (query_active)","description":"## Background\n\nActive mode answers \"What discussions are actively in progress?\" by finding unresolved resolvable discussions with recent activity. This is the most complex query due to the CTE structure and the dual SQL variant requirement.\n\n## Approach\n\n### Two static SQL variants (CRITICAL — not nullable-OR):\nActive mode uses separate global vs project-scoped SQL strings because:\n- With (?N IS NULL OR d.project_id = ?N), SQLite can't commit to either index at prepare time\n- Global queries need idx_discussions_unresolved_recent_global (single-column last_note_at)\n- Scoped queries need idx_discussions_unresolved_recent (project_id, last_note_at)\n- Selected at runtime: `match project_id { None => sql_global, Some(pid) => sql_scoped }`\n\n### CTE structure (4 stages):\n```sql\nWITH picked AS (\n -- Stage 1: Select limited discussions using the right index\n SELECT d.id, d.noteable_type, d.issue_id, d.merge_request_id,\n d.project_id, d.last_note_at\n FROM discussions d\n WHERE d.resolvable = 1 AND d.resolved = 0\n AND d.last_note_at >= ?1\n ORDER BY d.last_note_at DESC LIMIT ?2\n),\nnote_counts AS (\n -- Stage 2: Count all non-system notes per discussion (ACTUAL note count)\n SELECT n.discussion_id, COUNT(*) AS note_count\n FROM notes n JOIN picked p ON p.id = n.discussion_id\n WHERE n.is_system = 0\n GROUP BY n.discussion_id\n),\nparticipants AS (\n -- Stage 3: Distinct usernames per discussion, then GROUP_CONCAT\n SELECT 
x.discussion_id, GROUP_CONCAT(x.author_username, X'1F') AS participants\n FROM (\n SELECT DISTINCT n.discussion_id, n.author_username\n FROM notes n JOIN picked p ON p.id = n.discussion_id\n WHERE n.is_system = 0 AND n.author_username IS NOT NULL\n ) x\n GROUP BY x.discussion_id\n)\n-- Stage 4: Join everything\nSELECT p.id, p.noteable_type, COALESCE(i.iid, m.iid), COALESCE(i.title, m.title),\n proj.path_with_namespace, p.last_note_at,\n COALESCE(nc.note_count, 0), COALESCE(pa.participants, '')\nFROM picked p\nJOIN projects proj ON p.project_id = proj.id\nLEFT JOIN issues i ON p.issue_id = i.id\nLEFT JOIN merge_requests m ON p.merge_request_id = m.id\nLEFT JOIN note_counts nc ON nc.discussion_id = p.id\nLEFT JOIN participants pa ON pa.discussion_id = p.id\nORDER BY p.last_note_at DESC\n```\n\n### CRITICAL BUG PREVENTION: note_counts and participants MUST be separate CTEs.\nA single CTE with `SELECT DISTINCT discussion_id, author_username` then `COUNT(*)` produces a PARTICIPANT count, not a NOTE count. 
A discussion with 5 notes from 2 people would show note_count: 2 instead of 5.\n\n### Participants post-processing in Rust:\n```rust\nlet mut participants: Vec = csv.split('\\x1F').map(String::from).collect();\nparticipants.sort(); // deterministic — GROUP_CONCAT order is undefined\nconst MAX_PARTICIPANTS: usize = 50;\nlet participants_total = participants.len() as u32;\nlet participants_truncated = participants.len() > MAX_PARTICIPANTS;\n```\n\n### Total count also uses two variants (global/scoped), same match pattern.\n\n### Unit separator X'1F' for GROUP_CONCAT (not comma — usernames could theoretically contain commas)\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_active_query — insert discussion + 2 notes by same user; verify:\n - total_unresolved_in_window = 1\n - discussions.len() = 1\n - participants = [\"reviewer_b\"]\n - note_count = 2 (NOT 1 — this was a real regression in iteration 4)\n - discussion_id > 0\ntest_active_participants_sorted — insert notes by zebra_user then alpha_user; verify sorted [\"alpha_user\", \"zebra_user\"]\n```\n\nGREEN: Implement query_active with both SQL variants and the shared map_row closure\nVERIFY: `cargo test -- active`\n\n## Acceptance Criteria\n\n- [ ] test_active_query passes with note_count = 2 (not participant count)\n- [ ] test_active_participants_sorted passes (alphabetical order)\n- [ ] discussion_id included in output (stable entity ID for agents)\n- [ ] Default since window: 7d\n- [ ] Bounded participants: cap 50, with total + truncated metadata\n\n## Edge Cases\n\n- note_count vs participant_count: MUST be separate CTEs (see bug prevention above)\n- GROUP_CONCAT order is undefined — sort participants in Rust after parsing\n- SQLite doesn't support GROUP_CONCAT(DISTINCT col, separator) — use subquery with SELECT DISTINCT then GROUP_CONCAT\n- Two SQL variants: prepare exactly ONE statement per invocation (don't prepare both)\n- entity_type mapping: \"MergeRequest\" -> \"MR\", else 
\"Issue\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:38.995549Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.598085Z","closed_at":"2026-02-08T04:10:29.598047Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-m7k1","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-m7k1","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-mem","title":"Implement shared backoff utility","description":"## Background\nBoth `dirty_sources` and `pending_discussion_fetches` tables use exponential backoff with `next_attempt_at` timestamps. Without a shared utility, each module would duplicate the backoff curve logic, risking drift. The shared backoff module ensures consistent retry behavior across all queue consumers in Gate C.\n\n## Approach\nCreate `src/core/backoff.rs` per PRD Section 6.X.\n\n**IMPORTANT — PRD-exact signature and implementation:**\n```rust\nuse rand::Rng;\n\n/// Compute next_attempt_at with exponential backoff and jitter.\n///\n/// Formula: now + min(3600000, 1000 * 2^attempt_count) * (0.9 to 1.1)\n/// - Capped at 1 hour to prevent runaway delays\n/// - ±10% jitter prevents synchronized retries after outages\n///\n/// Used by:\n/// - `dirty_sources` retry scheduling (document regeneration failures)\n/// - `pending_discussion_fetches` retry scheduling (API fetch failures)\n///\n/// Having one implementation prevents subtle divergence between queues\n/// (e.g., different caps or jitter ranges).\npub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> i64 {\n // Cap attempt_count to prevent overflow (2^30 > 1 hour anyway)\n let capped_attempts = attempt_count.min(30) as u32;\n let base_delay_ms = 
1000_i64.saturating_mul(1 << capped_attempts);\n let capped_delay_ms = base_delay_ms.min(3_600_000); // 1 hour cap\n\n // Add ±10% jitter\n let jitter_factor = rand::thread_rng().gen_range(0.9..=1.1);\n let delay_with_jitter = (capped_delay_ms as f64 * jitter_factor) as i64;\n\n now + delay_with_jitter\n}\n```\n\n**Key PRD details (must match exactly):**\n- `attempt_count` parameter is `i64` (not `u32`) — matches SQLite integer type from DB columns\n- Overflow prevention: `.min(30) as u32` caps before shift\n- Base delay: `1000_i64.saturating_mul(1 << capped_attempts)` — uses `saturating_mul` for safety\n- Cap: `3_600_000` (1 hour)\n- Jitter: `gen_range(0.9..=1.1)` — inclusive range\n- Return: `i64` (milliseconds epoch)\n\n**Cargo.toml change:** Add `rand = \"0.8\"` to `[dependencies]`.\n\n## Acceptance Criteria\n- [ ] Single shared implementation used by both dirty_tracker and discussion_queue\n- [ ] Signature: `pub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> i64`\n- [ ] attempt_count is i64 (matches SQLite column type), not u32\n- [ ] Overflow prevention: `.min(30) as u32` before shift\n- [ ] Base delay uses `1000_i64.saturating_mul(1 << capped_attempts)`\n- [ ] Cap at 1 hour (3,600,000 ms)\n- [ ] Jitter: `gen_range(0.9..=1.1)` inclusive range\n- [ ] Exponential curve: 1s, 2s, 4s, 8s, ... 
up to 1h cap\n- [ ] `cargo test backoff` passes\n\n## Files\n- `src/core/backoff.rs` — new file\n- `src/core/mod.rs` — add `pub mod backoff;`\n- `Cargo.toml` — add `rand = \"0.8\"`\n\n## TDD Loop\nRED: `src/core/backoff.rs` with `#[cfg(test)] mod tests`:\n- `test_exponential_curve` — verify delays double each attempt (within jitter range)\n- `test_cap_at_one_hour` — attempt 20+ still produces delay <= MAX_DELAY_MS * 1.1\n- `test_jitter_range` — run 100 iterations, all delays within [0.9x, 1.1x] of base\n- `test_first_retry_is_about_one_second` — attempt 1 produces ~1000ms delay\n- `test_overflow_safety` — very large attempt_count doesn't panic\nGREEN: Implement compute_next_attempt_at()\nVERIFY: `cargo test backoff`\n\n## Edge Cases\n- `attempt_count` > 30: `.min(30)` caps, saturating_mul prevents overflow\n- `attempt_count` = 0: not used in practice (callers pass `attempt_count + 1`)\n- `attempt_count` = 1: delay is ~1 second (first retry)\n- Negative attempt_count: `.min(30)` still works, shift of negative-as-u32 wraps but saturating_mul handles it","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:09.474Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:57:24.900137Z","closed_at":"2026-01-30T16:57:24.899942Z","close_reason":"Completed: compute_next_attempt_at with exp backoff (1s base, 1h cap, +-10% jitter), i64 params matching SQLite, overflow-safe, 5 tests pass","compaction_level":0,"original_size":0} {"id":"bd-mk3","title":"Update ingest command for merge_requests type","description":"## Background\nCLI entry point for MR ingestion. Routes `--type=merge_requests` to the orchestrator. Must ensure `--full` resets both MR cursor AND discussion watermarks. This is the user-facing command that kicks off the entire MR sync pipeline.\n\n## Approach\nUpdate `src/cli/commands/ingest.rs` to handle `merge_requests` type:\n1. Add `merge_requests` branch to the resource type match statement\n2. 
Validate resource type early with helpful error message\n3. Pass `full` flag through to orchestrator (it handles the watermark reset internally)\n\n## Files\n- `src/cli/commands/ingest.rs` - Add merge_requests branch to `run_ingest`\n\n## Acceptance Criteria\n- [ ] `gi ingest --type=merge_requests` runs MR ingestion successfully\n- [ ] `gi ingest --type=merge_requests --full` resets cursor AND discussion watermarks\n- [ ] `gi ingest --type=invalid` returns helpful error listing valid types\n- [ ] Progress output shows MR counts, discussion counts, and skip counts\n- [ ] Default type remains `issues` for backward compatibility\n- [ ] `cargo test ingest_command` passes\n\n## TDD Loop\nRED: `gi ingest --type=merge_requests` -> \"invalid type: merge_requests\"\nGREEN: Add merge_requests to match statement in run_ingest\nVERIFY: `gi ingest --type=merge_requests --help` shows merge_requests as valid\n\n## Function Signature\n```rust\npub async fn run_ingest(\n config: &Config,\n args: &IngestArgs,\n) -> Result<(), GiError>\n```\n\n## IngestArgs Reference (existing)\n```rust\n#[derive(Parser, Debug)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, short = 't', default_value = \"issues\")]\n pub r#type: String,\n \n /// Filter to specific project (by path or ID)\n #[arg(long, short = 'p')]\n pub project: Option,\n \n /// Force run even if another ingest is in progress\n #[arg(long, short = 'f')]\n pub force: bool,\n \n /// Full sync - reset cursor and refetch all\n #[arg(long)]\n pub full: bool,\n}\n```\n\n## Code Change\n```rust\nuse crate::core::errors::GiError;\nuse crate::ingestion::orchestrator::Orchestrator;\n\npub async fn run_ingest(\n config: &Config,\n args: &IngestArgs,\n) -> Result<(), GiError> {\n let resource_type = args.r#type.as_str();\n \n // Validate resource type early\n match resource_type {\n \"issues\" | \"merge_requests\" => {}\n _ => {\n return Err(GiError::InvalidArgument {\n name: \"type\".to_string(),\n value: 
resource_type.to_string(),\n expected: \"issues or merge_requests\".to_string(),\n });\n }\n }\n \n // Acquire single-flight lock (unless --force)\n if !args.force {\n acquire_ingest_lock(config, resource_type)?;\n }\n \n // Get projects to ingest (filtered if --project specified)\n let projects = get_projects_to_ingest(config, args.project.as_deref())?;\n \n for project in projects {\n println!(\"Ingesting {} for {}...\", resource_type, project.path);\n \n let orchestrator = Orchestrator::new(\n &config,\n project.id,\n project.gitlab_id,\n )?;\n \n let result = orchestrator.run_ingestion(resource_type, args.full).await?;\n \n // Print results based on resource type\n match resource_type {\n \"issues\" => {\n println!(\" {}: {} issues fetched, {} upserted\",\n project.path, result.issues_fetched, result.issues_upserted);\n }\n \"merge_requests\" => {\n println!(\" {}: {} MRs fetched, {} new labels, {} assignees, {} reviewers\",\n project.path,\n result.mrs_fetched,\n result.labels_created,\n result.assignees_linked,\n result.reviewers_linked,\n );\n println!(\" Discussions: {} synced, {} notes ({} DiffNotes)\",\n result.discussions_synced,\n result.notes_synced,\n result.diffnotes_count,\n );\n if result.mrs_skipped_discussion_sync > 0 {\n println!(\" Skipped discussion sync for {} unchanged MRs\",\n result.mrs_skipped_discussion_sync);\n }\n if result.failed_discussion_syncs > 0 {\n eprintln!(\" Warning: {} MRs failed discussion sync (will retry next run)\",\n result.failed_discussion_syncs);\n }\n }\n _ => unreachable!(),\n }\n }\n \n // Release lock\n if !args.force {\n release_ingest_lock(config, resource_type)?;\n }\n \n Ok(())\n}\n```\n\n## Output Format\n```\nIngesting merge_requests for group/project-one...\n group/project-one: 567 MRs fetched, 12 new labels, 89 assignees, 45 reviewers\n Discussions: 456 synced, 1,234 notes (89 DiffNotes)\n Skipped discussion sync for 444 unchanged MRs\n\nTotal: 567 MRs, 456 discussions, 1,234 notes\n```\n\n## Full Sync 
Behavior\nWhen `--full` is passed:\n1. MR cursor reset to NULL (handled by `ingest_merge_requests` with `full_sync: true`)\n2. Discussion watermarks reset to NULL (handled by `reset_discussion_watermarks` called from ingestion)\n3. All MRs re-fetched from GitLab API\n4. All discussions re-fetched for every MR\n\n## Error Types (from GiError enum)\n```rust\n// In src/core/errors.rs\npub enum GiError {\n InvalidArgument {\n name: String,\n value: String,\n expected: String,\n },\n LockError {\n resource: String,\n message: String,\n },\n // ... other variants\n}\n```\n\n## Edge Cases\n- Default type is `issues` for backward compatibility with CP1\n- Project filter (`--project`) can limit to specific project by path or ID\n- Force flag (`--force`) bypasses single-flight lock for debugging\n- If no projects configured, return helpful error about running `gi project add` first\n- Empty project (no MRs): completes successfully with \"0 MRs fetched\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.034952Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:28:52.711235Z","closed_at":"2026-01-27T00:28:52.711166Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-mk3","depends_on_id":"bd-10f","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} -{"id":"bd-nj7f","title":"Task 5: Robot mode JSON output and human-readable rendering","description":"## Background\n\nThis task implements the output layer for lore explain: robot mode JSON (structured envelope) and human-readable terminal rendering. 
Also registers the command in robot-docs manifest.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 5\n**Phase:** 3 — Output Rendering\n**Depends on:** Task 1 (bd-2i3z), Task 2 (bd-wtrm), Task 3 (bd-wb0b), Task 4 (bd-3q5e) — needs all sections populated\n\n## Robot Mode Output\n\nWrap ExplainResult in standard envelope:\n{\"ok\": true, \"data\": , \"meta\": {\"elapsed_ms\": N}}\n\nPattern from other commands:\nlet start = std::time::Instant::now();\n// ... run_explain() ...\nlet elapsed = start.elapsed();\nlet response = serde_json::json!({\n \"ok\": true,\n \"data\": result,\n \"meta\": { \"elapsed_ms\": elapsed.as_millis() }\n});\nprintln!(\"{}\", serde_json::to_string(&response)?);\n\nskip_serializing_if on optional sections handles filtering automatically — when sections are None they are omitted from JSON.\n\n### Example output shape:\n{\n \"ok\": true,\n \"data\": {\n \"entity\": { \"type\": \"issue\", \"iid\": 42, \"title\": \"...\", \"state\": \"opened\", \"author\": \"teernisse\", \"assignees\": [\"teernisse\"], \"labels\": [\"customer:BNSF\"], \"created_at\": \"2026-01-10T...\", \"updated_at\": \"2026-02-12T...\", \"url\": \"...\", \"status_name\": \"In progress\" },\n \"description_excerpt\": \"First 500 chars...\",\n \"key_decisions\": [{ \"timestamp\": \"...\", \"actor\": \"...\", \"action\": \"state: closed\", \"context_note\": \"...\" }],\n \"activity\": { \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42, \"first_event\": \"...\", \"last_event\": \"...\" },\n \"open_threads\": [{ \"discussion_id\": \"abc123\", \"started_by\": \"cseiber\", \"started_at\": \"...\", \"note_count\": 5, \"last_note_at\": \"...\" }],\n \"related\": { \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }], \"related_issues\": [{ \"entity_type\": \"issue\", \"iid\": 3800, \"title\": \"...\", \"reference_type\": \"related\" }] },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": 
\"state_changed\", \"actor\": \"teernisse\", \"summary\": \"State changed to closed\" }]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n\n## Human Mode Rendering\n\nUse Theme::bold(), Icons, render::truncate() from crate::cli::render.\nFollow timeline command's rendering pattern (src/cli/commands/timeline.rs): header with entity info -> separator line -> sections.\n\nEach section has a header (bold, colored), then indented content:\n- Entity: title, state, author, assignees, labels, status — on a few compact lines\n- Description: truncated excerpt (500 chars)\n- Key Decisions: one block per decision — timestamp + actor + action, indented context_note below\n- Activity: single summary line with counts and date range\n- Open Threads: list of unresolved discussions with started_by, note_count, last activity\n- Related: closing MRs (iid, title, state) and related issues (iid, title, type)\n- Timeline: chronological events, each as one compact line\n\nCheck params.sections before rendering each section (human mode skips unselected sections).\nEntity section always rendered (needed for identification).\n\n## Robot-Docs Registration\n\nRegister in src/app/robot_docs.rs in the commands JSON object. 
Follow the pattern of other commands like \"drift\" or \"timeline\":\n\n\"explain\": {\n \"description\": \"Auto-generate a structured narrative of an issue or MR\",\n \"flags\": [\"\", \"\", \"-p/--project \", \"--sections \", \"--no-timeline\", \"--max-decisions \", \"--since \"],\n \"example\": \"lore --robot explain issues 42\",\n \"sections\": [\"entity\", \"description\", \"key_decisions\", \"activity\", \"open_threads\", \"related\", \"timeline\"],\n \"response_schema\": {\n \"ok\": \"bool\",\n \"data\": {\"entity\": \"{type, iid, title, state, author, assignees, labels, created_at, updated_at, url, status_name}\", ...},\n \"meta\": {\"elapsed_ms\": \"int\"}\n }\n}\n\n## Error Handling\n\n- Entity not found: exit code 17, robot JSON error with code \"NOT_FOUND\", suggestion \"Entity not in local DB. Try: lore sync\"\n- Ambiguous project: exit code 18, robot JSON error with code \"AMBIGUOUS_MATCH\", suggestion \"Use -p to specify project\"\n- These should be handled by the existing error pipeline (LoreError -> exit code mapping)\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add print_explain_json() and print_explain() functions, wire into handle_explain\n- src/app/robot_docs.rs — add \"explain\" entry to commands JSON\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. test_explain_robot_output_shape: Call run_explain() with all sections, serialize to JSON, assert all 7 top-level keys present in data\n2. 
test_explain_sections_filter_robot: Call run_explain() with sections: Some(vec![\"key_decisions\", \"activity\"]), serialize, assert only entity + key_decisions + activity keys present (entity always included), assert description_excerpt, open_threads, related, timeline_excerpt are absent from JSON\n\n### GREEN — Implement:\n\n- print_explain_json(): wrap ExplainResult in envelope, print to stdout\n- print_explain(): render human-readable output with colored headers and formatted content\n- Register in robot-docs manifest\n- Wire into handle_explain: if robot_mode { print_explain_json } else { print_explain }\n\n### Verify:\ncargo test explain::tests::test_explain_robot && cargo test explain::tests::test_explain_sections_filter && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_robot_output_shape passes\n- [ ] test_explain_sections_filter_robot passes\n- [ ] Robot JSON matches spec schema\n- [ ] Section filtering works in both robot and human mode\n- [ ] Command appears in lore robot-docs output\n- [ ] Error cases: exit 17 (not found) with suggestion to sync, exit 18 (ambiguous) with suggestion to use -p\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] cargo clippy and cargo fmt 
clean","status":"open","priority":2,"issue_type":"task","created_at":"2026-03-10T17:38:11.609363Z","created_by":"tayloreernisse","updated_at":"2026-03-10T17:47:57.274681Z","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-nj7f","depends_on_id":"bd-3q5e","type":"blocks","created_at":"2026-03-10T17:38:19.012533Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:38:11.611068Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-wb0b","type":"blocks","created_at":"2026-03-10T17:38:18.944373Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-wtrm","type":"blocks","created_at":"2026-03-10T17:38:18.869452Z","created_by":"tayloreernisse"}]} +{"id":"bd-nj7f","title":"Task 5: Robot mode JSON output and human-readable rendering","description":"## Background\n\nThis task implements the output layer for lore explain: robot mode JSON (structured envelope) and human-readable terminal rendering. Also registers the command in robot-docs manifest.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 5\n**Phase:** 3 — Output Rendering\n**Depends on:** Task 1 (bd-2i3z), Task 2 (bd-wtrm), Task 3 (bd-wb0b), Task 4 (bd-3q5e) — needs all sections populated\n\n## Robot Mode Output\n\nWrap ExplainResult in standard envelope:\n{\"ok\": true, \"data\": , \"meta\": {\"elapsed_ms\": N}}\n\nPattern from other commands:\nlet start = std::time::Instant::now();\n// ... 
run_explain() ...\nlet elapsed = start.elapsed();\nlet response = serde_json::json!({\n \"ok\": true,\n \"data\": result,\n \"meta\": { \"elapsed_ms\": elapsed.as_millis() }\n});\nprintln!(\"{}\", serde_json::to_string(&response)?);\n\nskip_serializing_if on optional sections handles filtering automatically — when sections are None they are omitted from JSON.\n\n### Example output shape:\n{\n \"ok\": true,\n \"data\": {\n \"entity\": { \"type\": \"issue\", \"iid\": 42, \"title\": \"...\", \"state\": \"opened\", \"author\": \"teernisse\", \"assignees\": [\"teernisse\"], \"labels\": [\"customer:BNSF\"], \"created_at\": \"2026-01-10T...\", \"updated_at\": \"2026-02-12T...\", \"url\": \"...\", \"status_name\": \"In progress\" },\n \"description_excerpt\": \"First 500 chars...\",\n \"key_decisions\": [{ \"timestamp\": \"...\", \"actor\": \"...\", \"action\": \"state: closed\", \"context_note\": \"...\" }],\n \"activity\": { \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42, \"first_event\": \"...\", \"last_event\": \"...\" },\n \"open_threads\": [{ \"discussion_id\": \"abc123\", \"started_by\": \"cseiber\", \"started_at\": \"...\", \"note_count\": 5, \"last_note_at\": \"...\" }],\n \"related\": { \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }], \"related_issues\": [{ \"entity_type\": \"issue\", \"iid\": 3800, \"title\": \"...\", \"reference_type\": \"related\" }] },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": \"state_changed\", \"actor\": \"teernisse\", \"summary\": \"State changed to closed\" }]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n\n## Human Mode Rendering\n\nUse Theme::bold(), Icons, render::truncate() from crate::cli::render.\nFollow timeline command's rendering pattern (src/cli/commands/timeline.rs): header with entity info -> separator line -> sections.\n\nEach section has a header (bold, colored), then indented content:\n- Entity: title, state, author, assignees, labels, status — on a few 
compact lines\n- Description: truncated excerpt (500 chars)\n- Key Decisions: one block per decision — timestamp + actor + action, indented context_note below\n- Activity: single summary line with counts and date range\n- Open Threads: list of unresolved discussions with started_by, note_count, last activity\n- Related: closing MRs (iid, title, state) and related issues (iid, title, type)\n- Timeline: chronological events, each as one compact line\n\nCheck params.sections before rendering each section (human mode skips unselected sections).\nEntity section always rendered (needed for identification).\n\n## Robot-Docs Registration\n\nRegister in src/app/robot_docs.rs in the commands JSON object. Follow the pattern of other commands like \"drift\" or \"timeline\":\n\n\"explain\": {\n \"description\": \"Auto-generate a structured narrative of an issue or MR\",\n \"flags\": [\"\", \"\", \"-p/--project \", \"--sections \", \"--no-timeline\", \"--max-decisions \", \"--since \"],\n \"example\": \"lore --robot explain issues 42\",\n \"sections\": [\"entity\", \"description\", \"key_decisions\", \"activity\", \"open_threads\", \"related\", \"timeline\"],\n \"response_schema\": {\n \"ok\": \"bool\",\n \"data\": {\"entity\": \"{type, iid, title, state, author, assignees, labels, created_at, updated_at, url, status_name}\", ...},\n \"meta\": {\"elapsed_ms\": \"int\"}\n }\n}\n\n## Error Handling\n\n- Entity not found: exit code 17, robot JSON error with code \"NOT_FOUND\", suggestion \"Entity not in local DB. 
Try: lore sync\"\n- Ambiguous project: exit code 18, robot JSON error with code \"AMBIGUOUS_MATCH\", suggestion \"Use -p to specify project\"\n- These should be handled by the existing error pipeline (LoreError -> exit code mapping)\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add print_explain_json() and print_explain() functions, wire into handle_explain\n- src/app/robot_docs.rs — add \"explain\" entry to commands JSON\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\n1. test_explain_robot_output_shape: Call run_explain() with all sections, serialize to JSON, assert all 7 top-level keys present in data\n2. test_explain_sections_filter_robot: Call run_explain() with sections: Some(vec![\"key_decisions\", \"activity\"]), serialize, assert only entity + key_decisions + activity keys present (entity always included), assert description_excerpt, open_threads, related, timeline_excerpt are absent from JSON\n\n### GREEN — Implement:\n\n- print_explain_json(): wrap ExplainResult in envelope, print to stdout\n- print_explain(): render human-readable output with colored headers and formatted content\n- Register in robot-docs manifest\n- Wire into handle_explain: if robot_mode { print_explain_json } else { print_explain }\n\n### Verify:\ncargo test explain::tests::test_explain_robot && cargo test explain::tests::test_explain_sections_filter && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_robot_output_shape passes\n- [ ] test_explain_sections_filter_robot passes\n- [ ] Robot JSON matches spec schema\n- [ ] Section filtering works in both robot and human mode\n- [ ] Command appears in lore robot-docs output\n- [ ] Error cases: exit 17 (not found) with suggestion to sync, exit 18 (ambiguous) with suggestion to use -p\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] cargo clippy and cargo fmt 
clean","status":"closed","priority":2,"issue_type":"task","created_at":"2026-03-10T17:38:11.609363Z","created_by":"tayloreernisse","updated_at":"2026-03-10T19:03:29.063559Z","closed_at":"2026-03-10T19:03:29.063506Z","close_reason":"Implemented: all explain command sections (activity, open_threads, related, timeline_excerpt, human/robot renderers) merged into main explain.rs, robot_docs updated, autocorrect registered, 1046 tests passing","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-nj7f","depends_on_id":"bd-3q5e","type":"blocks","created_at":"2026-03-10T17:38:19.012533Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:38:11.611068Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-wb0b","type":"blocks","created_at":"2026-03-10T17:38:18.944373Z","created_by":"tayloreernisse"},{"issue_id":"bd-nj7f","depends_on_id":"bd-wtrm","type":"blocks","created_at":"2026-03-10T17:38:18.869452Z","created_by":"tayloreernisse"}]} {"id":"bd-nu0d","title":"Implement resize storm + rapid keypress + event fuzz tests","description":"## Background\nStress tests verify the TUI handles adverse input conditions without panic: rapid terminal resizes, fast keypress sequences, and randomized event traces. 
The event fuzz suite uses deterministic seed replay for reproducibility.\n\n## Approach\nResize storm:\n- Send 100 resize events in rapid succession (varying sizes from 20x10 to 300x80)\n- Assert no panic, no layout corruption, final render is valid for final size\n- FrankenTUI's BOCPD resize coalescing should handle this — verify it works\n\nRapid keypress:\n- Send 50 key events in <100ms: mix of navigation, filter input, mode switches\n- Assert no panic, no stuck input mode, final state is consistent\n- Verify Ctrl+C always exits regardless of state\n\nEvent fuzz (deterministic):\n- Generate 10k randomized event traces from: key events, resize events, paste events, tick events\n- Use seeded RNG for reproducibility\n- Replay each trace, check invariants after each event:\n - Navigation stack depth >= 1 (always has at least Dashboard)\n - InputMode transitions are valid (no impossible state combinations)\n - No panic\n - LoadState transitions are valid (no Idle->Refreshing without LoadingInitial first for initial load)\n- On invariant violation: log seed + event index for reproduction\n\n## Acceptance Criteria\n- [ ] 100 rapid resizes: no panic, valid final render\n- [ ] 50 rapid keys: no stuck input mode, Ctrl+C exits\n- [ ] 10k fuzz traces: zero invariant violations\n- [ ] Fuzz tests deterministically reproducible via seed\n- [ ] Navigation invariant: stack always has at least Dashboard\n- [ ] InputMode invariant: valid transitions only\n\n## Files\n- CREATE: crates/lore-tui/tests/stress_tests.rs\n- CREATE: crates/lore-tui/tests/fuzz_tests.rs\n\n## TDD Anchor\nRED: Write test_resize_storm_no_panic that sends 100 resize events to LoreApp, asserts no panic.\nGREEN: Ensure view() handles all terminal sizes gracefully.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_resize_storm\n\n## Edge Cases\n- Zero-size terminal (0x0): must not panic, skip rendering\n- Very large terminal (500x200): must not allocate unbounded memory\n- Paste events can 
contain arbitrary bytes including control chars — sanitize\n- Fuzz seed must be logged at test start for reproduction\n\n## Dependency Context\nUses LoreApp from \"Implement LoreApp Model\" task.\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses FakeClock for deterministic time in fuzz tests.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:04:42.012118Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.299688Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nu0d","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-nu0d","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-nwux","title":"Epic: TUI Phase 3 — Power Features","description":"## Background\nPhase 3 adds the power-user screens: Search (3 modes with preview), Timeline (5-stage pipeline visualization), Who (5 expert/workload modes), and Command Palette (fuzzy match). 
These screens leverage the foundation from Phases 1-2.\n\n## Acceptance Criteria\n- [ ] Search supports lexical, hybrid, and semantic modes with split-pane preview\n- [ ] Search capability detection enables/disables modes based on available indexes\n- [ ] Timeline renders chronological event stream with color-coded event types\n- [ ] Who supports Expert, Workload, Reviews, Active, and Overlap modes\n- [ ] Command palette provides fuzzy-match access to all commands","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:00:27.375421Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.286486Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nwux","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-o7b","title":"[CP1] gi show issue command","description":"## Background\n\nThe `gi show issue ` command displays detailed information about a single issue including metadata, description, labels, and all discussions with their notes. It provides a complete view similar to the GitLab web UI.\n\n## Approach\n\n### Module: src/cli/commands/show.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct ShowArgs {\n /// Entity type\n #[arg(value_parser = [\"issue\", \"mr\"])]\n pub entity: String,\n\n /// Entity IID\n pub iid: i64,\n\n /// Project path (required if ambiguous)\n #[arg(long)]\n pub project: Option,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_show(args: ShowArgs, conn: &Connection) -> Result<()>\n```\n\n### Logic (for entity=\"issue\")\n\n1. **Find issue**: Query by iid, optionally filtered by project\n - If multiple projects have same iid, require --project or error\n2. **Load metadata**: title, state, author, created_at, updated_at, web_url\n3. **Load labels**: JOIN through issue_labels to labels table\n4. **Load discussions**: All discussions for this issue\n5. 
**Load notes**: All notes for each discussion, ordered by position\n6. **Format output**: Rich display with sections\n\n### Output Format (matches PRD)\n\n```\nIssue #1234: Authentication redesign\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\nProject: group/project-one\nState: opened\nAuthor: @johndoe\nCreated: 2024-01-15\nUpdated: 2024-03-20\nLabels: enhancement, auth\nURL: https://gitlab.example.com/group/project-one/-/issues/1234\n\nDescription:\n We need to redesign the authentication flow to support...\n\nDiscussions (5):\n\n @janedoe (2024-01-16):\n I agree we should move to JWT-based auth...\n\n @johndoe (2024-01-16):\n What about refresh token strategy?\n\n @bobsmith (2024-01-17):\n Have we considered OAuth2?\n```\n\n### Queries\n\n```sql\n-- Find issue\nSELECT i.*, p.path as project_path\nFROM issues i\nJOIN projects p ON i.project_id = p.id\nWHERE i.iid = ? AND (p.path = ? OR ? IS NULL)\n\n-- Get labels\nSELECT l.name FROM labels l\nJOIN issue_labels il ON l.id = il.label_id\nWHERE il.issue_id = ?\n\n-- Get discussions with notes\nSELECT d.*, n.* FROM discussions d\nJOIN notes n ON d.id = n.discussion_id\nWHERE d.issue_id = ?\nORDER BY d.first_note_at, n.position\n```\n\n## Acceptance Criteria\n\n- [ ] Shows issue metadata (title, state, author, dates, URL)\n- [ ] Shows labels as comma-separated list\n- [ ] Shows description (truncated if very long)\n- [ ] Shows discussions grouped with notes indented\n- [ ] Handles --project filter correctly\n- [ ] Errors clearly if iid is ambiguous without --project\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod show;`)\n- src/cli/commands/show.rs (create)\n- src/cli/mod.rs (add Show variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n#[tokio::test] async fn show_issue_displays_metadata()\n#[tokio::test] async fn show_issue_displays_labels()\n#[tokio::test] async fn show_issue_displays_discussions()\n#[tokio::test] async fn 
show_issue_requires_project_when_ambiguous()\n```\n\nGREEN: Implement handler with queries and formatting\n\nVERIFY: `cargo test show_issue`\n\n## Edge Cases\n\n- Issue with no labels - show \"Labels: (none)\"\n- Issue with no discussions - show \"Discussions: (none)\"\n- Issue with very long description - truncate with \"...\"\n- System notes in discussions - filter out or show with [system] prefix\n- Individual notes (not threaded) - show without reply indentation","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-25T17:02:38.384702Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:05:25.688102Z","closed_at":"2026-01-25T23:05:25.688043Z","close_reason":"Implemented gi show issue command with metadata, labels, and discussions display","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-o7b","depends_on_id":"bd-208","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-o7b","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} @@ -360,11 +360,11 @@ {"id":"bd-utt4","title":"Define MeArgs struct and register me subcommand","description":"## Background\nThe `lore me` command needs a CLI argument struct and handler wiring. The existing pattern is `WhoArgs` in `src/cli/mod.rs` (line ~964) and `Commands::Who(WhoArgs)` variant. The handler in `src/main.rs` follows: `Some(Commands::Who(args)) => handle_who(cli.config.as_deref(), args, robot_mode)`.\n\n## Approach\n1. 
Define `MeArgs` in `src/cli/mod.rs` (alongside WhoArgs):\n```rust\n#[derive(Args, Debug)]\npub struct MeArgs {\n /// Show only issues section\n #[arg(long)]\n pub issues: bool,\n /// Show only MRs section (authored + reviewing)\n #[arg(long)]\n pub mrs: bool,\n /// Show only activity feed\n #[arg(long)]\n pub activity: bool,\n /// Activity window (e.g., \"30d\", \"7d\") — default 30d\n #[arg(long, default_value = \"30d\")]\n pub since: String,\n /// Scope to one project (fuzzy match)\n #[arg(short, long)]\n pub project: Option,\n /// Override configured username\n #[arg(long)]\n pub user: Option,\n /// Show all synced projects (overrides default_project)\n #[arg(long)]\n pub all: bool,\n /// Select output fields (preset: \"minimal\", or comma-separated)\n #[arg(long, value_delimiter = ',')]\n pub fields: Option>,\n}\n```\n\n2. Add variant to Commands enum (around line 114):\n```rust\n/// Personal work dashboard — my issues, MRs, and activity\nMe(MeArgs),\n```\n\n3. Create `src/cli/commands/me/mod.rs` with stub:\n```rust\nuse crate::Config;\nuse crate::cli::MeArgs;\nuse crate::core::error::Result;\n\npub fn handle_me(\n config_override: Option<&str>,\n args: MeArgs,\n robot_mode: bool,\n) -> std::result::Result<(), Box> {\n eprintln!(\"lore me: not yet implemented\");\n std::process::exit(1);\n}\n```\n\n4. Wire in `src/main.rs` — add match arm (near line ~3164 where handle_who is):\n```rust\nSome(Commands::Me(args)) => handle_me(cli.config.as_deref(), args, robot_mode),\n```\n\n5. 
Add `pub mod me;` to `src/cli/commands/mod.rs`.\n\n## Acceptance Criteria\n- [ ] `MeArgs` struct defined with all flags: --issues, --mrs, --activity, --since, --project, --user, --all, --fields\n- [ ] --since has default_value \"30d\"\n- [ ] --fields uses value_delimiter=',' for comma-separated list\n- [ ] `Me(MeArgs)` variant in Commands enum\n- [ ] `src/cli/commands/me/mod.rs` exists with stub handler\n- [ ] Handler wired in main.rs match arm\n- [ ] `lore me --help` shows all flags with descriptions\n- [ ] `lore me` runs without panic (stub prints \"not yet implemented\" and exits)\n- [ ] Standard global flags (--robot/-J, --color, --icons) inherited from Cli struct\n- [ ] --project and --all are separate flags (mutual exclusivity is runtime, not clap)\n\n## Files\n- MODIFY: src/cli/mod.rs (MeArgs struct + Commands::Me variant)\n- CREATE: src/cli/commands/me/mod.rs (stub handler)\n- MODIFY: src/cli/commands/mod.rs (add `pub mod me;`)\n- MODIFY: src/main.rs (add match arm calling handle_me)\n\n## TDD Anchor\nRED: Write `test_me_args_parse` that parses `[\"lore\", \"me\", \"--issues\", \"--since\", \"7d\", \"--user\", \"jdoe\"]` via `Cli::try_parse_from` and asserts `issues=true, since=\"7d\", user=Some(\"jdoe\")`.\nGREEN: Define MeArgs struct and Commands variant.\nVERIFY: `cargo test me_args`\n\n## Edge Cases\n- `--since` default must be \"30d\" when not provided (test by parsing without --since)\n- `--fields` with no value should be None, `--fields minimal` should be Some(vec![\"minimal\"])\n- The handler signature matches the who pattern: `(config_override, args, robot_mode)`\n\n## Dependency Context\nUses resolve_username from bd-1f1f (called within the handler when fully implemented).\nPattern reference: WhoArgs at `src/cli/mod.rs:964`, handle_who at 
`src/main.rs:3164`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:34.340060Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.046435Z","closed_at":"2026-02-20T16:09:13.046391Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-utt4","depends_on_id":"bd-1f1f","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-v6i","title":"[CP1] gi ingest --type=issues command","description":"## Background\n\nThe `gi ingest --type=issues` command is the main entry point for issue ingestion. It acquires a single-flight lock, calls the orchestrator for each configured project, and outputs progress/summary to the user.\n\n## Approach\n\n### Module: src/cli/commands/ingest.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n pub r#type: String,\n\n /// Filter to single project\n #[arg(long)]\n pub project: Option,\n\n /// Override stale sync lock\n #[arg(long)]\n pub force: bool,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_ingest(args: IngestArgs, config: &Config) -> Result<()>\n```\n\n### Logic\n\n1. **Acquire single-flight lock**: `acquire_sync_lock(conn, args.force)?`\n2. **Get projects to sync**:\n - If `args.project` specified, filter to that one\n - Otherwise, get all configured projects from DB\n3. **For each project**:\n - Print \"Ingesting issues for {project_path}...\"\n - Call `ingest_project_issues(conn, client, config, project_id, gitlab_project_id)`\n - Print \"{N} issues fetched, {M} new labels\"\n4. **Print discussion sync summary**:\n - \"Fetching discussions ({N} issues with updates)...\"\n - \"{N} discussions, {M} notes (excluding {K} system notes)\"\n - \"Skipped discussion sync for {N} unchanged issues.\"\n5. 
**Release lock**: Lock auto-released when handler returns\n\n### Output Format (matches PRD)\n\n```\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n```\n\n## Acceptance Criteria\n\n- [ ] Clap args parse --type, --project, --force correctly\n- [ ] Single-flight lock acquired before sync starts\n- [ ] Lock error message is clear if concurrent run attempted\n- [ ] Progress output shows per-project counts\n- [ ] Summary includes unchanged issues skipped count\n- [ ] --force flag allows overriding stale lock\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod ingest;`)\n- src/cli/commands/ingest.rs (create)\n- src/cli/mod.rs (add Ingest variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/cli_ingest_tests.rs\n#[tokio::test] async fn ingest_issues_acquires_lock()\n#[tokio::test] async fn ingest_issues_fails_on_concurrent_run()\n#[tokio::test] async fn ingest_issues_respects_project_filter()\n#[tokio::test] async fn ingest_issues_force_overrides_stale_lock()\n```\n\nGREEN: Implement handler with lock and orchestrator calls\n\nVERIFY: `cargo test cli_ingest`\n\n## Edge Cases\n\n- No projects configured - return early with helpful message\n- Project filter matches nothing - error with \"project not found\"\n- Lock already held - clear error \"Sync already in progress\"\n- Ctrl-C during sync - lock should be released (via Drop or SIGINT 
handler)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.312565Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:56:44.090142Z","closed_at":"2026-01-25T22:56:44.090086Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-v6i","depends_on_id":"bd-ozy","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-v6tc","title":"Description","description":"This is a test","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:52:04.745618Z","updated_at":"2026-02-12T16:52:10.755235Z","closed_at":"2026-02-12T16:52:10.755188Z","close_reason":"test artifacts","compaction_level":0,"original_size":0} -{"id":"bd-wb0b","title":"Task 3: Implement open threads, activity summary, and cross-references","description":"## Background\n\nThis task implements three data-fetching functions for the explain command: open threads (unresolved discussions), activity summary (event/note counts), and cross-references (related entities).\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 3\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers\n\n## Function Signatures\n\n\\`\\`\\`rust\npub fn fetch_open_threads(conn: &Connection, entity_type: &str, entity_id: i64) -> Result>\n\npub fn build_activity_summary(\n conn: &Connection,\n entity_type: &str,\n entity_id: i64,\n since: Option,\n) -> Result\n\npub fn fetch_related_entities(conn: &Connection, entity_type: &str, entity_id: i64) -> Result\n\npub fn extract_description_excerpt(description: Option<&str>) -> String\n\\`\\`\\`\n\nWire into run_explain():\n\\`\\`\\`rust\nlet description_excerpt = if should_include(¶ms.sections, \"description\") {\n Some(extract_description_excerpt(description.as_deref()))\n} else { None };\n\nlet open_threads = if should_include(¶ms.sections, 
\"open_threads\") {\n Some(fetch_open_threads(conn, ¶ms.entity_type, entity_local_id)?)\n} else { None };\n\nlet activity = if should_include(¶ms.sections, \"activity\") {\n Some(build_activity_summary(conn, ¶ms.entity_type, entity_local_id, params.since)?)\n} else { None };\n\nlet related = if should_include(¶ms.sections, \"related\") {\n Some(fetch_related_entities(conn, ¶ms.entity_type, entity_local_id)?)\n} else { None };\n\\`\\`\\`\n\n## Data Assembly\n\n### Open Threads (fetch_open_threads)\n\n\\`\\`\\`rust\nlet id_col = if entity_type == \"issues\" { \"issue_id\" } else { \"merge_request_id\" };\n\\`\\`\\`\n\nQuery unresolved discussions:\n\\`\\`\\`sql\nSELECT d.id, d.gitlab_discussion_id, d.first_note_at, d.last_note_at\nFROM discussions d\nWHERE d.{id_col} = ?1 AND d.resolvable = 1 AND d.resolved = 0\nORDER BY d.last_note_at DESC\n\\`\\`\\`\n\nFor each discussion row, two sub-queries:\n\nFirst note author:\n\\`\\`\\`sql\nSELECT author_username FROM notes WHERE discussion_id = ?1 ORDER BY created_at ASC LIMIT 1\n\\`\\`\\`\n\nNon-system note count:\n\\`\\`\\`sql\nSELECT COUNT(*) FROM notes WHERE discussion_id = ?1 AND is_system = 0\n\\`\\`\\`\n\nIMPORTANT: discussions.id is the LOCAL SQLite row id. notes.discussion_id references this local id. 
discussions.gitlab_discussion_id is the GitLab string id for display output.\n\nResult: OpenThread { discussion_id: gitlab_discussion_id, started_by: first note author, started_at: ms_to_iso(first_note_at), note_count, last_note_at: ms_to_iso(last_note_at) }\n\n### Activity Summary (build_activity_summary)\n\nThree aggregate queries:\n\n\\`\\`\\`sql\n-- State events\nSELECT COUNT(*), MIN(created_at), MAX(created_at)\nFROM resource_state_events WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\n\n-- Label events\nSELECT COUNT(*), MIN(created_at), MAX(created_at)\nFROM resource_label_events WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\n\n-- Non-system notes\nSELECT COUNT(*), MIN(n.created_at), MAX(n.created_at)\nFROM notes n JOIN discussions d ON n.discussion_id = d.id\nWHERE d.{id_col} = ?1 AND n.is_system = 0 AND (?2 IS NULL OR n.created_at >= ?2)\n\\`\\`\\`\n\nCombine min/max across all three for first_event/last_event:\n\\`\\`\\`rust\nlet first_event = [state_min, label_min, note_min].iter().flatten().copied().min();\nlet last_event = [state_max, label_max, note_max].iter().flatten().copied().max();\n\\`\\`\\`\n\nResult: ActivitySummary { state_changes, label_changes, notes, first_event: first_event.map(ms_to_iso), last_event: last_event.map(ms_to_iso) }\n\n### Cross-References (fetch_related_entities)\n\n**Closing MRs** (only for issues) — copy this exact query from show/issue.rs get_closing_mrs():\n\\`\\`\\`sql\nSELECT mr.iid, mr.title, mr.state, mr.web_url\nFROM entity_references er\nJOIN merge_requests mr ON mr.id = er.source_entity_id\nWHERE er.target_entity_type = 'issue'\n AND er.target_entity_id = ?\n AND er.source_entity_type = 'merge_request'\n AND er.reference_type = 'closes'\nORDER BY mr.iid\n\\`\\`\\`\n\nNOTE: entity_references stores source=MR, target=issue for closing relationships.\n\n**Related entities** — JOIN to get iid+title:\n\\`\\`\\`sql\n-- Outgoing (this entity references others, excluding closes)\nSELECT 
er.target_entity_type, er.target_entity_iid, er.reference_type,\n COALESCE(i.title, mr.title) as title\nFROM entity_references er\nLEFT JOIN issues i ON er.target_entity_type = 'issue' AND i.id = er.target_entity_id\nLEFT JOIN merge_requests mr ON er.target_entity_type = 'merge_request' AND mr.id = er.target_entity_id\nWHERE er.source_entity_type = ?1 AND er.source_entity_id = ?2\n AND er.reference_type != 'closes'\n\n-- Incoming (others reference this entity, excluding closes)\nSELECT er.source_entity_type, COALESCE(i.iid, mr.iid) as iid, er.reference_type,\n COALESCE(i.title, mr.title) as title\nFROM entity_references er\nLEFT JOIN issues i ON er.source_entity_type = 'issue' AND i.id = er.source_entity_id\nLEFT JOIN merge_requests mr ON er.source_entity_type = 'merge_request' AND mr.id = er.source_entity_id\nWHERE er.target_entity_type = ?1 AND er.target_entity_id = ?2\n AND er.reference_type != 'closes'\n\\`\\`\\`\n\nSkip closing MRs section when entity_type is \"mrs\" (closing MRs only apply to issues).\n\n### Description Excerpt\n\n\\`\\`\\`rust\npub fn extract_description_excerpt(description: Option<&str>) -> String {\n match description {\n Some(d) if !d.trim().is_empty() => truncate(d, 500),\n _ => \"(no description)\".to_owned(),\n }\n}\n\\`\\`\\`\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add fetch_open_threads(), build_activity_summary(), fetch_related_entities(), extract_description_excerpt(), wire into run_explain()\n\n## DB Schema Reference\n\n- discussions: id (local PK), gitlab_discussion_id (TEXT), project_id, issue_id (FK), merge_request_id (FK), noteable_type, individual_note (0/1), first_note_at (ms), last_note_at (ms), last_seen_at, resolvable (0/1), resolved (0/1). 
CHECK: exactly one of issue_id/merge_request_id NOT NULL.\n- notes: id (local PK), gitlab_id, discussion_id (FK->discussions.id), project_id, note_type, is_system (0/1), author_username, body, created_at (ms), updated_at, last_seen_at\n- entity_references: source_entity_type (issue|merge_request), source_entity_id (FK), target_entity_type, target_entity_id (nullable), target_project_path, target_entity_iid, reference_type (closes|mentioned|related), source_method\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\nTest helpers (reuse from Task 1; add new insert helpers as needed):\n\n\\`\\`\\`rust\nfn insert_discussion_full(conn: &Connection, issue_id: i64, gitlab_id: &str, resolvable: bool, resolved: bool) -> i64 {\n conn.execute(\n \"INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, noteable_type, individual_note, first_note_at, last_note_at, last_seen_at, resolvable, resolved) VALUES (?1, 1, ?2, 'Issue', 0, 1000000, 2000000, 2000000, ?3, ?4)\",\n rusqlite::params![gitlab_id, issue_id, resolvable as i32, resolved as i32],\n ).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\nTests:\n1. test_explain_open_threads: Insert 2 discussions (1 unresolved+resolvable, 1 resolved+resolvable), each with 2 notes. Assert only unresolved appears in result, with correct started_by (first note author) and note_count (non-system notes).\n2. test_explain_activity_counts: Insert 3 state events + 2 label events + 10 non-system notes (via discussion), assert activity.state_changes=3, label_changes=2, notes=10. Verify first_event and last_event bracket the correct ms range.\n3. test_explain_no_notes: Insert issue with zero notes, zero events, NULL description. 
Assert open_threads is empty, activity is all zeros with None timestamps, description_excerpt = \"(no description)\".\n\n### GREEN — Implement the four functions and wire into run_explain()\n\n### Verify:\ncargo test explain::tests::test_explain_open_threads && cargo test explain::tests::test_explain_activity_counts && cargo test explain::tests::test_explain_no_notes && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_open_threads passes\n- [ ] test_explain_activity_counts passes\n- [ ] test_explain_no_notes passes\n- [ ] Open threads correctly filtered (only unresolved+resolvable)\n- [ ] Activity counts accurate with correct first/last event timestamps\n- [ ] Cross-references include closing MRs with iid/title/state (for issues)\n- [ ] Empty entity handled gracefully (no panics, sensible defaults)\n- [ ] cargo clippy and cargo fmt clean","status":"open","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:35.249719Z","created_by":"tayloreernisse","updated_at":"2026-03-10T17:50:58.892461Z","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-wb0b","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.716761Z","created_by":"tayloreernisse"},{"issue_id":"bd-wb0b","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:35.251885Z","created_by":"tayloreernisse"}]} +{"id":"bd-wb0b","title":"Task 3: Implement open threads, activity summary, and cross-references","description":"## Background\n\nThis task implements three data-fetching functions for the explain command: open threads (unresolved discussions), activity summary (event/note counts), and cross-references (related entities).\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 3\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers\n\n## 
Function Signatures\n\n\\`\\`\\`rust\npub fn fetch_open_threads(conn: &Connection, entity_type: &str, entity_id: i64) -> Result>\n\npub fn build_activity_summary(\n conn: &Connection,\n entity_type: &str,\n entity_id: i64,\n since: Option,\n) -> Result\n\npub fn fetch_related_entities(conn: &Connection, entity_type: &str, entity_id: i64) -> Result\n\npub fn extract_description_excerpt(description: Option<&str>) -> String\n\\`\\`\\`\n\nWire into run_explain():\n\\`\\`\\`rust\nlet description_excerpt = if should_include(¶ms.sections, \"description\") {\n Some(extract_description_excerpt(description.as_deref()))\n} else { None };\n\nlet open_threads = if should_include(¶ms.sections, \"open_threads\") {\n Some(fetch_open_threads(conn, ¶ms.entity_type, entity_local_id)?)\n} else { None };\n\nlet activity = if should_include(¶ms.sections, \"activity\") {\n Some(build_activity_summary(conn, ¶ms.entity_type, entity_local_id, params.since)?)\n} else { None };\n\nlet related = if should_include(¶ms.sections, \"related\") {\n Some(fetch_related_entities(conn, ¶ms.entity_type, entity_local_id)?)\n} else { None };\n\\`\\`\\`\n\n## Data Assembly\n\n### Open Threads (fetch_open_threads)\n\n\\`\\`\\`rust\nlet id_col = if entity_type == \"issues\" { \"issue_id\" } else { \"merge_request_id\" };\n\\`\\`\\`\n\nQuery unresolved discussions:\n\\`\\`\\`sql\nSELECT d.id, d.gitlab_discussion_id, d.first_note_at, d.last_note_at\nFROM discussions d\nWHERE d.{id_col} = ?1 AND d.resolvable = 1 AND d.resolved = 0\nORDER BY d.last_note_at DESC\n\\`\\`\\`\n\nFor each discussion row, two sub-queries:\n\nFirst note author:\n\\`\\`\\`sql\nSELECT author_username FROM notes WHERE discussion_id = ?1 ORDER BY created_at ASC LIMIT 1\n\\`\\`\\`\n\nNon-system note count:\n\\`\\`\\`sql\nSELECT COUNT(*) FROM notes WHERE discussion_id = ?1 AND is_system = 0\n\\`\\`\\`\n\nIMPORTANT: discussions.id is the LOCAL SQLite row id. notes.discussion_id references this local id. 
discussions.gitlab_discussion_id is the GitLab string id for display output.\n\nResult: OpenThread { discussion_id: gitlab_discussion_id, started_by: first note author, started_at: ms_to_iso(first_note_at), note_count, last_note_at: ms_to_iso(last_note_at) }\n\n### Activity Summary (build_activity_summary)\n\nThree aggregate queries:\n\n\\`\\`\\`sql\n-- State events\nSELECT COUNT(*), MIN(created_at), MAX(created_at)\nFROM resource_state_events WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\n\n-- Label events\nSELECT COUNT(*), MIN(created_at), MAX(created_at)\nFROM resource_label_events WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\n\n-- Non-system notes\nSELECT COUNT(*), MIN(n.created_at), MAX(n.created_at)\nFROM notes n JOIN discussions d ON n.discussion_id = d.id\nWHERE d.{id_col} = ?1 AND n.is_system = 0 AND (?2 IS NULL OR n.created_at >= ?2)\n\\`\\`\\`\n\nCombine min/max across all three for first_event/last_event:\n\\`\\`\\`rust\nlet first_event = [state_min, label_min, note_min].iter().flatten().copied().min();\nlet last_event = [state_max, label_max, note_max].iter().flatten().copied().max();\n\\`\\`\\`\n\nResult: ActivitySummary { state_changes, label_changes, notes, first_event: first_event.map(ms_to_iso), last_event: last_event.map(ms_to_iso) }\n\n### Cross-References (fetch_related_entities)\n\n**Closing MRs** (only for issues) — copy this exact query from show/issue.rs get_closing_mrs():\n\\`\\`\\`sql\nSELECT mr.iid, mr.title, mr.state, mr.web_url\nFROM entity_references er\nJOIN merge_requests mr ON mr.id = er.source_entity_id\nWHERE er.target_entity_type = 'issue'\n AND er.target_entity_id = ?\n AND er.source_entity_type = 'merge_request'\n AND er.reference_type = 'closes'\nORDER BY mr.iid\n\\`\\`\\`\n\nNOTE: entity_references stores source=MR, target=issue for closing relationships.\n\n**Related entities** — JOIN to get iid+title:\n\\`\\`\\`sql\n-- Outgoing (this entity references others, excluding closes)\nSELECT 
er.target_entity_type, er.target_entity_iid, er.reference_type,\n COALESCE(i.title, mr.title) as title\nFROM entity_references er\nLEFT JOIN issues i ON er.target_entity_type = 'issue' AND i.id = er.target_entity_id\nLEFT JOIN merge_requests mr ON er.target_entity_type = 'merge_request' AND mr.id = er.target_entity_id\nWHERE er.source_entity_type = ?1 AND er.source_entity_id = ?2\n AND er.reference_type != 'closes'\n\n-- Incoming (others reference this entity, excluding closes)\nSELECT er.source_entity_type, COALESCE(i.iid, mr.iid) as iid, er.reference_type,\n COALESCE(i.title, mr.title) as title\nFROM entity_references er\nLEFT JOIN issues i ON er.source_entity_type = 'issue' AND i.id = er.source_entity_id\nLEFT JOIN merge_requests mr ON er.source_entity_type = 'merge_request' AND mr.id = er.source_entity_id\nWHERE er.target_entity_type = ?1 AND er.target_entity_id = ?2\n AND er.reference_type != 'closes'\n\\`\\`\\`\n\nSkip closing MRs section when entity_type is \"mrs\" (closing MRs only apply to issues).\n\n### Description Excerpt\n\n\\`\\`\\`rust\npub fn extract_description_excerpt(description: Option<&str>) -> String {\n match description {\n Some(d) if !d.trim().is_empty() => truncate(d, 500),\n _ => \"(no description)\".to_owned(),\n }\n}\n\\`\\`\\`\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add fetch_open_threads(), build_activity_summary(), fetch_related_entities(), extract_description_excerpt(), wire into run_explain()\n\n## DB Schema Reference\n\n- discussions: id (local PK), gitlab_discussion_id (TEXT), project_id, issue_id (FK), merge_request_id (FK), noteable_type, individual_note (0/1), first_note_at (ms), last_note_at (ms), last_seen_at, resolvable (0/1), resolved (0/1). 
CHECK: exactly one of issue_id/merge_request_id NOT NULL.\n- notes: id (local PK), gitlab_id, discussion_id (FK->discussions.id), project_id, note_type, is_system (0/1), author_username, body, created_at (ms), updated_at, last_seen_at\n- entity_references: source_entity_type (issue|merge_request), source_entity_id (FK), target_entity_type, target_entity_id (nullable), target_project_path, target_entity_iid, reference_type (closes|mentioned|related), source_method\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\nTest helpers (reuse from Task 1; add new insert helpers as needed):\n\n\\`\\`\\`rust\nfn insert_discussion_full(conn: &Connection, issue_id: i64, gitlab_id: &str, resolvable: bool, resolved: bool) -> i64 {\n conn.execute(\n \"INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, noteable_type, individual_note, first_note_at, last_note_at, last_seen_at, resolvable, resolved) VALUES (?1, 1, ?2, 'Issue', 0, 1000000, 2000000, 2000000, ?3, ?4)\",\n rusqlite::params![gitlab_id, issue_id, resolvable as i32, resolved as i32],\n ).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\nTests:\n1. test_explain_open_threads: Insert 2 discussions (1 unresolved+resolvable, 1 resolved+resolvable), each with 2 notes. Assert only unresolved appears in result, with correct started_by (first note author) and note_count (non-system notes).\n2. test_explain_activity_counts: Insert 3 state events + 2 label events + 10 non-system notes (via discussion), assert activity.state_changes=3, label_changes=2, notes=10. Verify first_event and last_event bracket the correct ms range.\n3. test_explain_no_notes: Insert issue with zero notes, zero events, NULL description. 
Assert open_threads is empty, activity is all zeros with None timestamps, description_excerpt = \"(no description)\".\n\n### GREEN — Implement the four functions and wire into run_explain()\n\n### Verify:\ncargo test explain::tests::test_explain_open_threads && cargo test explain::tests::test_explain_activity_counts && cargo test explain::tests::test_explain_no_notes && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_open_threads passes\n- [ ] test_explain_activity_counts passes\n- [ ] test_explain_no_notes passes\n- [ ] Open threads correctly filtered (only unresolved+resolvable)\n- [ ] Activity counts accurate with correct first/last event timestamps\n- [ ] Cross-references include closing MRs with iid/title/state (for issues)\n- [ ] Empty entity handled gracefully (no panics, sensible defaults)\n- [ ] cargo clippy and cargo fmt clean","status":"closed","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:35.249719Z","created_by":"tayloreernisse","updated_at":"2026-03-10T19:03:29.054858Z","closed_at":"2026-03-10T19:03:29.054782Z","close_reason":"Implemented: all explain command sections (activity, open_threads, related, timeline_excerpt, human/robot renderers) merged into main explain.rs, robot_docs updated, autocorrect registered, 1046 tests passing","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-wb0b","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.716761Z","created_by":"tayloreernisse"},{"issue_id":"bd-wb0b","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:35.251885Z","created_by":"tayloreernisse"}]} {"id":"bd-wcja","title":"Extend SyncResult with surgical mode fields for robot output","description":"## Background\n\nRobot mode (`--robot`) serializes `SyncResult` as JSON for machine consumers. Currently `SyncResult` (lines 31-52 of `src/cli/commands/sync.rs`) only has fields for normal full sync. 
Surgical sync needs additional metadata in the JSON response: whether surgical mode was active, which IIDs were requested, per-entity outcomes, and whether it was a preflight-only run. These must be `Option` fields so normal sync serialization is unchanged (serde `skip_serializing_if = \"Option::is_none\"`).\n\n## Approach\n\nAdd four `Option` fields to the existing `SyncResult` struct:\n\n```rust\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_mode: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_iids: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub entity_results: Option>,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub preflight_only: Option,\n```\n\nDefine two new supporting structs in the same file:\n\n```rust\n#[derive(Debug, Default, Serialize)]\npub struct SurgicalIids {\n pub issues: Vec,\n pub merge_requests: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySyncResult {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: u64,\n pub outcome: String, // \"synced\", \"skipped_toctou\", \"failed\", \"not_found\"\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub error: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub toctou_reason: Option,\n}\n```\n\nBecause `SyncResult` derives `Default`, the new `Option` fields default to `None` automatically. Non-surgical callers need zero changes.\n\n## Acceptance Criteria\n\n1. `SyncResult` compiles with all four new `Option` fields\n2. `SurgicalIids` and `EntitySyncResult` are defined with `Serialize` derive\n3. Serializing a `SyncResult` with surgical fields set produces JSON with `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only` keys\n4. Serializing a default `SyncResult` (all `None`) produces JSON identical to current output (no surgical keys)\n5. `SyncResult::default()` still works without specifying new fields\n6. 
All existing tests pass unchanged\n\n## Files\n\n- `src/cli/commands/sync.rs` — add fields to `SyncResult`, define `SurgicalIids` and `EntitySyncResult`\n\n## TDD Anchor\n\nAdd a test module or extend the existing one in `src/cli/commands/sync.rs` (or a new `sync_tests.rs` file):\n\n```rust\n#[cfg(test)]\nmod surgical_result_tests {\n use super::*;\n\n #[test]\n fn sync_result_default_omits_surgical_fields() {\n let result = SyncResult::default();\n let json = serde_json::to_value(&result).unwrap();\n assert!(json.get(\"surgical_mode\").is_none());\n assert!(json.get(\"surgical_iids\").is_none());\n assert!(json.get(\"entity_results\").is_none());\n assert!(json.get(\"preflight_only\").is_none());\n }\n\n #[test]\n fn sync_result_with_surgical_fields_serializes_correctly() {\n let result = SyncResult {\n surgical_mode: Some(true),\n surgical_iids: Some(SurgicalIids {\n issues: vec![7, 42],\n merge_requests: vec![10],\n }),\n entity_results: Some(vec![\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 7,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n },\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 42,\n outcome: \"skipped_toctou\".to_string(),\n error: None,\n toctou_reason: Some(\"updated_at changed\".to_string()),\n },\n ]),\n preflight_only: Some(false),\n ..SyncResult::default()\n };\n let json = serde_json::to_value(&result).unwrap();\n assert_eq!(json[\"surgical_mode\"], true);\n assert_eq!(json[\"surgical_iids\"][\"issues\"], serde_json::json!([7, 42]));\n assert_eq!(json[\"entity_results\"].as_array().unwrap().len(), 2);\n assert_eq!(json[\"entity_results\"][1][\"outcome\"], \"skipped_toctou\");\n assert_eq!(json[\"preflight_only\"], false);\n }\n\n #[test]\n fn entity_sync_result_omits_none_fields() {\n let entity = EntitySyncResult {\n entity_type: \"merge_request\".to_string(),\n iid: 10,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n };\n let json = 
serde_json::to_value(&entity).unwrap();\n assert!(json.get(\"error\").is_none());\n assert!(json.get(\"toctou_reason\").is_none());\n assert!(json.get(\"entity_type\").is_some());\n }\n}\n```\n\n## Edge Cases\n\n- `entity_results: Some(vec![])` — empty vec serializes as `[]`, not omitted. This is correct for \"surgical mode ran but had no entities to process.\"\n- `surgical_iids` with empty vecs — valid for edge case where user passes `--issue` but all IIDs are filtered out before sync.\n- Ensure `EntitySyncResult.outcome` uses a fixed set of string values. Consider a future enum, but `String` is fine for initial implementation to keep serialization simple.\n\n## Dependency Context\n\n- **No upstream dependencies** — this bead only adds struct fields, no behavioral changes.\n- **Downstream**: bd-1i4i (orchestrator) populates these fields. bd-3bec (wiring) passes them through.\n- The `#[derive(Default)]` on `SyncResult` means all `Option` fields are `None` by default, so this is a fully additive change.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:17:03.915330Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:03:46.649727Z","closed_at":"2026-02-18T21:03:46.649679Z","close_reason":"Completed: SyncResult extended with surgical_mode, surgical_iids, entity_results fields","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-wcja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-wnuo","title":"Implement performance benchmark fixtures (S/M/L tiers)","description":"## Background\nTiered performance fixtures validate latency at three data scales. S and M tiers are CI-enforced gates; L tier is advisory. 
Fixtures are synthetic SQLite databases with realistic data distributions.\n\n## Approach\nFixture generator (benches/ or tests/fixtures/):\n- S-tier: 10k issues, 5k MRs, 50k notes, 10k docs\n- M-tier: 100k issues, 50k MRs, 500k notes, 50k docs\n- L-tier: 250k issues, 100k MRs, 1M notes, 100k docs\n- Realistic distributions: state (60% closed, 30% opened, 10% other), authors from pool of 50 names, labels from pool of 20, dates spanning 2 years\n\nBenchmarks:\n- p95 first-paint latency: Dashboard load, Issue List load, MR List load\n- p95 keyset pagination: next page fetch\n- p95 search latency: lexical and hybrid modes\n- Memory ceiling: RSS after full dashboard + list load\n- SLO assertions per tier (see Phase 0 criteria)\n\nRequired indexes must be present in fixture DBs:\n- idx_issues_list_default, idx_mrs_list_default, idx_discussions_entity, idx_notes_discussion\n\n## Acceptance Criteria\n- [ ] S-tier fixture generated with correct counts\n- [ ] M-tier fixture generated with correct counts\n- [ ] L-tier fixture generated (on-demand, not CI)\n- [ ] p95 first-paint < 50ms (S), < 75ms (M), < 150ms (L)\n- [ ] p95 keyset pagination < 50ms (S), < 75ms (M), < 100ms (L)\n- [ ] p95 search latency < 100ms (S), < 200ms (M), < 400ms (L)\n- [ ] Memory < 150MB RSS (S), < 250MB RSS (M)\n- [ ] All required indexes present in fixtures\n- [ ] EXPLAIN QUERY PLAN shows index usage for top 10 queries\n\n## Files\n- CREATE: crates/lore-tui/benches/perf_benchmarks.rs\n- CREATE: crates/lore-tui/tests/fixtures/generate_fixtures.rs\n\n## TDD Anchor\nRED: Write benchmark_dashboard_load_s_tier that generates S-tier fixture, measures Dashboard load time, asserts p95 < 50ms.\nGREEN: Implement fetch_dashboard with efficient queries.\nVERIFY: cargo bench --manifest-path crates/lore-tui/Cargo.toml\n\n## Edge Cases\n- Fixture generation must be deterministic (seeded RNG) for reproducible benchmarks\n- CI machines may be slower — use generous multipliers or relative thresholds\n- S-tier fits in 
memory; M-tier requires WAL mode for concurrent access\n- Benchmark warmup: discard first 5 iterations\n\n## Dependency Context\nUses all action.rs query functions from Phase 2/3 tasks.\nUses DbManager from \"Implement DbManager\" task.\nUses required index migrations from the main lore crate.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:12.867291Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.463811Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wnuo","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-wnuo","depends_on_id":"bd-3eis","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-wrw1","title":"Implement CLI/TUI parity tests (counts, lists, detail, search, sanitization)","description":"## Background\nParity tests ensure the TUI and CLI show the same data. Both interfaces query the same SQLite database, but through different code paths (TUI action functions vs CLI command handlers). Drift can occur when query functions are duplicated or modified independently. These tests catch drift by running both code paths against the same in-memory DB and comparing results.\n\n## Approach\n\n### Test Strategy: Library-Level (Same Process)\nTests run in the same process with a shared in-memory SQLite DB. No binary execution, no JSON parsing, no process spawning. 
Both TUI action functions and CLI query functions are called as library code.\n\nSetup pattern:\n```rust\nuse lore::core::db::{create_connection, run_migrations};\nuse std::path::Path;\n\nfn setup_parity_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n insert_fixture_data(&conn); // shared fixture with known counts\n conn\n}\n```\n\n### Fixture Data\nCreate a deterministic fixture with known quantities:\n- 1 project (gitlab_project_id=1, path_with_namespace=\"group/repo\", web_url=\"https://gitlab.example.com/group/repo\")\n- 15 issues (5 opened, 5 closed, 5 with various states)\n- 10 merge_requests (3 opened, 3 merged, 2 closed, 2 draft)\n- 30 discussions (20 for issues, 10 for MRs)\n- 60 notes (2 per discussion)\n- Insert via direct SQL (same pattern as existing tests in src/core/db.rs)\n\n### Parity Checks\n\n**Dashboard Count Parity:**\n- TUI: call the dashboard fetch function that returns entity counts\n- CLI: call the same count query functions used by `lore --robot count`\n- Assert: issue_count, mr_count, discussion_count, note_count all match\n\n**Issue List Parity:**\n- TUI: call issue list action with default filter (state=all, limit=50, sort=updated_at DESC)\n- CLI: call the issue list query used by `lore --robot issues`\n- Assert: same IIDs in same order, same state values for each\n\n**MR List Parity:**\n- TUI: call MR list action with default filter\n- CLI: call the MR list query used by `lore --robot mrs`\n- Assert: same IIDs in same order, same state values, same draft flags\n\n**Issue Detail Parity:**\n- TUI: call issue detail fetch for a specific IID\n- CLI: call the issue detail query used by `lore --robot issues `\n- Assert: same metadata fields (title, state, author, labels, created_at, updated_at), same discussion count\n\n**Search Parity:**\n- TUI: call search action with a known query term\n- CLI: call the search function used by `lore --robot search`\n- 
Assert: same document IDs returned in same rank order\n\n**Sanitization Parity:**\n- Insert an issue with ANSI escape sequences in the title: \"Normal \\x1b[31mRED\\x1b[0m text\"\n- TUI: fetch and sanitize via terminal safety module\n- CLI: fetch and render via robot mode (which strips ANSI)\n- Assert: both produce clean output without raw escape sequences\n\n## Acceptance Criteria\n- [ ] Dashboard counts: TUI == CLI for issues, MRs, discussions, notes on shared fixture\n- [ ] Issue list: TUI returns same IIDs in same order as CLI query function\n- [ ] MR list: TUI returns same IIDs in same order as CLI query function\n- [ ] Issue detail: TUI metadata matches CLI for title, state, author, discussion count\n- [ ] Search results: same document IDs in same rank order\n- [ ] Sanitization: both strip ANSI escape sequences from issue titles\n- [ ] All tests use in-memory DB (no file I/O, no binary spawning)\n- [ ] Tests are deterministic (fixed fixture, no wall clock dependency)\n\n## Files\n- CREATE: crates/lore-tui/tests/parity_tests.rs\n\n## TDD Anchor\nRED: Write `test_dashboard_count_parity` that creates shared fixture DB, calls both TUI dashboard fetch and CLI count query functions, asserts all counts equal.\nGREEN: Ensure TUI query functions exist and match CLI query logic.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml parity\n\nAdditional tests:\n- test_issue_list_parity\n- test_mr_list_parity\n- test_issue_detail_parity\n- test_search_parity\n- test_sanitization_parity\n\n## Edge Cases\n- CLI and TUI may use different default sort orders — normalize to same ORDER BY in test setup\n- CLI list commands default to limit=50, TUI may default to page size — test with explicit limit\n- Fixture must include edge cases: NULL labels, empty descriptions, issues with work item status set\n- Schema version must match between both code paths (same migration version)\n- FTS index must be populated for search parity (call generate-docs equivalent on 
fixture)\n\n## Dependency Context\n- Uses TUI action functions from Phase 2/3 screen beads (must exist as library code)\n- Uses CLI query functions from src/cli/ (already exist as `lore` library exports)\n- Uses lore::core::db for shared DB setup\n- Uses terminal safety module (bd-3ir1) for sanitization comparison\n- Depends on bd-14hv (soak tests) being complete per phase ordering","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:51.620596Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.629958Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wrw1","depends_on_id":"bd-14hv","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-wrw1","depends_on_id":"bd-2o49","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} -{"id":"bd-wtrm","title":"Task 2: Implement key-decisions heuristic","description":"## Background\n\nThe key-decisions heuristic is the core intelligence of lore explain. 
It identifies notes that explain WHY state/label changes were made by correlating events with notes by the same actor within 60 minutes.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 2\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers (setup_explain_db, insert_test_issue)\n\n## Function Signature\n\n\\`\\`\\`rust\npub fn extract_key_decisions(\n conn: &Connection,\n entity_type: &str, // \"issues\" or \"mrs\"\n entity_id: i64, // LOCAL SQLite row id (from find_explain_issue/mr)\n since: Option, // ms epoch threshold, or None for all time\n max_decisions: usize, // cap on returned decisions\n) -> Result>\n\\`\\`\\`\n\nWire into run_explain():\n\\`\\`\\`rust\nlet key_decisions = if should_include(¶ms.sections, \"key_decisions\") {\n Some(extract_key_decisions(conn, ¶ms.entity_type, entity_local_id, params.since, params.max_decisions)?)\n} else { None };\n\\`\\`\\`\n\n## Algorithm\n\n1. Query resource_state_events for the entity (with optional --since filter)\n2. Query resource_label_events for the entity (with optional --since filter)\n3. Query non-system notes for the entity (with optional --since filter)\n4. Merge state + label events into unified chronological list with (timestamp, actor, description)\n5. For each event, find the FIRST non-system note by the SAME actor within 60 minutes AFTER the event\n6. Pair them as a KeyDecision\n7. 
Cap at max_decisions\n\n## Local Structs (define in explain.rs)\n\n\\`\\`\\`rust\nstruct UnifiedEvent {\n created_at: i64, // ms epoch\n actor: String,\n description: String, // \"state: closed\" or \"label: +bugfix\"\n}\n\nstruct NoteRow {\n body: String,\n author_username: String,\n created_at: i64, // ms epoch\n}\n\\`\\`\\`\n\n## SQL Queries\n\n### id_col resolution\n\\`\\`\\`rust\nlet id_col = if entity_type == \"issues\" { \"issue_id\" } else { \"merge_request_id\" };\n\\`\\`\\`\n\n### State events\n\\`\\`\\`sql\nSELECT state, actor_username, created_at\nFROM resource_state_events\nWHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\nORDER BY created_at ASC\n\\`\\`\\`\n\n### Label events\n\\`\\`\\`sql\nSELECT action, label_name, actor_username, created_at\nFROM resource_label_events\nWHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\nORDER BY created_at ASC\n\\`\\`\\`\n\n### Notes for correlation\n\\`\\`\\`sql\nSELECT n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.{id_col} = ?1 AND n.is_system = 0 AND (?2 IS NULL OR n.created_at >= ?2)\nORDER BY n.created_at ASC\n\\`\\`\\`\n\nWhere {id_col} is issue_id or merge_request_id based on entity_type.\nPass since (Option) as the ?2 parameter — rusqlite handles None as SQL NULL.\n\n## CRITICAL: State column format\n\nThe resource_state_events table has a SINGLE `state` TEXT column containing only the NEW state value (e.g., \"opened\", \"closed\", \"reopened\", \"merged\"). There is NO from_state column.\n\nAction string format for state events: \"state: closed\" (just the new state).\nAction string format for label events: \"label: +bugfix\" (add) or \"label: -wontfix\" (remove).\n\nNOTE: label_name is NULLABLE (migration 012). Use unwrap_or(\"[deleted label]\").\nNOTE: actor_username on both event tables is Option. 
Skip events with None actor (can't correlate).\n\n## Correlation Logic\n\n\\`\\`\\`rust\nconst ONE_HOUR_MS: i64 = 60 * 60 * 1000;\n\nlet mut decisions = Vec::new();\nfor event in &events { // sorted by created_at\n // Find first note by same actor within 60min after event\n if let Some(note) = notes.iter().find(|n|\n n.author_username == event.actor\n && n.created_at >= event.created_at\n && n.created_at <= event.created_at + ONE_HOUR_MS\n ) {\n decisions.push(KeyDecision {\n timestamp: ms_to_iso(event.created_at),\n actor: event.actor.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n }\n}\ndecisions.truncate(max_decisions);\n\\`\\`\\`\n\nUse crate::core::time::ms_to_iso() for timestamp conversion.\nUse crate::cli::render::truncate() for context_note truncation.\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add extract_key_decisions() function with local structs, wire into run_explain()\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\nTest data insertion helpers (add to explain.rs test module, reuse setup_explain_db and insert_test_issue from Task 1):\n\n\\`\\`\\`rust\nfn insert_state_event(conn: &Connection, issue_id: i64, state: &str, actor: &str, created_at: i64) {\n conn.execute(\n \"INSERT INTO resource_state_events (gitlab_id, project_id, issue_id, state, actor_username, created_at) VALUES (?1, 1, ?2, ?3, ?4, ?5)\",\n rusqlite::params![created_at, issue_id, state, actor, created_at],\n ).unwrap();\n}\n\nfn insert_label_event(conn: &Connection, issue_id: i64, action: &str, label: &str, actor: &str, created_at: i64) {\n conn.execute(\n \"INSERT INTO resource_label_events (gitlab_id, project_id, issue_id, action, label_name, actor_username, created_at) VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6)\",\n rusqlite::params![created_at, issue_id, action, label, actor, created_at],\n ).unwrap();\n}\n\nfn insert_note(conn: &Connection, discussion_id: i64, author: &str, body: &str, 
created_at: i64) {\n conn.execute(\n \"INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, author_username, body, created_at, updated_at, last_seen_at) VALUES (?1, ?2, 1, 0, ?3, ?4, ?5, ?5, ?5)\",\n rusqlite::params![created_at, discussion_id, author, body, created_at],\n ).unwrap();\n}\n\nfn insert_discussion(conn: &Connection, issue_id: i64) -> i64 {\n conn.execute(\n \"INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, noteable_type, individual_note, first_note_at, last_note_at, last_seen_at, resolvable, resolved) VALUES ('disc-1', 1, ?1, 'Issue', 0, 0, 0, 0, 0, 0)\",\n [issue_id],\n ).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\nTests:\n1. test_explain_key_decision_heuristic: Insert state change event at T=1000000, insert note by SAME author at T+30min (T+1800000), call extract_key_decisions(), assert 1 decision with action containing \"state:\" and non-empty context_note\n2. test_explain_key_decision_ignores_unrelated_notes: Insert state change by \"alice\", insert note by \"bob\" at T+30min, assert 0 decisions\n3. test_explain_key_decision_label_event: Insert label add event (\"add\", \"bugfix\") + correlated note by same author, assert decision.action starts with \"label: +\"\n4. test_explain_max_decisions: Insert 5 correlated event+note pairs, call with max_decisions: 3, assert exactly 3 decisions returned\n5. 
test_explain_since_scopes_events: Insert event at T-60d and event at T-10d, call with since: Some(T-30d epoch), assert only recent event appears\n\n### GREEN — Implement extract_key_decisions() as described above\n\n### Verify:\ncargo test explain::tests::test_explain_key_decision && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_key_decision_heuristic passes\n- [ ] test_explain_key_decision_ignores_unrelated_notes passes\n- [ ] test_explain_key_decision_label_event passes\n- [ ] test_explain_max_decisions passes\n- [ ] test_explain_since_scopes_events passes\n- [ ] Heuristic correctly correlates events with explanatory notes\n- [ ] --max-decisions and --since respected\n- [ ] cargo clippy and cargo fmt clean","status":"open","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:15.975293Z","created_by":"tayloreernisse","updated_at":"2026-03-10T17:50:17.890556Z","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-wtrm","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.638294Z","created_by":"tayloreernisse"},{"issue_id":"bd-wtrm","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:15.976973Z","created_by":"tayloreernisse"}]} +{"id":"bd-wtrm","title":"Task 2: Implement key-decisions heuristic","description":"## Background\n\nThe key-decisions heuristic is the core intelligence of lore explain. 
It identifies notes that explain WHY state/label changes were made by correlating events with notes by the same actor within 60 minutes.\n\n**Parent bead:** bd-9lbr (lore explain feature)\n**Spec:** specs/SPEC_explain.md — Task 2\n**Phase:** 2 — Core Logic\n**Depends on:** Task 1 (bd-2i3z) — needs ExplainParams, ExplainResult types, run_explain skeleton, and test helpers (setup_explain_db, insert_test_issue)\n\n## Function Signature\n\n\\`\\`\\`rust\npub fn extract_key_decisions(\n conn: &Connection,\n entity_type: &str, // \"issues\" or \"mrs\"\n entity_id: i64, // LOCAL SQLite row id (from find_explain_issue/mr)\n since: Option, // ms epoch threshold, or None for all time\n max_decisions: usize, // cap on returned decisions\n) -> Result>\n\\`\\`\\`\n\nWire into run_explain():\n\\`\\`\\`rust\nlet key_decisions = if should_include(¶ms.sections, \"key_decisions\") {\n Some(extract_key_decisions(conn, ¶ms.entity_type, entity_local_id, params.since, params.max_decisions)?)\n} else { None };\n\\`\\`\\`\n\n## Algorithm\n\n1. Query resource_state_events for the entity (with optional --since filter)\n2. Query resource_label_events for the entity (with optional --since filter)\n3. Query non-system notes for the entity (with optional --since filter)\n4. Merge state + label events into unified chronological list with (timestamp, actor, description)\n5. For each event, find the FIRST non-system note by the SAME actor within 60 minutes AFTER the event\n6. Pair them as a KeyDecision\n7. 
Cap at max_decisions\n\n## Local Structs (define in explain.rs)\n\n\\`\\`\\`rust\nstruct UnifiedEvent {\n created_at: i64, // ms epoch\n actor: String,\n description: String, // \"state: closed\" or \"label: +bugfix\"\n}\n\nstruct NoteRow {\n body: String,\n author_username: String,\n created_at: i64, // ms epoch\n}\n\\`\\`\\`\n\n## SQL Queries\n\n### id_col resolution\n\\`\\`\\`rust\nlet id_col = if entity_type == \"issues\" { \"issue_id\" } else { \"merge_request_id\" };\n\\`\\`\\`\n\n### State events\n\\`\\`\\`sql\nSELECT state, actor_username, created_at\nFROM resource_state_events\nWHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\nORDER BY created_at ASC\n\\`\\`\\`\n\n### Label events\n\\`\\`\\`sql\nSELECT action, label_name, actor_username, created_at\nFROM resource_label_events\nWHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)\nORDER BY created_at ASC\n\\`\\`\\`\n\n### Notes for correlation\n\\`\\`\\`sql\nSELECT n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.{id_col} = ?1 AND n.is_system = 0 AND (?2 IS NULL OR n.created_at >= ?2)\nORDER BY n.created_at ASC\n\\`\\`\\`\n\nWhere {id_col} is issue_id or merge_request_id based on entity_type.\nPass since (Option) as the ?2 parameter — rusqlite handles None as SQL NULL.\n\n## CRITICAL: State column format\n\nThe resource_state_events table has a SINGLE `state` TEXT column containing only the NEW state value (e.g., \"opened\", \"closed\", \"reopened\", \"merged\"). There is NO from_state column.\n\nAction string format for state events: \"state: closed\" (just the new state).\nAction string format for label events: \"label: +bugfix\" (add) or \"label: -wontfix\" (remove).\n\nNOTE: label_name is NULLABLE (migration 012). Use unwrap_or(\"[deleted label]\").\nNOTE: actor_username on both event tables is Option. 
Skip events with None actor (can't correlate).\n\n## Correlation Logic\n\n\\`\\`\\`rust\nconst ONE_HOUR_MS: i64 = 60 * 60 * 1000;\n\nlet mut decisions = Vec::new();\nfor event in &events { // sorted by created_at\n // Find first note by same actor within 60min after event\n if let Some(note) = notes.iter().find(|n|\n n.author_username == event.actor\n && n.created_at >= event.created_at\n && n.created_at <= event.created_at + ONE_HOUR_MS\n ) {\n decisions.push(KeyDecision {\n timestamp: ms_to_iso(event.created_at),\n actor: event.actor.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n }\n}\ndecisions.truncate(max_decisions);\n\\`\\`\\`\n\nUse crate::core::time::ms_to_iso() for timestamp conversion.\nUse crate::cli::render::truncate() for context_note truncation.\n\n## Files to Modify\n\n- src/cli/commands/explain.rs — add extract_key_decisions() function with local structs, wire into run_explain()\n\n## TDD Workflow (Red-Green)\n\n### RED — Write these tests first, confirm they FAIL:\n\nTest data insertion helpers (add to explain.rs test module, reuse setup_explain_db and insert_test_issue from Task 1):\n\n\\`\\`\\`rust\nfn insert_state_event(conn: &Connection, issue_id: i64, state: &str, actor: &str, created_at: i64) {\n conn.execute(\n \"INSERT INTO resource_state_events (gitlab_id, project_id, issue_id, state, actor_username, created_at) VALUES (?1, 1, ?2, ?3, ?4, ?5)\",\n rusqlite::params![created_at, issue_id, state, actor, created_at],\n ).unwrap();\n}\n\nfn insert_label_event(conn: &Connection, issue_id: i64, action: &str, label: &str, actor: &str, created_at: i64) {\n conn.execute(\n \"INSERT INTO resource_label_events (gitlab_id, project_id, issue_id, action, label_name, actor_username, created_at) VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6)\",\n rusqlite::params![created_at, issue_id, action, label, actor, created_at],\n ).unwrap();\n}\n\nfn insert_note(conn: &Connection, discussion_id: i64, author: &str, body: &str, 
created_at: i64) {\n conn.execute(\n \"INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, author_username, body, created_at, updated_at, last_seen_at) VALUES (?1, ?2, 1, 0, ?3, ?4, ?5, ?5, ?5)\",\n rusqlite::params![created_at, discussion_id, author, body, created_at],\n ).unwrap();\n}\n\nfn insert_discussion(conn: &Connection, issue_id: i64) -> i64 {\n conn.execute(\n \"INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, noteable_type, individual_note, first_note_at, last_note_at, last_seen_at, resolvable, resolved) VALUES ('disc-1', 1, ?1, 'Issue', 0, 0, 0, 0, 0, 0)\",\n [issue_id],\n ).unwrap();\n conn.last_insert_rowid()\n}\n\\`\\`\\`\n\nTests:\n1. test_explain_key_decision_heuristic: Insert state change event at T=1000000, insert note by SAME author at T+30min (T+1800000), call extract_key_decisions(), assert 1 decision with action containing \"state:\" and non-empty context_note\n2. test_explain_key_decision_ignores_unrelated_notes: Insert state change by \"alice\", insert note by \"bob\" at T+30min, assert 0 decisions\n3. test_explain_key_decision_label_event: Insert label add event (\"add\", \"bugfix\") + correlated note by same author, assert decision.action starts with \"label: +\"\n4. test_explain_max_decisions: Insert 5 correlated event+note pairs, call with max_decisions: 3, assert exactly 3 decisions returned\n5. 
test_explain_since_scopes_events: Insert event at T-60d and event at T-10d, call with since: Some(T-30d epoch), assert only recent event appears\n\n### GREEN — Implement extract_key_decisions() as described above\n\n### Verify:\ncargo test explain::tests::test_explain_key_decision && cargo clippy --all-targets -- -D warnings\n\n## Acceptance Criteria\n\n- [ ] test_explain_key_decision_heuristic passes\n- [ ] test_explain_key_decision_ignores_unrelated_notes passes\n- [ ] test_explain_key_decision_label_event passes\n- [ ] test_explain_max_decisions passes\n- [ ] test_explain_since_scopes_events passes\n- [ ] Heuristic correctly correlates events with explanatory notes\n- [ ] --max-decisions and --since respected\n- [ ] cargo clippy and cargo fmt clean","status":"closed","priority":2,"issue_type":"task","created_at":"2026-03-10T17:37:15.975293Z","created_by":"tayloreernisse","updated_at":"2026-03-10T18:37:44.492583Z","closed_at":"2026-03-10T18:37:44.492531Z","close_reason":"Key-decisions heuristic implemented with state/label event correlation, 5 tests passing","compaction_level":0,"original_size":0,"labels":["cli-imp","explain"],"dependencies":[{"issue_id":"bd-wtrm","depends_on_id":"bd-2i3z","type":"blocks","created_at":"2026-03-10T17:38:18.638294Z","created_by":"tayloreernisse"},{"issue_id":"bd-wtrm","depends_on_id":"bd-9lbr","type":"parent-child","created_at":"2026-03-10T17:37:15.976973Z","created_by":"tayloreernisse"}]} {"id":"bd-wzqi","title":"Implement Command Palette (state + view)","description":"## Background\nThe Command Palette is a modal overlay (Ctrl+P) that provides fuzzy-match access to all commands. 
It uses FrankenTUI's built-in CommandPalette widget and is populated from the CommandRegistry.\n\n## Approach\nState (state/command_palette.rs):\n- CommandPaletteState: wraps ftui CommandPalette widget state\n- input (String), filtered_commands (Vec), selected_index (usize), visible (bool)\n\nView (view/command_palette.rs):\n- Modal overlay centered on screen (60% width, 50% height)\n- Text input at top for fuzzy search\n- Scrollable list of matching commands with keybinding hints\n- Enter executes selected command, Esc closes palette\n- Fuzzy matching: subsequence match on command label and help text\n\nIntegration:\n- Ctrl+P from any screen opens palette (handled in interpret_key stage 2)\n- execute_palette_action() in app.rs converts selected command to Msg\n\n## Acceptance Criteria\n- [ ] Ctrl+P opens palette from any screen in Normal mode\n- [ ] Fuzzy matching filters commands as user types\n- [ ] Commands show label + keybinding + help text\n- [ ] Enter executes selected command\n- [ ] Esc closes palette without action\n- [ ] Palette populated from CommandRegistry (single source of truth)\n- [ ] Modal renders on top of current screen content\n\n## Files\n- MODIFY: crates/lore-tui/src/state/command_palette.rs (expand from stub)\n- CREATE: crates/lore-tui/src/view/command_palette.rs\n\n## TDD Anchor\nRED: Write test_palette_fuzzy_match that creates registry with 5 commands, filters with \"iss\", asserts Issue-related commands match.\nGREEN: Implement fuzzy matching on command labels.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_palette_fuzzy\n\n## Edge Cases\n- Empty search shows all commands\n- Very long command labels: truncate with ellipsis\n- Command not available on current screen: show but gray out\n- Palette should not steal focus from text inputs — only opens in Normal mode\n\n## Dependency Context\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses ftui CommandPalette widget from FrankenTUI.\nUses 
InputMode::Palette from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:37.250065Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.175286Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wzqi","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-wzqi","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} {"id":"bd-x8oq","title":"Write surgical_tests.rs with TDD test suite","description":"## Background\n\nThe surgical sync module (`src/ingestion/surgical.rs` from bd-3sez) needs a comprehensive test suite. Tests use in-memory SQLite (no real GitLab or Ollama) and wiremock for HTTP mocks. The test file lives at `src/ingestion/surgical_tests.rs` and is included via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;` in surgical.rs.\n\nKey testing constraints:\n- In-memory DB pattern: `create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)`\n- Test project insert: `INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)` (no `name`/`last_seen_at` columns)\n- `GitLabIssue` required fields: `id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author`, `web_url`\n- `GitLabMergeRequest` adds: `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- `updated_at` is `String` (ISO 8601) in GitLab types, e.g. 
`\"2026-02-17T12:00:00.000+00:00\"`\n- `SourceType` enum variants: `Issue`, `MergeRequest`, `Discussion`, `Note`\n- `dirty_sources` table: `(source_type TEXT, source_id INTEGER)` primary key\n\n## Approach\n\nCreate `src/ingestion/surgical_tests.rs` with:\n\n### Test Helpers\n- `setup_db() -> Connection` — in-memory DB with migrations + test project row\n- `make_test_issue(iid: i64, updated_at: &str) -> GitLabIssue` — minimal valid JSON fixture\n- `make_test_mr(iid: i64, updated_at: &str) -> GitLabMergeRequest` — minimal valid JSON fixture\n- `get_db_updated_at(conn, table, iid) -> Option` — helper to query DB updated_at for assertions\n- `get_dirty_keys(conn) -> Vec<(String, i64)>` — query dirty_sources for assertions\n\n### Sync Tests (13)\n1. `test_ingest_issue_by_iid_upserts_and_marks_dirty` — fresh issue ingest, verify DB row + dirty_sources entry\n2. `test_ingest_mr_by_iid_upserts_and_marks_dirty` — fresh MR ingest, verify DB row + dirty_sources entry\n3. `test_toctou_skips_stale_issue` — insert issue at T1, call ingest with payload at T1, assert skipped_stale=true and no dirty mark\n4. `test_toctou_skips_stale_mr` — same for MRs\n5. `test_toctou_allows_newer_issue` — DB has T1, payload has T2 (T2 > T1), assert upserted=true\n6. `test_toctou_allows_newer_mr` — same for MRs\n7. `test_is_stale_parses_iso8601` — unit test: `\"2026-02-17T12:00:00.000+00:00\"` parses to correct ms-epoch\n8. `test_is_stale_handles_none_db_value` — first ingest, no DB row, assert not stale\n9. `test_is_stale_with_z_suffix` — `\"2026-02-17T12:00:00Z\"` also parses correctly\n10. `test_ingest_issue_returns_dirty_source_keys` — verify `dirty_source_keys` contains `(SourceType::Issue, local_id)`\n11. `test_ingest_mr_returns_dirty_source_keys` — verify MR dirty source keys\n12. `test_ingest_issue_updates_existing` — ingest same IID twice with newer updated_at, verify update\n13. `test_ingest_mr_updates_existing` — same for MRs\n\n### Async Preflight Test (1, wiremock)\n14. 
`test_preflight_fetch_returns_issues_and_mrs` — wiremock GET `/projects/:id/issues?iids[]=42` returns 200 with fixture, verify PreflightResult.issues has 1 entry\n\n### Integration Stubs (4, for bd-3jqx)\n15. `test_surgical_cancellation_during_preflight` — stub: signal.cancel() before preflight, verify early return\n16. `test_surgical_timeout_during_fetch` — stub: wiremock delay exceeds timeout\n17. `test_surgical_embed_isolation` — stub: verify only surgical docs get embedded\n18. `test_surgical_payload_integrity` — stub: verify ingested data matches GitLab payload exactly\n\n## Acceptance Criteria\n\n- [ ] All 13 sync tests pass with in-memory SQLite\n- [ ] Async preflight test passes with wiremock\n- [ ] 4 integration stubs compile and are marked `#[ignore]` (implemented in bd-3jqx)\n- [ ] Test helpers produce valid GitLabIssue/GitLabMergeRequest fixtures that pass `transform_issue`/`transform_merge_request`\n- [ ] No flaky tests: deterministic timestamps, no real network calls\n- [ ] File wired into surgical.rs via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;`\n\n## Files\n\n- `src/ingestion/surgical_tests.rs` (NEW)\n- `src/ingestion/surgical.rs` (add `#[cfg(test)]` module path — created in bd-3sez)\n\n## TDD Anchor\n\nThis bead IS the test suite. Tests are written first (TDD red phase), then bd-3sez implements the production code to make them pass (green phase). 
Specific test signatures:\n\n```rust\n#[test]\nfn test_ingest_issue_by_iid_upserts_and_marks_dirty() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n let config = Config::default();\n let result = ingest_issue_by_iid(&conn, &config, /*project_id=*/1, &issue).unwrap();\n assert!(result.upserted);\n assert!(!result.skipped_stale);\n let dirty = get_dirty_keys(&conn);\n assert!(dirty.contains(&(\"issue\".to_string(), /*local_id from DB*/)));\n}\n\n#[test]\nfn test_toctou_skips_stale_issue() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n // Ingest same timestamp again\n let result = ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n assert!(result.skipped_stale);\n}\n\n#[tokio::test]\nasync fn test_preflight_fetch_returns_issues_and_mrs() {\n let mock = MockServer::start().await;\n // ... wiremock setup ...\n}\n```\n\n## Edge Cases\n\n- `make_test_issue` must produce all required fields (`id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author` with `username` and `id`, `web_url`) or `transform_issue` will fail\n- `make_test_mr` additionally needs `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- ISO 8601 fixtures must use `+00:00` suffix (GitLab format), not `Z`\n- Integration stubs must be `#[ignore]` so they do not fail CI before bd-3jqx implements them\n- Test DB needs `run_migrations` to create all tables including `dirty_sources`, `documents`, `issues`, `merge_requests`\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: Cannot compile tests until surgical.rs module exists (circular co-dependency — develop together)\n- **Blocks bd-3jqx**: Integration test stubs are implemented in that bead\n- **No other blockers**: Uses only in-memory DB and wiremock, no external 
dependencies","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:05.498388Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:25:57.434371Z","closed_at":"2026-02-18T19:25:57.434313Z","close_reason":"Merged into bd-3sez: tests belong with the code they test, not in a separate bead. TDD is the default workflow, not a separate deliverable.","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-xhz","title":"[CP1] GitLab client pagination methods","description":"## Background\n\nGitLab pagination methods enable fetching large result sets (issues, discussions) as async streams. The client uses `x-next-page` headers to determine continuation and applies cursor rewind for tuple-based incremental sync.\n\n## Approach\n\nAdd pagination methods to GitLabClient using `async-stream` crate:\n\n### Methods to Add\n\n```rust\nimpl GitLabClient {\n /// Paginate through issues for a project.\n pub fn paginate_issues(\n &self,\n gitlab_project_id: i64,\n updated_after: Option, // ms epoch cursor\n cursor_rewind_seconds: u32,\n ) -> Pin> + Send + '_>>\n\n /// Paginate through discussions for an issue.\n pub fn paginate_issue_discussions(\n &self,\n gitlab_project_id: i64,\n issue_iid: i64,\n ) -> Pin> + Send + '_>>\n\n /// Make request and return response with headers for pagination.\n async fn request_with_headers(\n &self,\n path: &str,\n params: &[(&str, String)],\n ) -> Result<(T, HeaderMap)>\n}\n```\n\n### Pagination Logic\n\n1. Start at page 1, per_page=100\n2. For issues: add scope=all, state=all, order_by=updated_at, sort=asc\n3. Apply cursor rewind: `updated_after = cursor - rewind_seconds` (clamped to 0)\n4. Yield each item from response\n5. Check `x-next-page` header for continuation\n6. 
Stop when header is empty/absent OR response is empty\n\n### Cursor Rewind\n\n```rust\nif let Some(ts) = updated_after {\n let rewind_ms = (cursor_rewind_seconds as i64) * 1000;\n let rewound = (ts - rewind_ms).max(0); // Clamp to avoid underflow\n // Convert to ISO 8601 for updated_after param\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `paginate_issues` returns Stream of GitLabIssue\n- [ ] `paginate_issues` adds scope=all, state=all, order_by=updated_at, sort=asc\n- [ ] `paginate_issues` applies cursor rewind with max(0) clamping\n- [ ] `paginate_issue_discussions` returns Stream of GitLabDiscussion\n- [ ] Both methods follow x-next-page header until empty\n- [ ] Both methods stop on empty response (fallback)\n- [ ] `request_with_headers` returns (T, HeaderMap) tuple\n\n## Files\n\n- src/gitlab/client.rs (edit - add methods)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/pagination_tests.rs\n#[tokio::test] async fn fetches_all_pages_when_multiple_exist()\n#[tokio::test] async fn respects_per_page_parameter()\n#[tokio::test] async fn follows_x_next_page_header_until_empty()\n#[tokio::test] async fn falls_back_to_empty_page_stop_if_headers_missing()\n#[tokio::test] async fn applies_cursor_rewind_for_tuple_semantics()\n#[tokio::test] async fn clamps_negative_rewind_to_zero()\n```\n\nGREEN: Implement pagination methods with async-stream\n\nVERIFY: `cargo test pagination`\n\n## Edge Cases\n\n- cursor_updated_at near zero - rewind must not underflow (use max(0))\n- GitLab returns empty x-next-page - treat as end of pages\n- GitLab omits pagination headers entirely - use empty response as stop condition\n- DateTime conversion fails - omit updated_after and fetch all (safe fallback)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.222168Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:28:39.192876Z","closed_at":"2026-01-25T22:28:39.192815Z","close_reason":"Implemented paginate_issues and paginate_issue_discussions with 
async-stream, cursor rewind with max(0) clamping, x-next-page header following, 4 unit tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-xhz","depends_on_id":"bd-1np","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"},{"issue_id":"bd-xhz","depends_on_id":"bd-2ys","type":"blocks","created_at":"2026-03-04T20:02:52Z","created_by":"import"}]} diff --git a/.beads/last-touched b/.beads/last-touched index d7dd706..220e7cb 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -bd-2i3z +bd-9lbr diff --git a/src/app/robot_docs.rs b/src/app/robot_docs.rs index 1153e5c..52788c9 100644 --- a/src/app/robot_docs.rs +++ b/src/app/robot_docs.rs @@ -316,6 +316,17 @@ fn handle_robot_docs(robot_mode: bool, brief: bool) -> Result<(), Box", "", "-p/--project ", "--sections ", "--no-timeline", "--max-decisions ", "--since "], + "valid_sections": ["entity", "description", "key_decisions", "activity", "open_threads", "related", "timeline"], + "example": "lore --robot explain issues 42 --sections key_decisions,activity --since 30d", + "response_schema": { + "ok": "bool", + "data": {"entity": "{type:string, iid:int, title:string, state:string, author:string, assignees:[string], labels:[string], created_at:string, updated_at:string, url:string?, status_name:string?}", "description_excerpt": "string?", "key_decisions": "[{timestamp:string, actor:string, action:string, context_note:string}]?", "activity": "{state_changes:int, label_changes:int, notes:int, first_event:string?, last_event:string?}?", "open_threads": "[{discussion_id:string, started_by:string, started_at:string, note_count:int, last_note_at:string}]?", "related": "{closing_mrs:[{iid:int, title:string, state:string, web_url:string?}], related_issues:[{entity_type:string, iid:int, title:string?, reference_type:string}]}?", "timeline_excerpt": "[{timestamp:string, event_type:string, actor:string?, summary:string}]?"}, + "meta": {"elapsed_ms": "int"} + } + }, 
"notes": { "description": "List notes from discussions with rich filtering", "flags": ["--limit/-n ", "--author/-a ", "--note-type ", "--contains ", "--for-issue ", "--for-mr ", "-p/--project ", "--since ", "--until ", "--path ", "--resolution ", "--sort ", "--asc", "--include-system", "--note-id ", "--gitlab-note-id ", "--discussion-id ", "--fields ", "--open"], diff --git a/src/cli/autocorrect.rs b/src/cli/autocorrect.rs index ba4920a..14c8192 100644 --- a/src/cli/autocorrect.rs +++ b/src/cli/autocorrect.rs @@ -209,6 +209,16 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[ ], ), ("drift", &["--threshold", "--project"]), + ( + "explain", + &[ + "--project", + "--sections", + "--no-timeline", + "--max-decisions", + "--since", + ], + ), ( "notes", &[ @@ -388,6 +398,7 @@ const CANONICAL_SUBCOMMANDS: &[&str] = &[ "file-history", "trace", "drift", + "explain", "related", "cron", "token", diff --git a/src/cli/commands/explain.rs b/src/cli/commands/explain.rs new file mode 100644 index 0000000..04172f2 --- /dev/null +++ b/src/cli/commands/explain.rs @@ -0,0 +1,1946 @@ +use rusqlite::Connection; +use serde::Serialize; + +use crate::core::error::{LoreError, Result}; +use crate::core::project::resolve_project; +use crate::core::time::ms_to_iso; +use crate::timeline::collect::collect_events; +use crate::timeline::seed::seed_timeline_direct; + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +/// Parameters controlling explain behavior. 
+pub struct ExplainParams { + pub entity_type: String, + pub iid: i64, + pub project: Option, + pub sections: Option>, + pub no_timeline: bool, + pub max_decisions: usize, + pub since: Option, +} + +#[derive(Debug, Serialize)] +pub struct ExplainResult { + pub entity: EntitySummary, + #[serde(skip_serializing_if = "Option::is_none")] + pub description_excerpt: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub key_decisions: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub activity: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub open_threads: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + pub related: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub timeline_excerpt: Option>, +} + +#[derive(Debug, Serialize)] +pub struct EntitySummary { + #[serde(rename = "type")] + pub entity_type: String, + pub iid: i64, + pub title: String, + pub state: String, + pub author: String, + pub assignees: Vec, + pub labels: Vec, + pub created_at: String, + pub updated_at: String, + pub url: Option, + pub status_name: Option, +} + +#[derive(Debug, Serialize)] +pub struct KeyDecision { + pub timestamp: String, + pub actor: String, + pub action: String, + pub context_note: String, +} + +#[derive(Debug, Serialize)] +pub struct ActivitySummary { + pub state_changes: usize, + pub label_changes: usize, + pub notes: usize, + pub first_event: Option, + pub last_event: Option, +} + +#[derive(Debug, Serialize)] +pub struct OpenThread { + pub discussion_id: String, + pub started_by: String, + pub started_at: String, + pub note_count: usize, + pub last_note_at: String, +} + +#[derive(Debug, Serialize)] +pub struct RelatedEntities { + pub closing_mrs: Vec, + pub related_issues: Vec, +} + +#[derive(Debug, Serialize)] +pub struct ClosingMrInfo { + pub iid: i64, + pub title: String, + pub state: String, + pub web_url: Option, +} + +#[derive(Debug, Serialize)] +pub struct RelatedEntityInfo { + pub entity_type: String, 
+ pub iid: i64, + pub title: Option, + pub reference_type: String, +} + +#[derive(Debug, Serialize)] +pub struct TimelineEventSummary { + pub timestamp: String, + pub event_type: String, + pub actor: Option, + pub summary: String, +} + +// --------------------------------------------------------------------------- +// Section filtering helper +// --------------------------------------------------------------------------- + +fn should_include(sections: &Option>, name: &str) -> bool { + sections + .as_ref() + .is_none_or(|s| s.iter().any(|sec| sec == name)) +} + +// --------------------------------------------------------------------------- +// Entity resolution (copied from show/ patterns — private there) +// --------------------------------------------------------------------------- + +struct ExplainIssueRow { + id: i64, + iid: i64, + title: String, + state: String, + author_username: String, + created_at: i64, + updated_at: i64, + web_url: Option, + project_path: String, + status_name: Option, +} + +struct ExplainMrRow { + id: i64, + iid: i64, + title: String, + state: String, + author_username: String, + created_at: i64, + updated_at: i64, + web_url: Option, + project_path: String, +} + +fn find_explain_issue( + conn: &Connection, + iid: i64, + project_filter: Option<&str>, +) -> Result<(EntitySummary, i64, String)> { + let (sql, params): (&str, Vec>) = match project_filter { + Some(project) => { + let project_id = resolve_project(conn, project)?; + ( + "SELECT i.id, i.iid, i.title, i.state, i.author_username, + i.created_at, i.updated_at, i.web_url, p.path_with_namespace, + i.status_name + FROM issues i + JOIN projects p ON i.project_id = p.id + WHERE i.iid = ? 
AND i.project_id = ?", + vec![Box::new(iid), Box::new(project_id)], + ) + } + None => ( + "SELECT i.id, i.iid, i.title, i.state, i.author_username, + i.created_at, i.updated_at, i.web_url, p.path_with_namespace, + i.status_name + FROM issues i + JOIN projects p ON i.project_id = p.id + WHERE i.iid = ?", + vec![Box::new(iid)], + ), + }; + + let param_refs: Vec<&dyn rusqlite::ToSql> = params.iter().map(|p| p.as_ref()).collect(); + let mut stmt = conn.prepare(sql)?; + let rows: Vec = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(ExplainIssueRow { + id: row.get(0)?, + iid: row.get(1)?, + title: row.get(2)?, + state: row.get(3)?, + author_username: row.get(4)?, + created_at: row.get(5)?, + updated_at: row.get(6)?, + web_url: row.get(7)?, + project_path: row.get(8)?, + status_name: row.get(9)?, + }) + })? + .collect::, _>>()?; + + match rows.len() { + 0 => Err(LoreError::NotFound(format!("Issue #{iid} not found"))), + 1 => { + let r = rows.into_iter().next().unwrap(); + let local_id = r.id; + let project_path = r.project_path.clone(); + let labels = get_issue_labels(conn, r.id)?; + let assignees = get_issue_assignees(conn, r.id)?; + let summary = EntitySummary { + entity_type: "issue".to_string(), + iid: r.iid, + title: r.title, + state: r.state, + author: r.author_username, + assignees, + labels, + created_at: ms_to_iso(r.created_at), + updated_at: ms_to_iso(r.updated_at), + url: r.web_url, + status_name: r.status_name, + }; + Ok((summary, local_id, project_path)) + } + _ => { + let projects: Vec = rows.iter().map(|r| r.project_path.clone()).collect(); + Err(LoreError::Ambiguous(format!( + "Issue #{iid} exists in multiple projects: {}. 
Use --project to specify.", + projects.join(", ") + ))) + } + } +} + +fn find_explain_mr( + conn: &Connection, + iid: i64, + project_filter: Option<&str>, +) -> Result<(EntitySummary, i64, String)> { + let (sql, params): (&str, Vec>) = match project_filter { + Some(project) => { + let project_id = resolve_project(conn, project)?; + ( + "SELECT m.id, m.iid, m.title, m.state, m.author_username, + m.created_at, m.updated_at, m.web_url, p.path_with_namespace + FROM merge_requests m + JOIN projects p ON m.project_id = p.id + WHERE m.iid = ? AND m.project_id = ?", + vec![Box::new(iid), Box::new(project_id)], + ) + } + None => ( + "SELECT m.id, m.iid, m.title, m.state, m.author_username, + m.created_at, m.updated_at, m.web_url, p.path_with_namespace + FROM merge_requests m + JOIN projects p ON m.project_id = p.id + WHERE m.iid = ?", + vec![Box::new(iid)], + ), + }; + + let param_refs: Vec<&dyn rusqlite::ToSql> = params.iter().map(|p| p.as_ref()).collect(); + let mut stmt = conn.prepare(sql)?; + let rows: Vec = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(ExplainMrRow { + id: row.get(0)?, + iid: row.get(1)?, + title: row.get(2)?, + state: row.get(3)?, + author_username: row.get(4)?, + created_at: row.get(5)?, + updated_at: row.get(6)?, + web_url: row.get(7)?, + project_path: row.get(8)?, + }) + })? 
+ .collect::, _>>()?; + + match rows.len() { + 0 => Err(LoreError::NotFound(format!("MR !{iid} not found"))), + 1 => { + let r = rows.into_iter().next().unwrap(); + let local_id = r.id; + let project_path = r.project_path.clone(); + let labels = get_mr_labels(conn, r.id)?; + let assignees = get_mr_assignees(conn, r.id)?; + let summary = EntitySummary { + entity_type: "merge_request".to_string(), + iid: r.iid, + title: r.title, + state: r.state, + author: r.author_username, + assignees, + labels, + created_at: ms_to_iso(r.created_at), + updated_at: ms_to_iso(r.updated_at), + url: r.web_url, + status_name: None, + }; + Ok((summary, local_id, project_path)) + } + _ => { + let projects: Vec = rows.iter().map(|r| r.project_path.clone()).collect(); + Err(LoreError::Ambiguous(format!( + "MR !{iid} exists in multiple projects: {}. Use --project to specify.", + projects.join(", ") + ))) + } + } +} + +fn get_issue_labels(conn: &Connection, issue_id: i64) -> Result> { + let mut stmt = conn.prepare( + "SELECT l.name FROM labels l + JOIN issue_labels il ON l.id = il.label_id + WHERE il.issue_id = ? + ORDER BY l.name", + )?; + let labels: Vec = stmt + .query_map([issue_id], |row| row.get(0))? + .collect::, _>>()?; + Ok(labels) +} + +fn get_issue_assignees(conn: &Connection, issue_id: i64) -> Result> { + let mut stmt = conn.prepare( + "SELECT username FROM issue_assignees + WHERE issue_id = ? + ORDER BY username", + )?; + let assignees: Vec = stmt + .query_map([issue_id], |row| row.get(0))? + .collect::, _>>()?; + Ok(assignees) +} + +fn get_mr_labels(conn: &Connection, mr_id: i64) -> Result> { + let mut stmt = conn.prepare( + "SELECT l.name FROM labels l + JOIN mr_labels ml ON l.id = ml.label_id + WHERE ml.merge_request_id = ? + ORDER BY l.name", + )?; + let labels: Vec = stmt + .query_map([mr_id], |row| row.get(0))? 
+ .collect::, _>>()?; + Ok(labels) +} + +fn get_mr_assignees(conn: &Connection, mr_id: i64) -> Result> { + let mut stmt = conn.prepare( + "SELECT username FROM mr_assignees + WHERE merge_request_id = ? + ORDER BY username", + )?; + let assignees: Vec = stmt + .query_map([mr_id], |row| row.get(0))? + .collect::, _>>()?; + Ok(assignees) +} + +// --------------------------------------------------------------------------- +// Description excerpt helper +// --------------------------------------------------------------------------- + +fn truncate_description(desc: Option<&str>, max_len: usize) -> String { + match desc { + None | Some("") => "(no description)".to_string(), + Some(s) => { + if s.len() <= max_len { + s.to_string() + } else { + let boundary = s.floor_char_boundary(max_len); + format!("{}...", &s[..boundary]) + } + } + } +} + +// --------------------------------------------------------------------------- +// Core: run_explain +// --------------------------------------------------------------------------- + +pub fn run_explain(conn: &Connection, params: &ExplainParams) -> Result { + let project_filter = params.project.as_deref(); + + let (entity_summary, entity_local_id, _project_path, description) = + if params.entity_type == "issues" { + let (summary, local_id, path) = find_explain_issue(conn, params.iid, project_filter)?; + let desc = get_issue_description(conn, local_id)?; + (summary, local_id, path, desc) + } else { + let (summary, local_id, path) = find_explain_mr(conn, params.iid, project_filter)?; + let desc = get_mr_description(conn, local_id)?; + (summary, local_id, path, desc) + }; + + let description_excerpt = if should_include(¶ms.sections, "description") { + Some(truncate_description(description.as_deref(), 500)) + } else { + None + }; + + let key_decisions = if should_include(¶ms.sections, "key_decisions") { + Some(extract_key_decisions( + conn, + ¶ms.entity_type, + entity_local_id, + params.since, + params.max_decisions, + )?) 
+ } else { + None + }; + + let activity = if should_include(¶ms.sections, "activity") { + Some(build_activity_summary( + conn, + ¶ms.entity_type, + entity_local_id, + params.since, + )?) + } else { + None + }; + + let open_threads = if should_include(¶ms.sections, "open_threads") { + Some(fetch_open_threads( + conn, + ¶ms.entity_type, + entity_local_id, + )?) + } else { + None + }; + + let related = if should_include(¶ms.sections, "related") { + Some(fetch_related_entities( + conn, + ¶ms.entity_type, + entity_local_id, + )?) + } else { + None + }; + + let timeline_excerpt = if !params.no_timeline && should_include(¶ms.sections, "timeline") { + build_timeline_excerpt_from_pipeline(conn, &entity_summary, params) + } else { + None + }; + + Ok(ExplainResult { + entity: entity_summary, + description_excerpt, + key_decisions, + activity, + open_threads, + related, + timeline_excerpt, + }) +} + +fn get_issue_description(conn: &Connection, issue_id: i64) -> Result> { + let desc: Option = conn.query_row( + "SELECT description FROM issues WHERE id = ?", + [issue_id], + |row| row.get(0), + )?; + Ok(desc) +} + +fn get_mr_description(conn: &Connection, mr_id: i64) -> Result> { + let desc: Option = conn.query_row( + "SELECT description FROM merge_requests WHERE id = ?", + [mr_id], + |row| row.get(0), + )?; + Ok(desc) +} + +// --------------------------------------------------------------------------- +// Key-decisions heuristic (Task 2) +// --------------------------------------------------------------------------- + +struct UnifiedEvent { + created_at: i64, + actor: String, + description: String, +} + +struct NoteRow { + body: String, + author: String, + created_at: i64, +} + +/// 60 minutes in milliseconds — the correlation window for matching +/// a non-system note to a preceding state/label event by the same actor. 
+const DECISION_WINDOW_MS: i64 = 60 * 60 * 1000; + +/// Maximum length (in bytes, snapped to a char boundary) for the +/// `context_note` field in a `KeyDecision`. +const NOTE_TRUNCATE_LEN: usize = 500; + +fn truncate_note(text: &str, max_len: usize) -> String { + if text.len() <= max_len { + text.to_string() + } else { + let boundary = text.floor_char_boundary(max_len); + format!("{}...", &text[..boundary]) + } +} + +fn id_column_for(entity_type: &str) -> &'static str { + if entity_type == "issues" { + "issue_id" + } else { + "merge_request_id" + } +} + +fn query_state_events( + conn: &Connection, + entity_type: &str, + entity_id: i64, + since: Option, +) -> Result> { + let id_col = id_column_for(entity_type); + let sql = format!( + "SELECT state, actor_username, created_at \ + FROM resource_state_events \ + WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2) \ + ORDER BY created_at" + ); + let mut stmt = conn.prepare(&sql)?; + let rows = stmt + .query_map(rusqlite::params![entity_id, since], |row| { + let state: String = row.get(0)?; + let actor: Option = row.get(1)?; + let created_at: i64 = row.get(2)?; + Ok(UnifiedEvent { + created_at, + actor: actor.unwrap_or_default(), + description: format!("state: {state}"), + }) + })? 
+ .collect::, _>>()?; + Ok(rows) +} + +fn query_label_events( + conn: &Connection, + entity_type: &str, + entity_id: i64, + since: Option, +) -> Result> { + let id_col = id_column_for(entity_type); + let sql = format!( + "SELECT action, label_name, actor_username, created_at \ + FROM resource_label_events \ + WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2) \ + ORDER BY created_at" + ); + let mut stmt = conn.prepare(&sql)?; + let rows = stmt + .query_map(rusqlite::params![entity_id, since], |row| { + let action: String = row.get(0)?; + let label_name: Option = row.get(1)?; + let actor: Option = row.get(2)?; + let created_at: i64 = row.get(3)?; + let prefix = if action == "add" { "+" } else { "-" }; + let label = label_name.unwrap_or_else(|| "(unknown)".to_string()); + Ok(UnifiedEvent { + created_at, + actor: actor.unwrap_or_default(), + description: format!("label: {prefix}{label}"), + }) + })? + .collect::, _>>()?; + Ok(rows) +} + +fn query_non_system_notes( + conn: &Connection, + entity_type: &str, + entity_id: i64, + since: Option, +) -> Result> { + let id_col = id_column_for(entity_type); + let sql = format!( + "SELECT n.body, n.author_username, n.created_at \ + FROM notes n \ + JOIN discussions d ON n.discussion_id = d.id \ + WHERE d.{id_col} = ?1 AND n.is_system = 0 \ + AND (?2 IS NULL OR n.created_at >= ?2) \ + ORDER BY n.created_at" + ); + let mut stmt = conn.prepare(&sql)?; + let rows = stmt + .query_map(rusqlite::params![entity_id, since], |row| { + Ok(NoteRow { + body: row.get::<_, Option>(0)?.unwrap_or_default(), + author: row.get::<_, Option>(1)?.unwrap_or_default(), + created_at: row.get(2)?, + }) + })? + .collect::, _>>()?; + Ok(rows) +} + +/// Extract key decisions by correlating state/label events with +/// explanatory notes by the same actor within a 60-minute window. 
+pub fn extract_key_decisions( + conn: &Connection, + entity_type: &str, + entity_id: i64, + since: Option, + max_decisions: usize, +) -> Result> { + let mut events = query_state_events(conn, entity_type, entity_id, since)?; + let mut label_events = query_label_events(conn, entity_type, entity_id, since)?; + events.append(&mut label_events); + events.sort_by_key(|e| e.created_at); + + let notes = query_non_system_notes(conn, entity_type, entity_id, since)?; + + let mut decisions = Vec::new(); + + for event in &events { + if decisions.len() >= max_decisions { + break; + } + // Find the FIRST non-system note by the SAME actor within 60 minutes AFTER the event + let matching_note = notes.iter().find(|n| { + n.author == event.actor + && n.created_at >= event.created_at + && n.created_at <= event.created_at + DECISION_WINDOW_MS + }); + if let Some(note) = matching_note { + decisions.push(KeyDecision { + timestamp: ms_to_iso(event.created_at), + actor: event.actor.clone(), + action: event.description.clone(), + context_note: truncate_note(¬e.body, NOTE_TRUNCATE_LEN), + }); + } + } + + Ok(decisions) +} + +// --------------------------------------------------------------------------- +// Activity summary (Task 3) +// --------------------------------------------------------------------------- + +fn build_activity_summary( + conn: &Connection, + entity_type: &str, + entity_id: i64, + since: Option, +) -> Result { + let id_col = id_column_for(entity_type); + + let state_sql = format!( + "SELECT COUNT(*), MIN(created_at), MAX(created_at) \ + FROM resource_state_events \ + WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)" + ); + let (state_count, state_min, state_max): (i64, Option, Option) = + conn.query_row(&state_sql, rusqlite::params![entity_id, since], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + })?; + let state_changes = state_count as usize; + + let label_sql = format!( + "SELECT COUNT(*), MIN(created_at), MAX(created_at) \ + FROM 
resource_label_events \ + WHERE {id_col} = ?1 AND (?2 IS NULL OR created_at >= ?2)" + ); + let (label_count, label_min, label_max): (i64, Option, Option) = + conn.query_row(&label_sql, rusqlite::params![entity_id, since], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + })?; + let label_changes = label_count as usize; + + let notes_sql = format!( + "SELECT COUNT(*), MIN(n.created_at), MAX(n.created_at) \ + FROM notes n \ + JOIN discussions d ON n.discussion_id = d.id \ + WHERE d.{id_col} = ?1 AND n.is_system = 0 \ + AND (?2 IS NULL OR n.created_at >= ?2)" + ); + let (notes_count, note_min, note_max): (i64, Option, Option) = + conn.query_row(¬es_sql, rusqlite::params![entity_id, since], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + })?; + let notes = notes_count as usize; + + let first_event = [state_min, label_min, note_min] + .iter() + .copied() + .flatten() + .min(); + let last_event = [state_max, label_max, note_max] + .iter() + .copied() + .flatten() + .max(); + + Ok(ActivitySummary { + state_changes, + label_changes, + notes, + first_event: first_event.map(ms_to_iso), + last_event: last_event.map(ms_to_iso), + }) +} + +// --------------------------------------------------------------------------- +// Open threads (Task 3) +// --------------------------------------------------------------------------- + +fn fetch_open_threads( + conn: &Connection, + entity_type: &str, + entity_id: i64, +) -> Result> { + let id_col = id_column_for(entity_type); + + let sql = format!( + "SELECT d.id, d.gitlab_discussion_id, d.first_note_at, d.last_note_at \ + FROM discussions d \ + WHERE d.{id_col} = ?1 \ + AND d.resolvable = 1 \ + AND d.resolved = 0 \ + ORDER BY d.last_note_at DESC" + ); + + let mut stmt = conn.prepare(&sql)?; + let rows: Vec<(i64, String, i64, i64)> = stmt + .query_map([entity_id], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?)) + })? 
+ .collect::, _>>()?; + + let mut threads = Vec::with_capacity(rows.len()); + + for (local_id, gitlab_discussion_id, first_note_at, last_note_at) in rows { + let started_by: String = conn + .query_row( + "SELECT author_username FROM notes \ + WHERE discussion_id = ?1 \ + ORDER BY created_at ASC LIMIT 1", + [local_id], + |row| row.get(0), + ) + .unwrap_or_else(|_| "unknown".to_owned()); + + let note_count_i64: i64 = conn.query_row( + "SELECT COUNT(*) FROM notes \ + WHERE discussion_id = ?1 AND is_system = 0", + [local_id], + |row| row.get(0), + )?; + let note_count = note_count_i64 as usize; + + threads.push(OpenThread { + discussion_id: gitlab_discussion_id, + started_by, + started_at: ms_to_iso(first_note_at), + note_count, + last_note_at: ms_to_iso(last_note_at), + }); + } + + Ok(threads) +} + +// --------------------------------------------------------------------------- +// Related entities (Task 3) +// --------------------------------------------------------------------------- + +/// Maps plural entity_type to the entity_references column value. +fn ref_entity_type(entity_type: &str) -> &str { + match entity_type { + "issues" => "issue", + "mrs" => "merge_request", + _ => entity_type, + } +} + +fn fetch_related_entities( + conn: &Connection, + entity_type: &str, + entity_id: i64, +) -> Result { + let ref_type = ref_entity_type(entity_type); + + // Closing MRs (only for issues) + let closing_mrs = if entity_type == "issues" { + let mut stmt = conn.prepare( + "SELECT mr.iid, mr.title, mr.state, mr.web_url \ + FROM entity_references er \ + JOIN merge_requests mr ON mr.id = er.source_entity_id \ + WHERE er.target_entity_type = 'issue' \ + AND er.target_entity_id = ?1 \ + AND er.source_entity_type = 'merge_request' \ + AND er.reference_type = 'closes' \ + ORDER BY mr.iid", + )?; + + stmt.query_map([entity_id], |row| { + Ok(ClosingMrInfo { + iid: row.get(0)?, + title: row.get(1)?, + state: row.get(2)?, + web_url: row.get(3)?, + }) + })? + .collect::, _>>()? 
+ } else { + vec![] + }; + + // Outgoing references (excluding closes, shown above) + let mut out_stmt = conn.prepare( + "SELECT er.target_entity_type, er.target_entity_iid, er.reference_type, \ + COALESCE(i.title, mr.title) as title \ + FROM entity_references er \ + LEFT JOIN issues i ON er.target_entity_type = 'issue' AND i.id = er.target_entity_id \ + LEFT JOIN merge_requests mr ON er.target_entity_type = 'merge_request' AND mr.id = er.target_entity_id \ + WHERE er.source_entity_type = ?1 AND er.source_entity_id = ?2 \ + AND er.reference_type != 'closes'", + )?; + + let outgoing: Vec = out_stmt + .query_map(rusqlite::params![ref_type, entity_id], |row| { + Ok(RelatedEntityInfo { + entity_type: row.get(0)?, + iid: row.get(1)?, + reference_type: row.get(2)?, + title: row.get(3)?, + }) + })? + .collect::, _>>()?; + + // Incoming references (excluding closes) + let mut in_stmt = conn.prepare( + "SELECT er.source_entity_type, COALESCE(i.iid, mr.iid) as iid, er.reference_type, \ + COALESCE(i.title, mr.title) as title \ + FROM entity_references er \ + LEFT JOIN issues i ON er.source_entity_type = 'issue' AND i.id = er.source_entity_id \ + LEFT JOIN merge_requests mr ON er.source_entity_type = 'merge_request' AND mr.id = er.source_entity_id \ + WHERE er.target_entity_type = ?1 AND er.target_entity_id = ?2 \ + AND er.reference_type != 'closes'", + )?; + + let incoming: Vec = in_stmt + .query_map(rusqlite::params![ref_type, entity_id], |row| { + Ok(RelatedEntityInfo { + entity_type: row.get(0)?, + iid: row.get(1)?, + reference_type: row.get(2)?, + title: row.get(3)?, + }) + })? + .collect::, _>>()?; + + let mut related_issues = outgoing; + related_issues.extend(incoming); + + Ok(RelatedEntities { + closing_mrs, + related_issues, + }) +} + +// --------------------------------------------------------------------------- +// Timeline excerpt (Task 4) +// --------------------------------------------------------------------------- + +/// Maximum events in the timeline excerpt. 
+const MAX_TIMELINE_EVENTS: usize = 20; + +/// Build a timeline excerpt by calling `seed_timeline_direct` + `collect_events`. +/// Returns `None` on pipeline errors (timeline is supplementary, not critical). +fn build_timeline_excerpt_from_pipeline( + conn: &Connection, + entity: &EntitySummary, + params: &ExplainParams, +) -> Option> { + let timeline_entity_type = match entity.entity_type.as_str() { + "issue" => "issue", + "merge_request" => "merge_request", + _ => return Some(vec![]), + }; + + let project_id = params + .project + .as_deref() + .and_then(|p| resolve_project(conn, p).ok()); + + let seed_result = match seed_timeline_direct(conn, timeline_entity_type, params.iid, project_id) + { + Ok(result) => result, + Err(_) => return Some(vec![]), + }; + + let (mut events, _total) = match collect_events( + conn, + &seed_result.seed_entities, + &[], + &seed_result.evidence_notes, + &seed_result.matched_discussions, + params.since, + MAX_TIMELINE_EVENTS, + ) { + Ok(result) => result, + Err(_) => return Some(vec![]), + }; + + events.truncate(MAX_TIMELINE_EVENTS); + + let summaries = events + .iter() + .map(|e| TimelineEventSummary { + timestamp: ms_to_iso(e.timestamp), + event_type: timeline_event_type_label(&e.event_type), + actor: e.actor.clone(), + summary: e.summary.clone(), + }) + .collect(); + + Some(summaries) +} + +fn timeline_event_type_label(event_type: &crate::timeline::TimelineEventType) -> String { + use crate::timeline::TimelineEventType; + match event_type { + TimelineEventType::Created => "created".to_string(), + TimelineEventType::StateChanged { state } => format!("state_changed:{state}"), + TimelineEventType::LabelAdded { label } => format!("label_added:{label}"), + TimelineEventType::LabelRemoved { label } => format!("label_removed:{label}"), + TimelineEventType::MilestoneSet { milestone } => format!("milestone_set:{milestone}"), + TimelineEventType::MilestoneRemoved { milestone } => { + format!("milestone_removed:{milestone}") + } + 
TimelineEventType::Merged => "merged".to_string(), + TimelineEventType::NoteEvidence { .. } => "note_evidence".to_string(), + TimelineEventType::DiscussionThread { .. } => "discussion_thread".to_string(), + TimelineEventType::CrossReferenced { .. } => "cross_referenced".to_string(), + } +} + +// --------------------------------------------------------------------------- +// Handler (called from main.rs) +// --------------------------------------------------------------------------- + +#[allow(clippy::too_many_arguments)] +pub fn handle_explain( + config_override: Option<&str>, + entity_type: &str, + iid: i64, + project: Option<&str>, + sections: Option>, + no_timeline: bool, + max_decisions: usize, + since: Option<&str>, + robot_mode: bool, +) -> std::result::Result<(), Box> { + let start = std::time::Instant::now(); + + // Normalize singular forms + let entity_type = match entity_type { + "issue" => "issues", + "mr" => "mrs", + other => other, + }; + + // Validate sections + const VALID_SECTIONS: &[&str] = &[ + "entity", + "description", + "key_decisions", + "activity", + "open_threads", + "related", + "timeline", + ]; + if let Some(ref secs) = sections { + for s in secs { + if !VALID_SECTIONS.contains(&s.as_str()) { + return Err(Box::new(LoreError::Other(format!( + "Invalid section '{s}'. 
Valid: {}", + VALID_SECTIONS.join(", ") + )))); + } + } + } + + // Parse --since + let since_ms = since.and_then(crate::core::time::parse_since); + + let config = crate::Config::load(config_override)?; + let db_path = crate::core::paths::get_db_path(config.storage.db_path.as_deref()); + let conn = crate::core::db::create_connection(&db_path)?; + + let effective_project = config.effective_project(project); + + let params = ExplainParams { + entity_type: entity_type.to_string(), + iid, + project: effective_project.map(String::from), + sections, + no_timeline, + max_decisions, + since: since_ms, + }; + + let result = run_explain(&conn, ¶ms)?; + let elapsed_ms = start.elapsed().as_millis() as u64; + + if robot_mode { + print_explain_json(&result, elapsed_ms); + } else { + print_explain(&result); + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Output rendering (Task 5 fills these in fully) +// --------------------------------------------------------------------------- + +pub fn print_explain_json(result: &ExplainResult, elapsed_ms: u64) { + let response = serde_json::json!({ + "ok": true, + "data": result, + "meta": { "elapsed_ms": elapsed_ms } + }); + println!("{}", serde_json::to_string(&response).unwrap_or_default()); +} + +pub fn print_explain(result: &ExplainResult) { + use crate::cli::render::{Icons, Theme}; + + // Entity header + let type_label = match result.entity.entity_type.as_str() { + "issue" => "Issue", + "merge_request" => "MR", + _ => &result.entity.entity_type, + }; + println!( + "{} {} #{} — {}", + Icons::info(), + Theme::bold().render(type_label), + result.entity.iid, + Theme::bold().render(&result.entity.title) + ); + println!( + " State: {} Author: {} Created: {}", + result.entity.state, result.entity.author, result.entity.created_at + ); + if !result.entity.assignees.is_empty() { + println!(" Assignees: {}", result.entity.assignees.join(", ")); + } + if !result.entity.labels.is_empty() { + 
println!(" Labels: {}", result.entity.labels.join(", ")); + } + if let Some(ref url) = result.entity.url { + println!(" URL: {url}"); + } + + // Description + if let Some(ref desc) = result.description_excerpt { + println!("\n{}", Theme::bold().render("Description")); + for line in desc.lines() { + println!(" {line}"); + } + } + + // Key decisions + if let Some(ref decisions) = result.key_decisions + && !decisions.is_empty() + { + println!( + "\n{} {}", + Icons::info(), + Theme::bold().render("Key Decisions") + ); + for d in decisions { + println!( + " {} {} — {}", + Theme::muted().render(&d.timestamp), + Theme::bold().render(&d.actor), + d.action, + ); + for line in d.context_note.lines() { + println!(" {line}"); + } + } + } + + // Activity + if let Some(ref act) = result.activity { + println!("\n{}", Theme::bold().render("Activity")); + println!( + " {} state changes, {} label changes, {} notes", + act.state_changes, act.label_changes, act.notes + ); + if let Some(ref first) = act.first_event { + println!(" First event: {first}"); + } + if let Some(ref last) = act.last_event { + println!(" Last event: {last}"); + } + } + + // Open threads + if let Some(ref threads) = result.open_threads + && !threads.is_empty() + { + println!( + "\n{} {} ({})", + Icons::warning(), + Theme::bold().render("Open Threads"), + threads.len() + ); + for t in threads { + println!( + " {} by {} ({} notes, last: {})", + t.discussion_id, t.started_by, t.note_count, t.last_note_at + ); + } + } + + // Related + if let Some(ref related) = result.related + && (!related.closing_mrs.is_empty() || !related.related_issues.is_empty()) + { + println!("\n{}", Theme::bold().render("Related")); + for mr in &related.closing_mrs { + println!( + " {} MR !{} — {} [{}]", + Icons::success(), + mr.iid, + mr.title, + mr.state + ); + } + for ri in &related.related_issues { + println!( + " {} {} #{} — {} ({})", + Icons::info(), + ri.entity_type, + ri.iid, + ri.title.as_deref().unwrap_or("(untitled)"), + 
ri.reference_type + ); + } + } + + // Timeline excerpt + if let Some(ref events) = result.timeline_excerpt + && !events.is_empty() + { + println!( + "\n{} {} ({} events)", + Icons::info(), + Theme::bold().render("Timeline"), + events.len() + ); + for e in events { + let actor_str = e.actor.as_deref().unwrap_or(""); + println!( + " {} {} {} {}", + Theme::muted().render(&e.timestamp), + e.event_type, + actor_str, + e.summary + ); + } + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn setup_explain_db() -> (Connection, i64) { + let conn = crate::core::db::create_connection(std::path::Path::new(":memory:")).unwrap(); + crate::core::db::run_migrations(&conn).unwrap(); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url) \ + VALUES (100, 'test/project', 'https://gitlab.example.com/test/project')", + [], + ) + .unwrap(); + let project_id = conn.last_insert_rowid(); + (conn, project_id) + } + + fn insert_test_issue(conn: &Connection, project_id: i64, iid: i64, desc: Option<&str>) -> i64 { + conn.execute( + "INSERT INTO issues (gitlab_id, iid, project_id, title, state, author_username, \ + created_at, updated_at, last_seen_at, description) \ + VALUES (?1, ?2, ?3, 'Test Issue', 'opened', 'testuser', \ + 1704067200000, 1704153600000, 1704153600000, ?4)", + rusqlite::params![iid * 10, iid, project_id, desc], + ) + .unwrap(); + conn.last_insert_rowid() + } + + fn insert_test_mr(conn: &Connection, project_id: i64, iid: i64) -> i64 { + conn.execute( + "INSERT INTO merge_requests (gitlab_id, iid, project_id, title, state, draft, \ + author_username, source_branch, target_branch, created_at, updated_at, \ + merged_at, last_seen_at) \ + VALUES (?1, ?2, ?3, 'Test MR', 'merged', 0, 'testuser', 'feat', 'main', \ + 1704067200000, 1704153600000, 1704240000000, 
1704153600000)", + rusqlite::params![iid * 10, iid, project_id], + ) + .unwrap(); + conn.last_insert_rowid() + } + + fn insert_test_discussion( + conn: &Connection, + project_id: i64, + issue_id: Option<i64>, + mr_id: Option<i64>, + gitlab_discussion_id: &str, + ) -> i64 { + conn.execute( + "INSERT INTO discussions (gitlab_discussion_id, project_id, noteable_type, issue_id, \ + merge_request_id, resolvable, resolved, first_note_at, last_note_at, last_seen_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, 0, 0, 1704067200000, 1704153600000, 1704153600000)", + rusqlite::params![ + gitlab_discussion_id, + project_id, + if issue_id.is_some() { + "Issue" + } else { + "MergeRequest" + }, + issue_id, + mr_id, + ], + ) + .unwrap(); + conn.last_insert_rowid() + } + + fn insert_test_note( + conn: &Connection, + project_id: i64, + discussion_id: i64, + gitlab_id: i64, + is_system: bool, + ) { + conn.execute( + "INSERT INTO notes (gitlab_id, discussion_id, project_id, body, author_username, \ + created_at, updated_at, last_seen_at, is_system) \ + VALUES (?1, ?2, ?3, 'Test note body', 'testuser', \ + 1704067200000, 1704067200000, 1704067200000, ?4)", + rusqlite::params![gitlab_id, discussion_id, project_id, is_system], + ) + .unwrap(); + } + + fn insert_test_state_event( + conn: &Connection, + project_id: i64, + issue_id: Option<i64>, + mr_id: Option<i64>, + state: &str, + actor: &str, + created_at: i64, + ) { + conn.execute( + "INSERT INTO resource_state_events (gitlab_id, project_id, issue_id, \ + merge_request_id, state, actor_username, created_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)", + rusqlite::params![ + created_at, project_id, issue_id, mr_id, state, actor, created_at, + ], + ) + .unwrap(); + } + + #[test] + fn test_explain_issue_basic() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 42, Some("Issue description text")); + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-001"); + insert_test_note(&conn, 
project_id, disc_id, 1001, false); + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "testuser", + 1704100000000, + ); + + let params = ExplainParams { + entity_type: "issues".to_string(), + iid: 42, + project: None, + sections: None, + no_timeline: true, + max_decisions: 10, + since: None, + }; + + let result = run_explain(&conn, &params).unwrap(); + + assert_eq!(result.entity.entity_type, "issue"); + assert_eq!(result.entity.iid, 42); + assert_eq!(result.entity.title, "Test Issue"); + assert_eq!(result.entity.state, "opened"); + assert_eq!(result.entity.author, "testuser"); + + // All sections present (as Some) + assert!(result.description_excerpt.is_some()); + assert!(result.key_decisions.is_some()); + assert!(result.activity.is_some()); + assert!(result.open_threads.is_some()); + assert!(result.related.is_some()); + // timeline is None when no_timeline=true (tested separately) + } + + #[test] + fn test_explain_mr() { + let (conn, project_id) = setup_explain_db(); + insert_test_mr(&conn, project_id, 99); + + let params = ExplainParams { + entity_type: "mrs".to_string(), + iid: 99, + project: None, + sections: None, + no_timeline: true, + max_decisions: 10, + since: None, + }; + + let result = run_explain(&conn, &params).unwrap(); + + assert_eq!(result.entity.entity_type, "merge_request"); + assert_eq!(result.entity.iid, 99); + assert_eq!(result.entity.title, "Test MR"); + assert_eq!(result.entity.state, "merged"); + } + + #[test] + fn test_explain_singular_entity_type() { + let (conn, project_id) = setup_explain_db(); + insert_test_issue(&conn, project_id, 42, Some("Description")); + + // Use "issues" (the normalized form) since run_explain expects already-normalized types. + // The normalization happens in handle_explain, which we test via the handler logic. + // Here we verify the skeleton works with the normalized form. 
+ let params = ExplainParams { + entity_type: "issues".to_string(), + iid: 42, + project: None, + sections: None, + no_timeline: true, + max_decisions: 10, + since: None, + }; + + let result = run_explain(&conn, &params).unwrap(); + assert_eq!(result.entity.entity_type, "issue"); + assert_eq!(result.entity.iid, 42); + } + + #[test] + fn test_explain_description_excerpt() { + let (conn, project_id) = setup_explain_db(); + insert_test_issue(&conn, project_id, 43, None); + + let params = ExplainParams { + entity_type: "issues".to_string(), + iid: 43, + project: None, + sections: None, + no_timeline: true, + max_decisions: 10, + since: None, + }; + + let result = run_explain(&conn, &params).unwrap(); + assert_eq!( + result.description_excerpt.as_deref(), + Some("(no description)") + ); + } + + #[test] + fn test_explain_section_filtering() { + let (conn, project_id) = setup_explain_db(); + insert_test_issue(&conn, project_id, 44, Some("Desc")); + + let params = ExplainParams { + entity_type: "issues".to_string(), + iid: 44, + project: None, + sections: Some(vec!["key_decisions".to_string(), "activity".to_string()]), + no_timeline: true, + max_decisions: 10, + since: None, + }; + + let result = run_explain(&conn, &params).unwrap(); + + // Entity always present + assert_eq!(result.entity.iid, 44); + + // Selected sections present + assert!(result.key_decisions.is_some()); + assert!(result.activity.is_some()); + + // Unselected sections absent + assert!(result.description_excerpt.is_none()); + assert!(result.open_threads.is_none()); + assert!(result.related.is_none()); + assert!(result.timeline_excerpt.is_none()); + } + + #[test] + fn test_truncate_description() { + assert_eq!(truncate_description(None, 500), "(no description)"); + assert_eq!(truncate_description(Some(""), 500), "(no description)"); + assert_eq!(truncate_description(Some("short"), 500), "short"); + + let long = "a".repeat(600); + let truncated = truncate_description(Some(&long), 500); + 
assert!(truncated.ends_with("...")); + assert!(truncated.len() <= 504); // 500 + "..." + } + + // ----------------------------------------------------------------------- + // Test helpers for key-decisions heuristic (Task 2) + // ----------------------------------------------------------------------- + + #[allow(clippy::too_many_arguments)] + fn insert_test_label_event( + conn: &Connection, + project_id: i64, + issue_id: Option<i64>, + mr_id: Option<i64>, + action: &str, + label_name: &str, + actor: &str, + created_at: i64, + ) { + conn.execute( + "INSERT INTO resource_label_events (gitlab_id, project_id, issue_id, \ + merge_request_id, action, label_name, actor_username, created_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", + rusqlite::params![ + created_at, project_id, issue_id, mr_id, action, label_name, actor, created_at + ], + ) + .unwrap(); + } + + #[allow(clippy::too_many_arguments)] + fn insert_test_note_with( + conn: &Connection, + project_id: i64, + discussion_id: i64, + gitlab_id: i64, + body: &str, + author: &str, + created_at: i64, + is_system: bool, + ) { + conn.execute( + "INSERT INTO notes (gitlab_id, discussion_id, project_id, body, author_username, \ + created_at, updated_at, last_seen_at, is_system) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?6, ?6, ?7)", + rusqlite::params![ + gitlab_id, + discussion_id, + project_id, + body, + author, + created_at, + is_system + ], + ) + .unwrap(); + } + + // ----------------------------------------------------------------------- + // Key-decisions heuristic tests (Task 2) + // ----------------------------------------------------------------------- + + #[test] + fn test_explain_key_decision_heuristic() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 50, Some("desc")); + + // State event at T + let t = 1_704_100_000_000_i64; + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "alice", + t, + ); + + // Note by SAME author at T + 30 
minutes (within 60min window) + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-kd1"); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5001, + "Closing because the fix landed in MR !200", + "alice", + t + 30 * 60 * 1000, + false, + ); + + let decisions = extract_key_decisions(&conn, "issues", issue_id, None, 10).unwrap(); + + assert_eq!(decisions.len(), 1); + assert_eq!(decisions[0].actor, "alice"); + assert!(decisions[0].action.contains("state:")); + assert!(decisions[0].action.contains("closed")); + assert!(decisions[0].context_note.contains("Closing because")); + } + + #[test] + fn test_explain_key_decision_ignores_unrelated_notes() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 51, Some("desc")); + + let t = 1_704_100_000_000_i64; + // State event by alice + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "alice", + t, + ); + + // Note by BOB at T + 30min — different author, should NOT correlate + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-kd2"); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5002, + "Some unrelated comment", + "bob", + t + 30 * 60 * 1000, + false, + ); + + let decisions = extract_key_decisions(&conn, "issues", issue_id, None, 10).unwrap(); + + assert_eq!(decisions.len(), 0); + } + + #[test] + fn test_explain_key_decision_label_event() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 52, Some("desc")); + + let t = 1_704_100_000_000_i64; + // Label add event + insert_test_label_event( + &conn, + project_id, + Some(issue_id), + None, + "add", + "bugfix", + "alice", + t, + ); + + // Correlated note by same actor + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-kd3"); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5003, + "Labeling as bugfix per 
triage", + "alice", + t + 10 * 60 * 1000, + false, + ); + + let decisions = extract_key_decisions(&conn, "issues", issue_id, None, 10).unwrap(); + + assert_eq!(decisions.len(), 1); + assert!( + decisions[0].action.starts_with("label: +"), + "Expected action to start with 'label: +', got: {}", + decisions[0].action + ); + } + + #[test] + fn test_explain_max_decisions() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 53, Some("desc")); + + let base_t = 1_704_100_000_000_i64; + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-kd4"); + + // Insert 5 correlated event+note pairs (each 2 hours apart to avoid overlap) + for i in 0..5 { + let event_t = base_t + i64::from(i) * 2 * 60 * 60 * 1000; + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + if i % 2 == 0 { "closed" } else { "reopened" }, + "alice", + event_t, + ); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5010 + i64::from(i), + &format!("Reason for change {i}"), + "alice", + event_t + 10 * 60 * 1000, + false, + ); + } + + let decisions = extract_key_decisions(&conn, "issues", issue_id, None, 3).unwrap(); + + assert_eq!(decisions.len(), 3, "Expected max_decisions=3 to cap at 3"); + } + + #[test] + fn test_explain_since_scopes_events() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 54, Some("desc")); + + let now = 1_704_200_000_000_i64; + let sixty_days_ago = now - 60 * 24 * 60 * 60 * 1000; + let ten_days_ago = now - 10 * 24 * 60 * 60 * 1000; + let thirty_days_ago = now - 30 * 24 * 60 * 60 * 1000; + + let disc_id = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-kd5"); + + // Old event at T-60d with correlated note + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "alice", + sixty_days_ago, + ); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5020, + "Old 
closure reason", + "alice", + sixty_days_ago + 10 * 60 * 1000, + false, + ); + + // Recent event at T-10d with correlated note + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "reopened", + "alice", + ten_days_ago, + ); + insert_test_note_with( + &conn, + project_id, + disc_id, + 5021, + "Recent reopening reason", + "alice", + ten_days_ago + 10 * 60 * 1000, + false, + ); + + // Call with since = 30 days ago — should only get the recent event + let decisions = + extract_key_decisions(&conn, "issues", issue_id, Some(thirty_days_ago), 10).unwrap(); + + assert_eq!(decisions.len(), 1, "Expected only the recent event"); + assert!(decisions[0].context_note.contains("Recent reopening")); + } + + // ----------------------------------------------------------------------- + // Activity / open threads / related tests (Task 3) + // ----------------------------------------------------------------------- + + #[allow(clippy::too_many_arguments)] + fn insert_resolvable_discussion( + conn: &Connection, + project_id: i64, + issue_id: Option<i64>, + mr_id: Option<i64>, + gitlab_discussion_id: &str, + resolvable: bool, + resolved: bool, + first_note_at: i64, + last_note_at: i64, + ) -> i64 { + conn.execute( + "INSERT INTO discussions (gitlab_discussion_id, project_id, noteable_type, issue_id, \ + merge_request_id, resolvable, resolved, first_note_at, last_note_at, last_seen_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9)", + rusqlite::params![ + gitlab_discussion_id, + project_id, + if issue_id.is_some() { + "Issue" + } else { + "MergeRequest" + }, + issue_id, + mr_id, + resolvable, + resolved, + first_note_at, + last_note_at, + ], + ) + .unwrap(); + conn.last_insert_rowid() + } + + #[test] + fn test_explain_open_threads() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 60, Some("desc")); + + // Unresolved, resolvable discussion + let disc1 = insert_resolvable_discussion( + &conn, + project_id, + 
Some(issue_id), + None, + "disc-unresolved", + true, + false, + 1_000_000, + 3_000_000, + ); + insert_test_note_with( + &conn, project_id, disc1, 6001, "note1", "alice", 1_000_000, false, + ); + insert_test_note_with( + &conn, project_id, disc1, 6002, "note2", "bob", 2_000_000, false, + ); + + // Resolved discussion (should NOT appear) + let disc2 = insert_resolvable_discussion( + &conn, + project_id, + Some(issue_id), + None, + "disc-resolved", + true, + true, + 1_500_000, + 2_500_000, + ); + insert_test_note_with( + &conn, project_id, disc2, 6003, "note3", "charlie", 1_500_000, false, + ); + + let threads = fetch_open_threads(&conn, "issues", issue_id).unwrap(); + + assert_eq!(threads.len(), 1, "Only unresolved thread should appear"); + assert_eq!(threads[0].discussion_id, "disc-unresolved"); + assert_eq!(threads[0].started_by, "alice"); + assert_eq!(threads[0].note_count, 2); + } + + #[test] + fn test_explain_activity_summary() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 61, Some("desc")); + + // 2 state events + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "alice", + 1_000_000, + ); + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "reopened", + "alice", + 5_000_000, + ); + + // 1 label event + insert_test_label_event( + &conn, + project_id, + Some(issue_id), + None, + "add", + "bug", + "alice", + 1_500_000, + ); + + // 3 non-system notes + let disc = insert_test_discussion(&conn, project_id, Some(issue_id), None, "disc-act"); + for i in 0..3 { + insert_test_note_with( + &conn, + project_id, + disc, + 7001 + i, + &format!("comment {i}"), + "commenter", + 1_100_000 + i * 100_000, + false, + ); + } + + let activity = build_activity_summary(&conn, "issues", issue_id, None).unwrap(); + + assert_eq!(activity.state_changes, 2); + assert_eq!(activity.label_changes, 1); + assert_eq!(activity.notes, 3); + 
assert!(activity.first_event.is_some()); + assert!(activity.last_event.is_some()); + } + + #[test] + fn test_explain_activity_with_since() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 62, Some("desc")); + + // Old event + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "closed", + "alice", + 1_000_000, + ); + // Recent event + insert_test_state_event( + &conn, + project_id, + Some(issue_id), + None, + "reopened", + "alice", + 5_000_000, + ); + + let activity = build_activity_summary(&conn, "issues", issue_id, Some(3_000_000)).unwrap(); + + assert_eq!(activity.state_changes, 1, "Only the recent event"); + } + + #[test] + fn test_explain_related_closing_mrs() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 63, Some("desc")); + let mr_id = insert_test_mr(&conn, project_id, 99); + + // Insert a closing reference: MR closes issue + conn.execute( + "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, \ + target_entity_type, target_entity_id, target_entity_iid, reference_type, \ + source_method, created_at) \ + VALUES (?, 'merge_request', ?, 'issue', ?, 63, 'closes', 'api', 1000000)", + rusqlite::params![project_id, mr_id, issue_id], + ) + .unwrap(); + + let related = fetch_related_entities(&conn, "issues", issue_id).unwrap(); + + assert_eq!(related.closing_mrs.len(), 1); + assert_eq!(related.closing_mrs[0].iid, 99); + assert_eq!(related.closing_mrs[0].state, "merged"); + } + + #[test] + fn test_explain_empty_activity() { + let (conn, project_id) = setup_explain_db(); + let issue_id = insert_test_issue(&conn, project_id, 64, None); + + let activity = build_activity_summary(&conn, "issues", issue_id, None).unwrap(); + assert_eq!(activity.state_changes, 0); + assert_eq!(activity.label_changes, 0); + assert_eq!(activity.notes, 0); + assert!(activity.first_event.is_none()); + 
assert!(activity.last_event.is_none()); + + let threads = fetch_open_threads(&conn, "issues", issue_id).unwrap(); + assert!(threads.is_empty()); + + let related = fetch_related_entities(&conn, "issues", issue_id).unwrap(); + assert!(related.closing_mrs.is_empty()); + assert!(related.related_issues.is_empty()); + } +} diff --git a/src/cli/commands/mod.rs b/src/cli/commands/mod.rs index ed3ba81..64fc8e6 100644 --- a/src/cli/commands/mod.rs +++ b/src/cli/commands/mod.rs @@ -5,6 +5,7 @@ pub mod cron; pub mod doctor; pub mod drift; pub mod embed; +pub mod explain; pub mod file_history; pub mod generate_docs; pub mod ingest; @@ -35,6 +36,7 @@ pub use cron::{ pub use doctor::{DoctorChecks, print_doctor_results, run_doctor}; pub use drift::{DriftResponse, print_drift_human, print_drift_json, run_drift}; pub use embed::{print_embed, print_embed_json, run_embed}; +pub use explain::{handle_explain, print_explain, print_explain_json, run_explain}; pub use file_history::{print_file_history, print_file_history_json, run_file_history}; pub use generate_docs::{print_generate_docs, print_generate_docs_json, run_generate_docs}; pub use ingest::{ diff --git a/src/cli/mod.rs b/src/cli/mod.rs index a44aa64..486d42f 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -277,6 +277,44 @@ pub enum Commands { /// Trace why code was introduced: file -> MR -> issue -> discussion Trace(TraceArgs), + /// Auto-generate a structured narrative of an issue or MR + #[command(after_help = "\x1b[1mExamples:\x1b[0m + lore explain issues 42 # Narrative for issue #42 + lore explain mrs 99 -p group/repo # Narrative for MR !99 in specific project + lore -J explain issues 42 # JSON output for automation + lore explain issues 42 --sections key_decisions,open_threads # Specific sections only + lore explain issues 42 --since 30d # Narrative scoped to last 30 days + lore explain issues 42 --no-timeline # Skip timeline (faster)")] + Explain { + /// Entity type: "issues" or "mrs" (singular forms also accepted) + 
#[arg(value_parser = ["issues", "mrs", "issue", "mr"])] + entity_type: String, + + /// Entity IID + iid: i64, + + /// Scope to project (fuzzy match) + #[arg(short, long)] + project: Option<String>, + + /// Select specific sections (comma-separated) + /// Valid: entity, description, key_decisions, activity, open_threads, related, timeline + #[arg(long, value_delimiter = ',', help_heading = "Output")] + sections: Option<Vec<String>>, + + /// Skip timeline excerpt (faster execution) + #[arg(long, help_heading = "Output")] + no_timeline: bool, + + /// Maximum key decisions to include + #[arg(long, default_value = "10", help_heading = "Output")] + max_decisions: usize, + + /// Time scope for events/notes (e.g. 7d, 2w, 1m, or YYYY-MM-DD) + #[arg(long, help_heading = "Filters")] + since: Option<String>, + }, + /// Detect discussion divergence from original intent + #[command(after_help = "\x1b[1mExamples:\x1b[0m lore drift issues 42 # Check drift on issue #42 diff --git a/src/main.rs b/src/main.rs index 5e34623..e012ce3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -13,23 +13,24 @@ use lore::cli::autocorrect::{self, CorrectionResult}; use lore::cli::commands::{ IngestDisplay, InitInputs, InitOptions, InitResult, ListFilters, MrListFilters, NoteListFilters, RefreshOptions, RefreshResult, SearchCliFilters, SyncOptions, TimelineParams, - delete_orphan_projects, open_issue_in_browser, open_mr_in_browser, parse_trace_path, - print_count, print_count_json, print_cron_install, print_cron_install_json, print_cron_status, - print_cron_status_json, print_cron_uninstall, print_cron_uninstall_json, print_doctor_results, - print_drift_human, print_drift_json, print_dry_run_preview, print_dry_run_preview_json, - print_embed, print_embed_json, print_event_count, print_event_count_json, print_file_history, - print_file_history_json, print_generate_docs, print_generate_docs_json, print_ingest_summary, - print_ingest_summary_json, print_list_issues, print_list_issues_json, print_list_mrs, - print_list_mrs_json, 
print_list_notes, print_list_notes_json, print_related_human, - print_related_json, print_search_results, print_search_results_json, print_show_issue, - print_show_issue_json, print_show_mr, print_show_mr_json, print_stats, print_stats_json, - print_sync, print_sync_json, print_sync_status, print_sync_status_json, print_timeline, - print_timeline_json_with_meta, print_trace, print_trace_json, print_who_human, print_who_json, - query_notes, run_auth_test, run_count, run_count_events, run_cron_install, run_cron_status, - run_cron_uninstall, run_doctor, run_drift, run_embed, run_file_history, run_generate_docs, - run_ingest, run_ingest_dry_run, run_init, run_init_refresh, run_list_issues, run_list_mrs, - run_me, run_related, run_search, run_show_issue, run_show_mr, run_stats, run_sync, - run_sync_status, run_timeline, run_token_set, run_token_show, run_who, + delete_orphan_projects, handle_explain, open_issue_in_browser, open_mr_in_browser, + parse_trace_path, print_count, print_count_json, print_cron_install, print_cron_install_json, + print_cron_status, print_cron_status_json, print_cron_uninstall, print_cron_uninstall_json, + print_doctor_results, print_drift_human, print_drift_json, print_dry_run_preview, + print_dry_run_preview_json, print_embed, print_embed_json, print_event_count, + print_event_count_json, print_file_history, print_file_history_json, print_generate_docs, + print_generate_docs_json, print_ingest_summary, print_ingest_summary_json, print_list_issues, + print_list_issues_json, print_list_mrs, print_list_mrs_json, print_list_notes, + print_list_notes_json, print_related_human, print_related_json, print_search_results, + print_search_results_json, print_show_issue, print_show_issue_json, print_show_mr, + print_show_mr_json, print_stats, print_stats_json, print_sync, print_sync_json, + print_sync_status, print_sync_status_json, print_timeline, print_timeline_json_with_meta, + print_trace, print_trace_json, print_who_human, print_who_json, 
query_notes, run_auth_test, + run_count, run_count_events, run_cron_install, run_cron_status, run_cron_uninstall, run_doctor, + run_drift, run_embed, run_file_history, run_generate_docs, run_ingest, run_ingest_dry_run, + run_init, run_init_refresh, run_list_issues, run_list_mrs, run_me, run_related, run_search, + run_show_issue, run_show_mr, run_stats, run_sync, run_sync_status, run_timeline, run_token_set, + run_token_show, run_who, }; use lore::cli::render::{ColorMode, GlyphMode, Icons, LoreRenderer, Theme}; use lore::cli::robot::{RobotMeta, strip_schemas}; @@ -222,6 +223,25 @@ fn main() { Some(Commands::Trace(args)) => handle_trace(cli.config.as_deref(), args, robot_mode), Some(Commands::Cron(args)) => handle_cron(cli.config.as_deref(), args, robot_mode), Some(Commands::Token(args)) => handle_token(cli.config.as_deref(), args, robot_mode).await, + Some(Commands::Explain { + entity_type, + iid, + project, + sections, + no_timeline, + max_decisions, + since, + }) => handle_explain( + cli.config.as_deref(), + &entity_type, + iid, + project.as_deref(), + sections, + no_timeline, + max_decisions, + since.as_deref(), + robot_mode, + ), Some(Commands::Drift { entity_type, iid,