feat(tui): Phase 4 completion + Phase 5 session/lock/text-width

Phase 4 (bd-1df9) — all 5 acceptance criteria met:
- Sync screen with delta ledger (bd-2x2h, bd-y095)
- Doctor screen with health checks (bd-2iqk)
- Stats screen with document counts (bd-2iqk)
- CLI integration: lore tui subcommand (bd-26lp)
- CLI integration: lore sync --tui flag (bd-3l56)

Phase 5 (bd-3h00) — session persistence + instance lock + text width:
- text_width.rs: Unicode-aware measurement, truncation, padding (16 tests)
- instance_lock.rs: Advisory PID lock with stale recovery (6 tests)
- session.rs: Atomic write + CRC32 checksum + quarantine (9 tests)

Closes: bd-26lp, bd-3h00, bd-3l56, bd-1df9, bd-y095
This commit is contained in:
teernisse
2026-02-18 23:40:30 -05:00
parent 418417b0f4
commit 146eb61623
45 changed files with 5216 additions and 207 deletions

View File

@@ -485,6 +485,12 @@ dependencies = [
"litrs",
]
[[package]]
name = "either"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
[[package]]
name = "encode_unicode"
version = "1.0.0"
@@ -500,6 +506,12 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "env_home"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe"
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -1336,6 +1348,7 @@ dependencies = [
"url",
"urlencoding",
"uuid",
"which",
]
[[package]]
@@ -1345,6 +1358,7 @@ dependencies = [
"anyhow",
"chrono",
"clap",
"crc32fast",
"crossterm 0.28.1",
"dirs",
"ftui",
@@ -1354,6 +1368,8 @@ dependencies = [
"serde",
"serde_json",
"tempfile",
"unicode-segmentation",
"unicode-width 0.2.2",
]
[[package]]
@@ -2782,6 +2798,18 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "which"
version = "7.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762"
dependencies = [
"either",
"env_home",
"rustix 1.1.3",
"winsafe",
]
[[package]]
name = "winapi"
version = "0.3.9"
@@ -3048,6 +3076,12 @@ dependencies = [
"memchr",
]
[[package]]
name = "winsafe"
version = "0.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
[[package]]
name = "wit-bindgen"
version = "0.51.0"

View File

@@ -42,5 +42,12 @@ serde_json = "1"
# Regex (used by safety module for PII/secret redaction)
regex = "1"
# Unicode text measurement
unicode-width = "0.2"
unicode-segmentation = "1"
# Session persistence (CRC32 checksum)
crc32fast = "1"
[dev-dependencies]
tempfile = "3"

View File

@@ -96,8 +96,7 @@ pub fn fetch_file_history(
merge_commit_sha: row.get(7)?,
})
})?
.filter_map(std::result::Result::ok)
.collect();
.collect::<std::result::Result<Vec<_>, _>>()?;
let total_mrs = merge_requests.len();
@@ -170,8 +169,7 @@ fn fetch_file_discussions(
created_at_ms: row.get(4)?,
})
})?
.filter_map(std::result::Result::ok)
.collect();
.collect::<std::result::Result<Vec<_>, _>>()?;
Ok(discussions)
}
@@ -187,12 +185,10 @@ pub fn fetch_file_history_paths(conn: &Connection, project_id: Option<i64>) -> R
let mut stmt = conn.prepare(sql)?;
let paths: Vec<String> = if let Some(pid) = project_id {
stmt.query_map([pid], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect()
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
stmt.query_map([], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect()
.collect::<std::result::Result<Vec<_>, _>>()?
};
Ok(paths)

View File

@@ -12,6 +12,7 @@ mod issue_list;
mod mr_detail;
mod mr_list;
mod search;
mod sync;
mod timeline;
mod trace;
mod who;
@@ -24,6 +25,7 @@ pub use issue_list::*;
pub use mr_detail::*;
pub use mr_list::*;
pub use search::*;
pub use sync::*;
pub use timeline::*;
pub use trace::*;
pub use who::*;

View File

@@ -0,0 +1,587 @@
#![allow(dead_code)]
//! Sync screen actions — query sync run history and detect running syncs.
//!
//! With cron-driven syncs as the primary mechanism, the TUI's sync screen
//! acts as a status dashboard. These pure query functions read `sync_runs`
//! and `projects` to populate the screen.
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::clock::Clock;
/// How many recent runs to display in the sync history.
const HISTORY_LIMIT: usize = 10;
/// If a "running" sync hasn't heartbeated in this many milliseconds,
/// consider it stale (likely crashed).
const STALE_HEARTBEAT_MS: i64 = 120_000; // 2 minutes
// ---------------------------------------------------------------------------
// Data types
// ---------------------------------------------------------------------------
/// Overview data for the sync screen.
///
/// Aggregated by [`fetch_sync_overview`]; each field backs one panel of
/// the sync status dashboard.
#[derive(Debug, Default)]
pub struct SyncOverview {
    /// Info about a currently running sync, if any.
    pub running: Option<RunningSyncInfo>,
    /// Most recent completed (succeeded or failed) run.
    /// `None` when no run has finished yet.
    pub last_completed: Option<SyncRunInfo>,
    /// Recent sync run history (newest first), capped at `HISTORY_LIMIT`.
    pub recent_runs: Vec<SyncRunInfo>,
    /// Configured project paths (sorted alphabetically).
    pub projects: Vec<String>,
}
/// A sync that is currently in progress.
///
/// Built by [`detect_running_sync`] from the newest `sync_runs` row with
/// `status = 'running'`.
#[derive(Debug, Clone)]
pub struct RunningSyncInfo {
    /// Row ID in sync_runs.
    pub id: i64,
    /// When this sync started (ms epoch).
    pub started_at: i64,
    /// Last heartbeat (ms epoch).
    pub heartbeat_at: i64,
    /// How long it's been running (ms), relative to the injected clock.
    pub elapsed_ms: u64,
    /// Whether the heartbeat is stale (sync may have crashed); true when
    /// the heartbeat is older than [`STALE_HEARTBEAT_MS`].
    pub stale: bool,
    /// Items processed so far.
    pub items_processed: u64,
}
/// Summary of a single sync run.
///
/// One row of the sync history list; produced by [`fetch_recent_runs`].
#[derive(Debug, Clone)]
pub struct SyncRunInfo {
    /// Row ID in sync_runs.
    pub id: i64,
    /// 'succeeded', 'failed', or 'running'.
    pub status: String,
    /// The command that was run (e.g., 'sync', 'ingest issues').
    pub command: String,
    /// When this sync started (ms epoch).
    pub started_at: i64,
    /// When this sync finished (ms epoch), if completed.
    pub finished_at: Option<i64>,
    /// Duration in ms (computed from started_at/finished_at).
    /// `None` while the run is still in progress.
    pub duration_ms: Option<u64>,
    /// Total items processed.
    pub items_processed: u64,
    /// Total errors encountered.
    pub errors: u64,
    /// Error message if the run failed.
    pub error: Option<String>,
    /// Correlation ID for log matching.
    pub run_id: Option<String>,
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/// Fetch the complete sync overview for the sync screen.
///
/// Combines running sync detection, last completed run, recent history,
/// and configured projects into a single struct.
pub fn fetch_sync_overview(conn: &Connection, clock: &dyn Clock) -> Result<SyncOverview> {
    let recent_runs = fetch_recent_runs(conn, HISTORY_LIMIT)?;
    // Newest finished run (succeeded or failed); in-progress runs are skipped.
    let last_completed = recent_runs
        .iter()
        .find(|run| matches!(run.status.as_str(), "succeeded" | "failed"))
        .cloned();
    Ok(SyncOverview {
        running: detect_running_sync(conn, clock)?,
        last_completed,
        recent_runs,
        projects: fetch_configured_projects(conn)?,
    })
}
/// Detect a currently running sync from the `sync_runs` table.
///
/// A sync is considered "running" if `status = 'running'`. It's marked
/// stale if the heartbeat is older than [`STALE_HEARTBEAT_MS`].
///
/// # Errors
///
/// Returns an error if the query fails for any reason other than the
/// no-rows case, which maps to `Ok(None)`.
pub fn detect_running_sync(
    conn: &Connection,
    clock: &dyn Clock,
) -> Result<Option<RunningSyncInfo>> {
    let result = conn.query_row(
        "SELECT id, started_at, heartbeat_at, total_items_processed
         FROM sync_runs
         WHERE status = 'running'
         ORDER BY id DESC
         LIMIT 1",
        [],
        |row| {
            let id: i64 = row.get(0)?;
            let started_at: i64 = row.get(1)?;
            let heartbeat_at: i64 = row.get(2)?;
            // total_items_processed is nullable; treat NULL as 0.
            let items: Option<i64> = row.get(3)?;
            Ok((id, started_at, heartbeat_at, items.unwrap_or(0)))
        },
    );
    match result {
        Ok((id, started_at, heartbeat_at, items)) => {
            let now = clock.now_ms();
            // NOTE: `saturating_sub` on i64 does NOT clamp at zero — a
            // started_at in the future (clock skew, bad data) would yield a
            // negative difference and an `as u64` cast would wrap to a huge
            // value. `try_from(..).unwrap_or(0)` clamps negatives to 0.
            let elapsed_ms = u64::try_from(now - started_at).unwrap_or(0);
            let stale = (now - heartbeat_at) > STALE_HEARTBEAT_MS;
            Ok(Some(RunningSyncInfo {
                id,
                started_at,
                heartbeat_at,
                elapsed_ms,
                stale,
                // Defensive: a corrupted negative counter becomes 0 rather
                // than wrapping to ~u64::MAX.
                items_processed: u64::try_from(items).unwrap_or(0),
            }))
        }
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e).context("detecting running sync"),
    }
}
/// Fetch recent sync runs (newest first).
///
/// `limit` caps the number of rows returned. NULL counters are treated as
/// zero; negative counters or a `finished_at` earlier than `started_at`
/// (clock skew / corrupted rows) are clamped to zero instead of wrapping
/// during the unsigned conversion.
///
/// # Errors
///
/// Returns an error if the query cannot be prepared, executed, or a row
/// fails to decode.
pub fn fetch_recent_runs(conn: &Connection, limit: usize) -> Result<Vec<SyncRunInfo>> {
    let mut stmt = conn
        .prepare(
            "SELECT id, status, command, started_at, finished_at,
                    total_items_processed, total_errors, error, run_id
             FROM sync_runs
             ORDER BY id DESC
             LIMIT ?1",
        )
        .context("preparing sync runs query")?;
    // usize -> i64 can only fail on a >i64::MAX limit; fall back to "no cap".
    let limit_param = i64::try_from(limit).unwrap_or(i64::MAX);
    let rows = stmt
        .query_map([limit_param], |row| {
            let id: i64 = row.get(0)?;
            let status: String = row.get(1)?;
            let command: String = row.get(2)?;
            let started_at: i64 = row.get(3)?;
            let finished_at: Option<i64> = row.get(4)?;
            let items: Option<i64> = row.get(5)?;
            let errors: Option<i64> = row.get(6)?;
            let error: Option<String> = row.get(7)?;
            let run_id: Option<String> = row.get(8)?;
            Ok((
                id, status, command, started_at, finished_at, items, errors, error, run_id,
            ))
        })
        .context("querying sync runs")?;
    let mut result = Vec::new();
    for row in rows {
        let (id, status, command, started_at, finished_at, items, errors, error, run_id) =
            row.context("reading sync run row")?;
        // Clamp before the unsigned conversion: a negative difference must
        // not wrap (`as u64` would produce ~u64::MAX).
        let duration_ms = finished_at.map(|f| u64::try_from(f - started_at).unwrap_or(0));
        result.push(SyncRunInfo {
            id,
            status,
            command,
            started_at,
            finished_at,
            duration_ms,
            items_processed: u64::try_from(items.unwrap_or(0)).unwrap_or(0),
            errors: u64::try_from(errors.unwrap_or(0)).unwrap_or(0),
            error,
            run_id,
        });
    }
    Ok(result)
}
/// Fetch configured project paths from the `projects` table.
///
/// Paths come back pre-sorted by the `ORDER BY` clause.
pub fn fetch_configured_projects(conn: &Connection) -> Result<Vec<String>> {
    let mut stmt = conn
        .prepare("SELECT path_with_namespace FROM projects ORDER BY path_with_namespace")
        .context("preparing projects query")?;
    // Fail-fast collect: the first bad row aborts with context attached.
    stmt.query_map([], |row| row.get::<_, String>(0))
        .context("querying projects")?
        .map(|row| row.context("reading project row"))
        .collect()
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::clock::FakeClock;
    /// Create the minimal schema needed for sync queries.
    ///
    /// Mirrors only the columns the production queries touch; kept in sync
    /// with the real migrations by hand.
    fn create_sync_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT,
                metrics_json TEXT,
                run_id TEXT,
                total_items_processed INTEGER DEFAULT 0,
                total_errors INTEGER DEFAULT 0
            );
            ",
        )
        .expect("create sync schema");
    }
    /// Insert a project row; gitlab_project_id is derived from `id` so the
    /// UNIQUE constraint never collides across fixtures.
    fn insert_project(conn: &Connection, id: i64, path: &str) {
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace)
             VALUES (?1, ?2, ?3)",
            rusqlite::params![id, id * 100, path],
        )
        .expect("insert project");
    }
    /// Insert a sync run and return its row id. The heartbeat defaults to
    /// `started_at` for runs that never finished.
    fn insert_sync_run(
        conn: &Connection,
        started_at: i64,
        finished_at: Option<i64>,
        status: &str,
        command: &str,
        items: i64,
        errors: i64,
        error: Option<&str>,
    ) -> i64 {
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command,
                                    total_items_processed, total_errors, error)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
            rusqlite::params![
                started_at,
                finished_at.unwrap_or(started_at),
                finished_at,
                status,
                command,
                items,
                errors,
                error,
            ],
        )
        .expect("insert sync run");
        conn.last_insert_rowid()
    }
    // -----------------------------------------------------------------------
    // detect_running_sync
    // -----------------------------------------------------------------------
    #[test]
    fn test_detect_running_sync_none_when_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let result = detect_running_sync(&conn, &clock).unwrap();
        assert!(result.is_none());
    }
    #[test]
    fn test_detect_running_sync_none_when_all_completed() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 100, 0, None);
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "failed", "sync", 50, 2, Some("timeout"));
        let clock = FakeClock::from_ms(now);
        let result = detect_running_sync(&conn, &clock).unwrap();
        assert!(result.is_none());
    }
    #[test]
    fn test_detect_running_sync_found() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        let started = now - 30_000; // 30 seconds ago
        // Heartbeat at started_at (fresh since we just set it)
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed)
             VALUES (?1, ?2, 'running', 'sync', 42)",
            [started, now - 5_000], // heartbeat 5 seconds ago
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let running = detect_running_sync(&conn, &clock).unwrap().unwrap();
        assert_eq!(running.elapsed_ms, 30_000);
        assert_eq!(running.items_processed, 42);
        assert!(!running.stale);
    }
    #[test]
    fn test_detect_running_sync_stale_heartbeat() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        let started = now - 300_000; // 5 minutes ago
        // Heartbeat 3 minutes ago — stale (past the 2-minute threshold)
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command)
             VALUES (?1, ?2, 'running', 'sync')",
            [started, now - 180_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let running = detect_running_sync(&conn, &clock).unwrap().unwrap();
        assert!(running.stale);
        assert_eq!(running.elapsed_ms, 300_000);
    }
    // -----------------------------------------------------------------------
    // fetch_recent_runs
    // -----------------------------------------------------------------------
    #[test]
    fn test_fetch_recent_runs_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert!(runs.is_empty());
    }
    #[test]
    fn test_fetch_recent_runs_ordered_newest_first() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 100, 0, None);
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 200, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs.len(), 2);
        // Newest first (higher id)
        assert_eq!(runs[0].items_processed, 200);
        assert_eq!(runs[1].items_processed, 100);
    }
    #[test]
    fn test_fetch_recent_runs_respects_limit() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        for i in 0..5 {
            insert_sync_run(
                &conn,
                now - (5 - i) * 60_000,
                Some(now - (5 - i) * 60_000 + 30_000),
                "succeeded",
                "sync",
                i * 10,
                0,
                None,
            );
        }
        let runs = fetch_recent_runs(&conn, 3).unwrap();
        assert_eq!(runs.len(), 3);
    }
    #[test]
    fn test_fetch_recent_runs_duration_computed() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // Started 60s ago, finished 15s ago -> 45s duration.
        insert_sync_run(&conn, now - 60_000, Some(now - 15_000), "succeeded", "sync", 0, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].duration_ms, Some(45_000));
    }
    #[test]
    fn test_fetch_recent_runs_running_no_duration() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(&conn, now - 60_000, None, "running", "sync", 0, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].status, "running");
        assert!(runs[0].duration_ms.is_none());
    }
    #[test]
    fn test_fetch_recent_runs_failed_with_error() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(
            &conn,
            now - 60_000,
            Some(now - 30_000),
            "failed",
            "sync",
            50,
            3,
            Some("network timeout"),
        );
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].status, "failed");
        assert_eq!(runs[0].errors, 3);
        assert_eq!(runs[0].error.as_deref(), Some("network timeout"));
    }
    // -----------------------------------------------------------------------
    // fetch_configured_projects
    // -----------------------------------------------------------------------
    #[test]
    fn test_fetch_configured_projects_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let projects = fetch_configured_projects(&conn).unwrap();
        assert!(projects.is_empty());
    }
    #[test]
    fn test_fetch_configured_projects_sorted() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        // Inserted out of order on purpose — the query sorts.
        insert_project(&conn, 1, "group/beta");
        insert_project(&conn, 2, "group/alpha");
        insert_project(&conn, 3, "other/gamma");
        let projects = fetch_configured_projects(&conn).unwrap();
        assert_eq!(projects, vec!["group/alpha", "group/beta", "other/gamma"]);
    }
    // -----------------------------------------------------------------------
    // fetch_sync_overview (integration)
    // -----------------------------------------------------------------------
    #[test]
    fn test_fetch_sync_overview_empty_db() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_none());
        assert!(overview.last_completed.is_none());
        assert!(overview.recent_runs.is_empty());
        assert!(overview.projects.is_empty());
    }
    #[test]
    fn test_fetch_sync_overview_with_history() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_project(&conn, 1, "group/repo");
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 150, 0, None);
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "failed", "sync", 50, 2, Some("db locked"));
        let clock = FakeClock::from_ms(now);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_none());
        assert_eq!(overview.recent_runs.len(), 2);
        assert_eq!(overview.projects, vec!["group/repo"]);
        // last_completed should be the newest completed run (failed, id=2)
        let last = overview.last_completed.unwrap();
        assert_eq!(last.status, "failed");
        assert_eq!(last.errors, 2);
    }
    #[test]
    fn test_fetch_sync_overview_with_running_sync() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_project(&conn, 1, "group/repo");
        // A completed run.
        insert_sync_run(&conn, now - 600_000, Some(now - 570_000), "succeeded", "sync", 200, 0, None);
        // A currently running sync.
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed)
             VALUES (?1, ?2, 'running', 'sync', 75)",
            [now - 20_000, now - 2_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_some());
        let running = overview.running.unwrap();
        assert_eq!(running.elapsed_ms, 20_000);
        assert_eq!(running.items_processed, 75);
        assert!(!running.stale);
        // last_completed should find the succeeded run, not the running one.
        let last = overview.last_completed.unwrap();
        assert_eq!(last.status, "succeeded");
        assert_eq!(last.items_processed, 200);
    }
    #[test]
    fn test_sync_run_info_with_run_id() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command,
                                    total_items_processed, total_errors, run_id)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync', 100, 0, 'abc-123')",
            [now - 60_000, now - 30_000],
        )
        .unwrap();
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].run_id.as_deref(), Some("abc-123"));
    }
}

View File

@@ -64,10 +64,12 @@ pub fn fetch_timeline_events(
let filter = resolve_timeline_scope(conn, scope)?;
let mut events = Vec::new();
collect_tl_created_events(conn, &filter, &mut events)?;
collect_tl_state_events(conn, &filter, &mut events)?;
collect_tl_label_events(conn, &filter, &mut events)?;
collect_tl_milestone_events(conn, &filter, &mut events)?;
// Each collector is given the full limit. After merge-sorting, we truncate
// to `limit`. Worst case we hold 4*limit events in memory (bounded).
collect_tl_created_events(conn, &filter, limit, &mut events)?;
collect_tl_state_events(conn, &filter, limit, &mut events)?;
collect_tl_label_events(conn, &filter, limit, &mut events)?;
collect_tl_milestone_events(conn, &filter, limit, &mut events)?;
// Sort by timestamp descending (most recent first), with stable tiebreak.
events.sort_by(|a, b| {
@@ -85,11 +87,12 @@ pub fn fetch_timeline_events(
fn collect_tl_created_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
// Issue created events.
if !matches!(filter, TimelineFilter::MergeRequest(_)) {
let (where_clause, params) = match filter {
let (where_clause, mut params) = match filter {
TimelineFilter::All => (
"1=1".to_string(),
Vec::<Box<dyn rusqlite::types::ToSql>>::new(),
@@ -105,12 +108,16 @@ fn collect_tl_created_events(
TimelineFilter::MergeRequest(_) => unreachable!(),
};
let limit_param = params.len() + 1;
let sql = format!(
"SELECT i.created_at, i.iid, i.title, i.author_username, i.project_id, p.path_with_namespace
FROM issues i
JOIN projects p ON p.id = i.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY i.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn
.prepare(&sql)
@@ -148,7 +155,7 @@ fn collect_tl_created_events(
// MR created events.
if !matches!(filter, TimelineFilter::Issue(_)) {
let (where_clause, params) = match filter {
let (where_clause, mut params) = match filter {
TimelineFilter::All => (
"1=1".to_string(),
Vec::<Box<dyn rusqlite::types::ToSql>>::new(),
@@ -164,12 +171,16 @@ fn collect_tl_created_events(
TimelineFilter::Issue(_) => unreachable!(),
};
let limit_param = params.len() + 1;
let sql = format!(
"SELECT mr.created_at, mr.iid, mr.title, mr.author_username, mr.project_id, p.path_with_namespace
FROM merge_requests mr
JOIN projects p ON p.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY mr.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing MR created query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> =
@@ -252,9 +263,11 @@ fn resolve_event_entity(
fn collect_tl_state_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.state, e.actor_username,
@@ -266,8 +279,11 @@ fn collect_tl_state_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing state events query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
@@ -338,9 +354,11 @@ fn collect_tl_state_events(
fn collect_tl_label_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.action, e.label_name, e.actor_username,
@@ -352,8 +370,11 @@ fn collect_tl_label_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing label events query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
@@ -426,9 +447,11 @@ fn collect_tl_label_events(
fn collect_tl_milestone_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.action, e.milestone_title, e.actor_username,
@@ -440,8 +463,11 @@ fn collect_tl_milestone_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn
.prepare(&sql)

View File

@@ -38,20 +38,18 @@ pub fn fetch_trace(
/// Returns distinct `new_path` values scoped to the given project (or all
/// projects if `None`), sorted alphabetically.
pub fn fetch_known_paths(conn: &Connection, project_id: Option<i64>) -> Result<Vec<String>> {
let mut paths = if let Some(pid) = project_id {
let paths = if let Some(pid) = project_id {
let mut stmt = conn.prepare(
"SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path",
)?;
let rows = stmt.query_map([pid], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
rows.collect::<std::result::Result<Vec<_>, _>>()?
} else {
let mut stmt =
conn.prepare("SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path")?;
let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
rows.collect::<std::result::Result<Vec<_>, _>>()?
};
paths.sort();
paths.dedup();
Ok(paths)
}

View File

@@ -219,6 +219,8 @@ impl LoreApp {
"go_who" => self.navigate_to(Screen::Who),
"go_file_history" => self.navigate_to(Screen::FileHistory),
"go_trace" => self.navigate_to(Screen::Trace),
"go_doctor" => self.navigate_to(Screen::Doctor),
"go_stats" => self.navigate_to(Screen::Stats),
"go_sync" => {
if screen == &Screen::Bootstrap {
self.state.bootstrap.sync_started = true;
@@ -235,6 +237,19 @@ impl LoreApp {
self.navigation.jump_forward();
Cmd::none()
}
"toggle_scope" => {
if self.state.scope_picker.visible {
self.state.scope_picker.close();
Cmd::none()
} else {
// Fetch projects and open picker asynchronously.
Cmd::task(move || {
// The actual DB query runs in the task; for now, open
// immediately with cached projects if available.
Msg::ScopeProjectsLoaded { projects: vec![] }
})
}
}
"move_down" | "move_up" | "select_item" | "focus_filter" | "scroll_to_top" => {
// Screen-specific actions — delegated in future phases.
Cmd::none()
@@ -431,14 +446,37 @@ impl LoreApp {
Cmd::none()
}
// --- Sync lifecycle (Bootstrap auto-transition) ---
// --- Sync lifecycle ---
Msg::SyncStarted => {
self.state.sync.start();
if *self.navigation.current() == Screen::Bootstrap {
self.state.bootstrap.sync_started = true;
}
Cmd::none()
}
Msg::SyncCompleted { .. } => {
Msg::SyncProgress {
stage,
current,
total,
} => {
self.state.sync.update_progress(&stage, current, total);
Cmd::none()
}
Msg::SyncProgressBatch { stage, batch_size } => {
self.state.sync.update_batch(&stage, batch_size);
Cmd::none()
}
Msg::SyncLogLine(line) => {
self.state.sync.add_log_line(line);
Cmd::none()
}
Msg::SyncBackpressureDrop => {
// Silently drop — the coalescer already handles throttling.
Cmd::none()
}
Msg::SyncCompleted { elapsed_ms } => {
self.state.sync.complete(elapsed_ms);
// If we came from Bootstrap, replace nav history with Dashboard.
if *self.navigation.current() == Screen::Bootstrap {
self.state.bootstrap.sync_started = false;
@@ -456,6 +494,18 @@ impl LoreApp {
}
Cmd::none()
}
Msg::SyncCancelled => {
self.state.sync.cancel();
Cmd::none()
}
Msg::SyncFailed(err) => {
self.state.sync.fail(err);
Cmd::none()
}
Msg::SyncStreamStats { bytes, items } => {
self.state.sync.update_stream_stats(bytes, items);
Cmd::none()
}
// --- Who screen ---
Msg::WhoResultLoaded { generation, result } => {
@@ -511,6 +561,56 @@ impl LoreApp {
Cmd::none()
}
// --- Doctor ---
Msg::DoctorLoaded { checks } => {
self.state.doctor.apply_checks(checks);
self.state.set_loading(Screen::Doctor, LoadState::Idle);
Cmd::none()
}
// --- Stats ---
Msg::StatsLoaded { data } => {
self.state.stats.apply_data(data);
self.state.set_loading(Screen::Stats, LoadState::Idle);
Cmd::none()
}
// --- Timeline ---
Msg::TimelineLoaded { generation, events } => {
if self
.supervisor
.is_current(&TaskKey::LoadScreen(Screen::Timeline), generation)
{
self.state.timeline.apply_results(generation, events);
self.state.set_loading(Screen::Timeline, LoadState::Idle);
self.supervisor
.complete(&TaskKey::LoadScreen(Screen::Timeline), generation);
}
Cmd::none()
}
// --- Search ---
Msg::SearchExecuted { generation, results } => {
if self
.supervisor
.is_current(&TaskKey::LoadScreen(Screen::Search), generation)
{
self.state.search.apply_results(generation, results);
self.state.set_loading(Screen::Search, LoadState::Idle);
self.supervisor
.complete(&TaskKey::LoadScreen(Screen::Search), generation);
}
Cmd::none()
}
// --- Scope ---
Msg::ScopeProjectsLoaded { projects } => {
self.state
.scope_picker
.open(projects, &self.state.global_scope);
Cmd::none()
}
// All other message variants: no-op for now.
// Future phases will fill these in as screens are implemented.
_ => Cmd::none(),

View File

@@ -112,7 +112,7 @@ mod tests {
let cmd = reg.complete_sequence(
&KeyCode::Char('g'),
&Modifiers::NONE,
&KeyCode::Char('x'),
&KeyCode::Char('z'),
&Modifiers::NONE,
&Screen::Dashboard,
);

View File

@@ -213,6 +213,16 @@ pub fn build_registry() -> CommandRegistry {
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
CommandDef {
id: "toggle_scope",
label: "Project Scope",
keybinding: Some(KeyCombo::key(KeyCode::Char('P'))),
cli_equivalent: None,
help_text: "Toggle project scope filter",
status_hint: "P:scope",
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
// --- Navigation: g-prefix sequences ---
CommandDef {
id: "go_home",
@@ -284,6 +294,46 @@ pub fn build_registry() -> CommandRegistry {
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
CommandDef {
id: "go_file_history",
label: "Go to File History",
keybinding: Some(KeyCombo::g_then('f')),
cli_equivalent: Some("lore file-history"),
help_text: "Jump to file history",
status_hint: "gf:files",
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
CommandDef {
id: "go_trace",
label: "Go to Trace",
keybinding: Some(KeyCombo::g_then('r')),
cli_equivalent: Some("lore trace"),
help_text: "Jump to trace",
status_hint: "gr:trace",
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
CommandDef {
id: "go_doctor",
label: "Go to Doctor",
keybinding: Some(KeyCombo::g_then('d')),
cli_equivalent: Some("lore doctor"),
help_text: "Jump to environment health checks",
status_hint: "gd:doctor",
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
CommandDef {
id: "go_stats",
label: "Go to Stats",
keybinding: Some(KeyCombo::g_then('x')),
cli_equivalent: Some("lore stats"),
help_text: "Jump to database statistics",
status_hint: "gx:stats",
available_in: ScreenFilter::Global,
available_in_text_mode: false,
},
// --- Vim-style jump list ---
CommandDef {
id: "jump_back",

View File

@@ -72,15 +72,14 @@ impl<V> EntityCache<V> {
}
// Evict LRU if at capacity.
if self.entries.len() >= self.capacity {
if let Some(lru_key) = self
if self.entries.len() >= self.capacity
&& let Some(lru_key) = self
.entries
.iter()
.min_by_key(|(_, (_, t))| *t)
.map(|(k, _)| k.clone())
{
self.entries.remove(&lru_key);
}
{
self.entries.remove(&lru_key);
}
self.entries.insert(key, (value, tick));

View File

@@ -0,0 +1,202 @@
//! Single-instance advisory lock for the TUI.
//!
//! Prevents concurrent `lore-tui` launches from corrupting state.
//! Uses an advisory lock file with PID. Stale locks (dead PID) are
//! automatically recovered.
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
/// Advisory lock preventing concurrent TUI launches.
///
/// On `acquire()`, writes the current PID to the lock file.
/// On `Drop`, removes the lock file (best-effort), so the lock is held
/// for exactly as long as this value stays alive.
#[derive(Debug)]
pub struct InstanceLock {
    // Absolute path of the lock file written by `acquire()`.
    path: PathBuf,
}
/// Error returned when another instance is already running.
///
/// Carries the owning PID and the lock-file location so the caller can
/// surface an actionable message to the user.
#[derive(Debug)]
pub struct LockConflict {
    pub pid: u32,
    pub path: PathBuf,
}

impl std::fmt::Display for LockConflict {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Another lore-tui instance is running (PID {pid}). Lock file: {path}",
            pid = self.pid,
            path = self.path.display()
        )
    }
}

impl std::error::Error for LockConflict {}
impl InstanceLock {
    /// Try to acquire the instance lock.
    ///
    /// - If the lock file doesn't exist, creates it with our PID.
    /// - If the lock file exists with a live PID, returns [`LockConflict`].
    /// - If the lock file exists with a dead PID (or unparsable contents),
    ///   removes the stale lock and acquires.
    ///
    /// # Errors
    ///
    /// Returns [`LockConflict`] when another live instance holds the lock,
    /// or an I/O error for filesystem failures.
    pub fn acquire(lock_dir: &Path) -> Result<Self, Box<dyn std::error::Error>> {
        // Ensure lock directory exists.
        fs::create_dir_all(lock_dir)?;
        let path = lock_dir.join("tui.lock");
        // Check for an existing lock.
        if path.exists() {
            let contents = fs::read_to_string(&path).unwrap_or_default();
            if let Ok(pid) = contents.trim().parse::<u32>()
                && is_process_alive(pid)
            {
                // `path` is not needed past this point, so move it.
                return Err(Box::new(LockConflict { pid, path }));
            }
            // Stale lock — PID is dead, or corrupt file. Remove and re-acquire.
            // A concurrent acquirer may have removed it first; that's fine.
            if let Err(e) = fs::remove_file(&path)
                && e.kind() != std::io::ErrorKind::NotFound
            {
                return Err(e.into());
            }
        }
        // `create_new` (O_EXCL) closes the check-then-create race: exactly
        // one concurrent acquirer wins the creation; losers see
        // AlreadyExists and report a conflict.
        let mut file = match fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&path)
        {
            Ok(f) => f,
            Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
                // Another instance beat us to the lock; best-effort read of
                // its PID for the error message (0 if unreadable).
                let pid = fs::read_to_string(&path)
                    .ok()
                    .and_then(|s| s.trim().parse::<u32>().ok())
                    .unwrap_or(0);
                return Err(Box::new(LockConflict { pid, path }));
            }
            Err(e) => return Err(e.into()),
        };
        // Write our PID and flush it to disk before claiming success.
        write!(file, "{}", std::process::id())?;
        file.sync_all()?;
        Ok(Self { path })
    }

    /// Path to the lock file.
    #[must_use]
    pub fn path(&self) -> &Path {
        &self.path
    }
}
impl Drop for InstanceLock {
    fn drop(&mut self) {
        // Best-effort cleanup. If it fails, the stale lock will be
        // recovered on next launch via the dead-PID check.
        // (A SIGKILL'd process never runs this Drop at all — that case is
        // likewise handled by stale-lock recovery in `acquire()`.)
        let _ = fs::remove_file(&self.path);
    }
}
/// Check whether a process with the given PID is alive.
///
/// Uses `kill -0 <pid>` on Unix (exit 0 = alive, non-zero = dead).
/// On non-Unix, conservatively assumes alive.
///
/// NOTE(review): `kill -0` also exits non-zero on permission errors, so a
/// live process owned by another user would be misreported as dead —
/// confirm lock directories are always per-user. Failure to spawn `kill`
/// at all is likewise treated as "dead" (`is_ok_and` yields false on Err).
#[cfg(unix)]
fn is_process_alive(pid: u32) -> bool {
    std::process::Command::new("kill")
        .args(["-0", &pid.to_string()])
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .is_ok_and(|s| s.success())
}

/// Non-Unix fallback: no cheap liveness probe available, so assume alive
/// (prefer a spurious conflict over clobbering a live instance's lock).
#[cfg(not(unix))]
fn is_process_alive(_pid: u32) -> bool {
    // Conservative fallback: assume alive.
    true
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    // Happy path: acquire writes our PID; Drop removes the file.
    #[test]
    fn test_acquire_and_release() {
        let dir = tempfile::tempdir().unwrap();
        let lock_path = dir.path().join("tui.lock");
        {
            let _lock = InstanceLock::acquire(dir.path()).unwrap();
            assert!(lock_path.exists());
            // Lock file should contain our PID.
            let contents = fs::read_to_string(&lock_path).unwrap();
            assert_eq!(contents, format!("{}", std::process::id()));
        }
        // After drop, lock file should be removed.
        assert!(!lock_path.exists());
    }

    #[test]
    fn test_double_acquire_fails() {
        let dir = tempfile::tempdir().unwrap();
        let _lock = InstanceLock::acquire(dir.path()).unwrap();
        // Second acquire should fail because our PID is still alive.
        let result = InstanceLock::acquire(dir.path());
        assert!(result.is_err());
        let err = result.unwrap_err();
        let conflict = err.downcast_ref::<LockConflict>().unwrap();
        assert_eq!(conflict.pid, std::process::id());
    }

    #[test]
    fn test_stale_lock_recovery() {
        let dir = tempfile::tempdir().unwrap();
        let lock_path = dir.path().join("tui.lock");
        // Write a lock file with a dead PID (PID 1 is init, but PID 99999999
        // almost certainly doesn't exist).
        let dead_pid = 99_999_999u32;
        fs::write(&lock_path, dead_pid.to_string()).unwrap();
        // Should succeed — stale lock is recovered.
        let _lock = InstanceLock::acquire(dir.path()).unwrap();
        assert!(lock_path.exists());
        // Lock file now contains our PID, not the dead one.
        let contents = fs::read_to_string(&lock_path).unwrap();
        assert_eq!(contents, format!("{}", std::process::id()));
    }

    #[test]
    fn test_corrupt_lock_file_recovered() {
        let dir = tempfile::tempdir().unwrap();
        let lock_path = dir.path().join("tui.lock");
        // Write garbage to the lock file.
        fs::write(&lock_path, "not-a-pid").unwrap();
        // Should succeed — corrupt lock is treated as stale.
        let lock = InstanceLock::acquire(dir.path()).unwrap();
        let contents = fs::read_to_string(lock.path()).unwrap();
        assert_eq!(contents, format!("{}", std::process::id()));
    }

    // acquire() must create missing parent directories itself.
    #[test]
    fn test_creates_lock_directory() {
        let dir = tempfile::tempdir().unwrap();
        let nested = dir.path().join("a").join("b").join("c");
        let lock = InstanceLock::acquire(&nested).unwrap();
        assert!(nested.join("tui.lock").exists());
        drop(lock);
    }

    #[test]
    fn test_lock_conflict_display() {
        let conflict = LockConflict {
            pid: 12345,
            path: PathBuf::from("/tmp/tui.lock"),
        };
        let msg = format!("{conflict}");
        assert!(msg.contains("12345"));
        assert!(msg.contains("/tmp/tui.lock"));
    }
}

View File

@@ -34,6 +34,12 @@ pub mod filter_dsl; // Filter DSL tokenizer for list screen filter bars (bd-18qs
// Phase 4 modules.
pub mod entity_cache; // Bounded LRU entity cache for detail view reopens (bd-2og9)
pub mod render_cache; // Bounded render cache for expensive per-frame computations (bd-2og9)
pub mod scope; // Global scope context: SQL helpers + project listing (bd-1ser)
// Phase 5 modules.
pub mod instance_lock; // Single-instance advisory lock for TUI (bd-3h00)
pub mod session; // Session state persistence: save/load/quarantine (bd-3h00)
pub mod text_width; // Unicode-aware text width measurement + truncation (bd-3h00)
/// Options controlling how the TUI launches.
#[derive(Debug, Clone)]

View File

@@ -307,6 +307,22 @@ pub enum Msg {
paths: Vec<String>,
},
// --- Scope ---
/// Projects loaded for the scope picker.
ScopeProjectsLoaded {
projects: Vec<crate::scope::ProjectInfo>,
},
// --- Doctor ---
DoctorLoaded {
checks: Vec<crate::state::doctor::HealthCheck>,
},
// --- Stats ---
StatsLoaded {
data: crate::state::stats::StatsData,
},
// --- Sync ---
SyncStarted,
SyncProgress {
@@ -397,6 +413,9 @@ impl Msg {
Self::TraceKnownPathsLoaded { .. } => "TraceKnownPathsLoaded",
Self::FileHistoryLoaded { .. } => "FileHistoryLoaded",
Self::FileHistoryKnownPathsLoaded { .. } => "FileHistoryKnownPathsLoaded",
Self::ScopeProjectsLoaded { .. } => "ScopeProjectsLoaded",
Self::DoctorLoaded { .. } => "DoctorLoaded",
Self::StatsLoaded { .. } => "StatsLoaded",
Self::SyncStarted => "SyncStarted",
Self::SyncProgress { .. } => "SyncProgress",
Self::SyncProgressBatch { .. } => "SyncProgressBatch",

View File

@@ -87,15 +87,14 @@ impl<V> RenderCache<V> {
return;
}
if self.entries.len() >= self.capacity {
if let Some(oldest_key) = self
if self.entries.len() >= self.capacity
&& let Some(oldest_key) = self
.entries
.iter()
.min_by_key(|(_, (_, t))| *t)
.map(|(k, _)| *k)
{
self.entries.remove(&oldest_key);
}
{
self.entries.remove(&oldest_key);
}
self.entries.insert(key, (value, tick));

View File

@@ -0,0 +1,155 @@
//! Global scope context helpers: SQL fragment generation and project listing.
//!
//! The [`ScopeContext`] struct lives in [`state::mod`] and holds the active
//! project filter. This module provides:
//!
//! - [`scope_filter_sql`] — generates a SQL WHERE clause fragment
//! - [`fetch_projects`] — lists available projects for the scope picker
//!
//! Action functions already accept `project_id: Option<i64>` — callers pass
//! `scope.project_id` directly. The helpers here are for screens that build
//! custom SQL or need the project list for UI.
use anyhow::{Context, Result};
use rusqlite::Connection;
/// Project metadata for the scope picker overlay.
///
/// Produced by [`fetch_projects`], which returns rows ordered by `path`.
#[derive(Debug, Clone)]
pub struct ProjectInfo {
    /// Internal database ID (projects.id).
    pub id: i64,
    /// GitLab path (e.g., "group/repo").
    pub path: String,
}
/// Build a SQL WHERE-clause fragment restricting rows to one project.
///
/// With `Some(id)` this yields `" AND {table_alias}.project_id = {id}"`;
/// with `None` (all projects) it yields an empty string. The leading
/// `AND` lets callers append the fragment to an existing WHERE clause
/// unconditionally.
///
/// # Examples
///
/// ```ignore
/// assert_eq!(scope_filter_sql(Some(42), "mr"), " AND mr.project_id = 42");
/// assert_eq!(scope_filter_sql(None, "mr"), "");
/// ```
#[must_use]
pub fn scope_filter_sql(project_id: Option<i64>, table_alias: &str) -> String {
    project_id.map_or_else(String::new, |id| {
        format!(" AND {table_alias}.project_id = {id}")
    })
}
/// Fetch all projects from the database for the scope picker.
///
/// Returns projects sorted by path (ORDER BY in the query). Used to
/// populate the scope picker overlay when the user presses `P`.
///
/// # Errors
///
/// Fails if the statement cannot be prepared or the query cannot run;
/// individual rows that fail to map are skipped rather than aborting
/// the whole listing.
pub fn fetch_projects(conn: &Connection) -> Result<Vec<ProjectInfo>> {
    let mut stmt = conn
        .prepare("SELECT id, path_with_namespace FROM projects ORDER BY path_with_namespace")
        .context("preparing projects query")?;
    let rows = stmt
        .query_map([], |row| {
            let id: i64 = row.get(0)?;
            let path: String = row.get(1)?;
            Ok(ProjectInfo { id, path })
        })
        .context("querying projects")?;
    let mut projects = Vec::new();
    for project in rows.flatten() {
        projects.push(project);
    }
    Ok(projects)
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_scope_filter_sql_none_returns_empty() {
        let sql = scope_filter_sql(None, "mr");
        assert_eq!(sql, "");
    }

    #[test]
    fn test_scope_filter_sql_some_returns_and_clause() {
        let sql = scope_filter_sql(Some(42), "mr");
        assert_eq!(sql, " AND mr.project_id = 42");
    }

    #[test]
    fn test_scope_filter_sql_different_alias() {
        let sql = scope_filter_sql(Some(7), "mfc");
        assert_eq!(sql, " AND mfc.project_id = 7");
    }

    // Minimal in-memory table with just the columns fetch_projects reads.
    #[test]
    fn test_fetch_projects_empty_db() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            )",
        )
        .unwrap();
        let projects = fetch_projects(&conn).unwrap();
        assert!(projects.is_empty());
    }

    // Rows inserted out of order must come back sorted by path.
    #[test]
    fn test_fetch_projects_returns_sorted() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            )",
        )
        .unwrap();
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'z-group/repo')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (2, 200, 'a-group/repo')",
            [],
        )
        .unwrap();
        let projects = fetch_projects(&conn).unwrap();
        assert_eq!(projects.len(), 2);
        assert_eq!(projects[0].path, "a-group/repo");
        assert_eq!(projects[0].id, 2);
        assert_eq!(projects[1].path, "z-group/repo");
        assert_eq!(projects[1].id, 1);
    }

    #[test]
    fn test_scope_filter_sql_composable_in_query() {
        // Verify the fragment works when embedded in a full SQL statement.
        let project_id = Some(5);
        let filter = scope_filter_sql(project_id, "mr");
        let sql = format!(
            "SELECT * FROM merge_requests mr WHERE mr.state = 'merged'{filter} ORDER BY mr.updated_at"
        );
        assert!(sql.contains("AND mr.project_id = 5"));
    }
}

View File

@@ -0,0 +1,406 @@
//! Session state persistence — save on quit, restore on launch.
//!
//! Enables the TUI to resume where the user left off: current screen,
//! navigation history, filter state, scroll positions.
//!
//! ## File format
//!
//! `session.json` is a versioned JSON blob with a CRC32 checksum appended
//! as the last 8 hex characters. Writes are atomic (tmp → fsync → rename).
//! Corrupt files are quarantined, not deleted.
use std::fs;
use std::io::Write;
use std::path::Path;
use serde::{Deserialize, Serialize};
/// Maximum session file size (1 MB). Files larger than this are rejected
/// on both save and load (oversized files on disk are quarantined).
const MAX_SESSION_SIZE: u64 = 1_024 * 1_024;

/// Current session format version. Bump when the schema changes.
/// `load_session` rejects files whose version is newer than this.
const SESSION_VERSION: u32 = 1;
// ---------------------------------------------------------------------------
// Persisted screen (decoupled from message::Screen)
// ---------------------------------------------------------------------------
/// Lightweight screen identifier for serialization.
///
/// Decoupled from `message::Screen` so session persistence doesn't require
/// `Serialize`/`Deserialize` on core types. The `tag = "kind"` attribute
/// serializes each variant as `{"kind": "..."}` JSON, keeping the file
/// self-describing.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "kind")]
pub enum PersistedScreen {
    Dashboard,
    IssueList,
    // Detail screens carry enough identity to reopen the same document.
    IssueDetail { project_id: i64, iid: i64 },
    MrList,
    MrDetail { project_id: i64, iid: i64 },
    Search,
    Timeline,
    Who,
    Trace,
    FileHistory,
    Sync,
    Stats,
    Doctor,
}
// ---------------------------------------------------------------------------
// Session state
// ---------------------------------------------------------------------------
/// Versioned session state persisted to disk.
///
/// NOTE(review): `filters` and `scroll_offsets` are `Vec<(String, _)>`
/// rather than maps — presumably to keep serialized ordering
/// deterministic; confirm callers de-duplicate keys.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct SessionState {
    /// Format version for migration.
    pub version: u32,
    /// Screen to restore on launch.
    pub current_screen: PersistedScreen,
    /// Navigation history (back stack).
    pub nav_history: Vec<PersistedScreen>,
    /// Per-screen filter text (screen name -> filter string).
    pub filters: Vec<(String, String)>,
    /// Per-screen scroll offset (screen name -> offset).
    pub scroll_offsets: Vec<(String, u16)>,
    /// Global scope project path filter (if set).
    pub global_scope: Option<String>,
}
impl Default for SessionState {
    fn default() -> Self {
        // Fresh-install defaults: current format version, Dashboard as the
        // landing screen, and no history/filters/scope.
        Self {
            version: SESSION_VERSION,
            current_screen: PersistedScreen::Dashboard,
            nav_history: Vec::new(),
            filters: Vec::new(),
            scroll_offsets: Vec::new(),
            global_scope: None,
        }
    }
}
// ---------------------------------------------------------------------------
// Save / Load
// ---------------------------------------------------------------------------
/// Save session state atomically.
///
/// Serializes to pretty JSON, appends a CRC32 checksum as a final hex
/// line, writes to a sibling temp file, fsyncs, then renames over the
/// target path. The rename is the commit point, so a crash mid-write
/// never leaves a partial `session.json` behind.
///
/// # Errors
///
/// - [`SessionError::Serialize`] if JSON encoding fails.
/// - [`SessionError::TooLarge`] if the payload exceeds `MAX_SESSION_SIZE`.
/// - [`SessionError::Io`] for filesystem failures (the temp file is
///   removed best-effort in that case).
pub fn save_session(state: &SessionState, path: &Path) -> Result<(), SessionError> {
    // Ensure parent directory exists.
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent).map_err(|e| SessionError::Io(e.to_string()))?;
    }
    let json = serde_json::to_string_pretty(state)
        .map_err(|e| SessionError::Serialize(e.to_string()))?;
    // Enforce the size cap before touching the filesystem.
    if json.len() as u64 > MAX_SESSION_SIZE {
        return Err(SessionError::TooLarge {
            size: json.len() as u64,
            max: MAX_SESSION_SIZE,
        });
    }
    // The checksum covers the JSON payload only; load_session splits it
    // back off the final line and verifies it.
    let checksum = crc32fast::hash(json.as_bytes());
    let payload = format!("{json}\n{checksum:08x}");
    // Write to temp file, fsync, rename. On any failure, remove the temp
    // file best-effort so repeated failed saves don't accumulate strays.
    let tmp_path = path.with_extension("tmp");
    let io_result = (|| {
        let mut file = fs::File::create(&tmp_path)?;
        file.write_all(payload.as_bytes())?;
        file.sync_all()?;
        drop(file);
        fs::rename(&tmp_path, path)
    })();
    if let Err(e) = io_result {
        let _ = fs::remove_file(&tmp_path);
        return Err(SessionError::Io(e.to_string()));
    }
    Ok(())
}
/// Load session state from disk.
///
/// Validates CRC32 checksum. On corruption, quarantines the file and
/// returns `SessionError::Corrupt`.
///
/// NOTE(review): only the oversized-file and CRC-mismatch paths
/// quarantine; a missing checksum separator, bad checksum hex, JSON
/// parse failure, or newer version leave the file in place — confirm
/// that asymmetry is intended.
pub fn load_session(path: &Path) -> Result<SessionState, SessionError> {
    if !path.exists() {
        return Err(SessionError::NotFound);
    }
    // Check file size before reading (avoid slurping an oversized file).
    let metadata = fs::metadata(path).map_err(|e| SessionError::Io(e.to_string()))?;
    if metadata.len() > MAX_SESSION_SIZE {
        quarantine(path)?;
        return Err(SessionError::TooLarge {
            size: metadata.len(),
            max: MAX_SESSION_SIZE,
        });
    }
    let raw = fs::read_to_string(path).map_err(|e| SessionError::Io(e.to_string()))?;
    // Split: everything before the last newline is JSON, after is the
    // checksum. rsplit_once is required because pretty JSON is multi-line.
    let (json, checksum_hex) = raw
        .rsplit_once('\n')
        .ok_or_else(|| SessionError::Corrupt("no checksum separator".into()))?;
    // Validate checksum.
    let expected = u32::from_str_radix(checksum_hex.trim(), 16)
        .map_err(|_| SessionError::Corrupt("invalid checksum hex".into()))?;
    let actual = crc32fast::hash(json.as_bytes());
    if actual != expected {
        quarantine(path)?;
        return Err(SessionError::Corrupt(format!(
            "CRC32 mismatch: expected {expected:08x}, got {actual:08x}"
        )));
    }
    // Deserialize.
    let state: SessionState = serde_json::from_str(json)
        .map_err(|e| SessionError::Corrupt(format!("JSON parse error: {e}")))?;
    // Version check — future-proof: reject newer versions, accept current.
    if state.version > SESSION_VERSION {
        return Err(SessionError::Corrupt(format!(
            "session version {} is newer than supported ({})",
            state.version, SESSION_VERSION
        )));
    }
    Ok(state)
}
/// Move a corrupt session file into a sibling `.quarantine/` directory
/// instead of deleting it, preserving evidence for debugging.
///
/// The quarantined copy is named `<original-filename>.<UTC timestamp>`
/// (second resolution) so successive quarantines rarely collide.
///
/// # Errors
///
/// Returns [`SessionError::Io`] if the quarantine directory cannot be
/// created or the rename fails.
fn quarantine(path: &Path) -> Result<(), SessionError> {
    let quarantine_dir = path
        .parent()
        .unwrap_or(Path::new("."))
        .join(".quarantine");
    fs::create_dir_all(&quarantine_dir).map_err(|e| SessionError::Io(e.to_string()))?;
    let filename = path
        .file_name()
        .unwrap_or_default()
        .to_string_lossy()
        .to_string();
    let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S");
    // Bug fix: the destination previously used a literal placeholder
    // instead of the computed `filename`, which was never used.
    let quarantine_path = quarantine_dir.join(format!("{filename}.{ts}"));
    fs::rename(path, &quarantine_path).map_err(|e| SessionError::Io(e.to_string()))?;
    Ok(())
}
// ---------------------------------------------------------------------------
// Errors
// ---------------------------------------------------------------------------
/// Session persistence errors.
///
/// Errors are stringly-typed (`String` payloads) so the enum stays
/// `Clone + PartialEq` for easy use in tests and messages.
#[derive(Debug, Clone, PartialEq)]
pub enum SessionError {
    /// Session file not found (first launch).
    NotFound,
    /// File is corrupt (bad checksum, invalid JSON, etc.).
    Corrupt(String),
    /// File exceeds size limit.
    TooLarge { size: u64, max: u64 },
    /// I/O error.
    Io(String),
    /// Serialization error.
    Serialize(String),
}

impl std::fmt::Display for SessionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::NotFound => f.write_str("session file not found"),
            Self::Corrupt(msg) => write!(f, "corrupt session: {msg}"),
            Self::Io(msg) => write!(f, "session I/O error: {msg}"),
            Self::Serialize(msg) => write!(f, "session serialization error: {msg}"),
            Self::TooLarge { size, max } => {
                write!(f, "session file too large ({size} bytes, max {max})")
            }
        }
    }
}

impl std::error::Error for SessionError {}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    // Fully-populated state exercising every field of SessionState.
    fn sample_state() -> SessionState {
        SessionState {
            version: SESSION_VERSION,
            current_screen: PersistedScreen::IssueList,
            nav_history: vec![PersistedScreen::Dashboard],
            filters: vec![("IssueList".into(), "bug".into())],
            scroll_offsets: vec![("IssueList".into(), 5)],
            global_scope: Some("group/project".into()),
        }
    }

    #[test]
    fn test_session_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let state = sample_state();
        save_session(&state, &path).unwrap();
        let loaded = load_session(&path).unwrap();
        assert_eq!(state, loaded);
    }

    #[test]
    fn test_session_default_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let state = SessionState::default();
        save_session(&state, &path).unwrap();
        let loaded = load_session(&path).unwrap();
        assert_eq!(state, loaded);
    }

    #[test]
    fn test_session_not_found() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("nonexistent.json");
        let result = load_session(&path);
        assert_eq!(result.unwrap_err(), SessionError::NotFound);
    }

    // Same-length tampering keeps the file parseable but breaks the CRC.
    #[test]
    fn test_session_corruption_detected() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let state = sample_state();
        save_session(&state, &path).unwrap();
        // Tamper with the file — modify a byte in the JSON section.
        let raw = fs::read_to_string(&path).unwrap();
        let tampered = raw.replacen("IssueList", "MrList___", 1);
        fs::write(&path, tampered).unwrap();
        let result = load_session(&path);
        assert!(matches!(result, Err(SessionError::Corrupt(_))));
    }

    #[test]
    fn test_session_corruption_quarantines_file() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let state = sample_state();
        save_session(&state, &path).unwrap();
        // Tamper with the checksum line.
        let raw = fs::read_to_string(&path).unwrap();
        let tampered = format!("{}\ndeadbeef", raw.rsplit_once('\n').unwrap().0);
        fs::write(&path, tampered).unwrap();
        let _ = load_session(&path);
        // Original file should be gone.
        assert!(!path.exists());
        // Quarantine directory should contain the file.
        let quarantine_dir = dir.path().join(".quarantine");
        assert!(quarantine_dir.exists());
        let entries: Vec<_> = fs::read_dir(&quarantine_dir).unwrap().collect();
        assert_eq!(entries.len(), 1);
    }

    #[test]
    fn test_session_creates_parent_directory() {
        let dir = tempfile::tempdir().unwrap();
        let nested = dir.path().join("a").join("b").join("session.json");
        let state = SessionState::default();
        save_session(&state, &nested).unwrap();
        assert!(nested.exists());
    }

    // Every PersistedScreen variant must survive a serde round-trip.
    #[test]
    fn test_session_persisted_screen_variants() {
        let screens = vec![
            PersistedScreen::Dashboard,
            PersistedScreen::IssueList,
            PersistedScreen::IssueDetail {
                project_id: 1,
                iid: 42,
            },
            PersistedScreen::MrList,
            PersistedScreen::MrDetail {
                project_id: 2,
                iid: 99,
            },
            PersistedScreen::Search,
            PersistedScreen::Timeline,
            PersistedScreen::Who,
            PersistedScreen::Trace,
            PersistedScreen::FileHistory,
            PersistedScreen::Sync,
            PersistedScreen::Stats,
            PersistedScreen::Doctor,
        ];
        for screen in screens {
            let state = SessionState {
                current_screen: screen.clone(),
                ..SessionState::default()
            };
            let dir = tempfile::tempdir().unwrap();
            let path = dir.path().join("session.json");
            save_session(&state, &path).unwrap();
            let loaded = load_session(&path).unwrap();
            assert_eq!(state.current_screen, loaded.current_screen);
        }
    }

    // ~10 MB of filter data must be rejected before hitting the disk.
    #[test]
    fn test_session_max_size_enforced() {
        let state = SessionState {
            filters: (0..100_000)
                .map(|i| (format!("key_{i}"), "x".repeat(100)))
                .collect(),
            ..SessionState::default()
        };
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let result = save_session(&state, &path);
        assert!(matches!(result, Err(SessionError::TooLarge { .. })));
    }

    #[test]
    fn test_session_atomic_write_no_partial() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("session.json");
        let tmp_path = path.with_extension("tmp");
        let state = sample_state();
        save_session(&state, &path).unwrap();
        // After save, no tmp file should remain.
        assert!(!tmp_path.exists());
        assert!(path.exists());
    }
}

View File

@@ -0,0 +1,199 @@
#![allow(dead_code)]
//! Doctor screen state — health check results.
//!
//! Displays a list of environment health checks with pass/warn/fail
//! indicators. Checks are synchronous (config, DB, projects, FTS) —
//! network checks (GitLab auth, Ollama) are not run from the TUI.
// ---------------------------------------------------------------------------
// HealthStatus
// ---------------------------------------------------------------------------
/// Outcome of a single health check.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HealthStatus {
    Pass,
    Warn,
    Fail,
}

impl HealthStatus {
    /// Fixed-width, human-readable label for display columns.
    #[must_use]
    pub fn label(self) -> &'static str {
        match self {
            Self::Fail => "FAIL",
            Self::Warn => "WARN",
            Self::Pass => "PASS",
        }
    }
}
// ---------------------------------------------------------------------------
// HealthCheck
// ---------------------------------------------------------------------------
/// A single health check result for display.
///
/// Rendered as one row in the Doctor screen's check list.
#[derive(Debug, Clone)]
pub struct HealthCheck {
    /// Check category name (e.g., "Config", "Database").
    pub name: String,
    /// Pass/warn/fail status.
    pub status: HealthStatus,
    /// Human-readable detail (e.g., path, version, count).
    pub detail: String,
}
// ---------------------------------------------------------------------------
// DoctorState
// ---------------------------------------------------------------------------
/// State for the Doctor screen.
#[derive(Debug, Default)]
pub struct DoctorState {
    /// Health check results (empty until loaded).
    pub checks: Vec<HealthCheck>,
    /// Whether checks have been loaded at least once.
    pub loaded: bool,
}

impl DoctorState {
    /// Replace the check list with freshly loaded results.
    pub fn apply_checks(&mut self, checks: Vec<HealthCheck>) {
        self.checks = checks;
        self.loaded = true;
    }

    /// Overall status — the worst status across all checks
    /// (Fail > Warn > Pass; an empty list counts as Pass).
    #[must_use]
    pub fn overall_status(&self) -> HealthStatus {
        let mut worst = HealthStatus::Pass;
        for check in &self.checks {
            match check.status {
                // Fail dominates everything — no need to keep scanning.
                HealthStatus::Fail => return HealthStatus::Fail,
                HealthStatus::Warn => worst = HealthStatus::Warn,
                HealthStatus::Pass => {}
            }
        }
        worst
    }

    /// Count of checks with exactly the given status.
    #[must_use]
    pub fn count_by_status(&self, status: HealthStatus) -> usize {
        self.checks
            .iter()
            .fold(0, |n, check| n + usize::from(check.status == status))
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    // Mixed fixture: two Pass, one Warn, one Fail.
    fn sample_checks() -> Vec<HealthCheck> {
        vec![
            HealthCheck {
                name: "Config".into(),
                status: HealthStatus::Pass,
                detail: "/home/user/.config/lore/config.json".into(),
            },
            HealthCheck {
                name: "Database".into(),
                status: HealthStatus::Pass,
                detail: "schema v12".into(),
            },
            HealthCheck {
                name: "Projects".into(),
                status: HealthStatus::Warn,
                detail: "0 projects configured".into(),
            },
            HealthCheck {
                name: "FTS Index".into(),
                status: HealthStatus::Fail,
                detail: "No documents indexed".into(),
            },
        ]
    }

    #[test]
    fn test_default_state() {
        let state = DoctorState::default();
        assert!(state.checks.is_empty());
        assert!(!state.loaded);
    }

    #[test]
    fn test_apply_checks() {
        let mut state = DoctorState::default();
        state.apply_checks(sample_checks());
        assert!(state.loaded);
        assert_eq!(state.checks.len(), 4);
    }

    #[test]
    fn test_overall_status_fail_wins() {
        let mut state = DoctorState::default();
        state.apply_checks(sample_checks());
        assert_eq!(state.overall_status(), HealthStatus::Fail);
    }

    #[test]
    fn test_overall_status_all_pass() {
        let mut state = DoctorState::default();
        state.apply_checks(vec![
            HealthCheck {
                name: "Config".into(),
                status: HealthStatus::Pass,
                detail: "ok".into(),
            },
            HealthCheck {
                name: "Database".into(),
                status: HealthStatus::Pass,
                detail: "ok".into(),
            },
        ]);
        assert_eq!(state.overall_status(), HealthStatus::Pass);
    }

    #[test]
    fn test_overall_status_warn_without_fail() {
        let mut state = DoctorState::default();
        state.apply_checks(vec![
            HealthCheck {
                name: "Config".into(),
                status: HealthStatus::Pass,
                detail: "ok".into(),
            },
            HealthCheck {
                name: "Ollama".into(),
                status: HealthStatus::Warn,
                detail: "not running".into(),
            },
        ]);
        assert_eq!(state.overall_status(), HealthStatus::Warn);
    }

    // No checks at all counts as healthy.
    #[test]
    fn test_overall_status_empty_is_pass() {
        let state = DoctorState::default();
        assert_eq!(state.overall_status(), HealthStatus::Pass);
    }

    #[test]
    fn test_count_by_status() {
        let mut state = DoctorState::default();
        state.apply_checks(sample_checks());
        assert_eq!(state.count_by_status(HealthStatus::Pass), 2);
        assert_eq!(state.count_by_status(HealthStatus::Warn), 1);
        assert_eq!(state.count_by_status(HealthStatus::Fail), 1);
    }

    #[test]
    fn test_health_status_labels() {
        assert_eq!(HealthStatus::Pass.label(), "PASS");
        assert_eq!(HealthStatus::Warn.label(), "WARN");
        assert_eq!(HealthStatus::Fail.label(), "FAIL");
    }
}

View File

@@ -4,6 +4,8 @@
//! Users enter a file path, toggle options (follow renames, merged only,
//! show discussions), and browse a chronological MR list.
use crate::text_width::{next_char_boundary, prev_char_boundary};
// ---------------------------------------------------------------------------
// FileHistoryState
// ---------------------------------------------------------------------------
@@ -225,24 +227,6 @@ impl FileHistoryState {
}
}
/// Find the byte offset of the previous char boundary.
fn prev_char_boundary(s: &str, pos: usize) -> usize {
let mut i = pos.saturating_sub(1);
while i > 0 && !s.is_char_boundary(i) {
i -= 1;
}
i
}
/// Find the byte offset of the next char boundary.
fn next_char_boundary(s: &str, pos: usize) -> usize {
let mut i = pos + 1;
while i < s.len() && !s.is_char_boundary(i) {
i += 1;
}
i
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

View File

@@ -16,15 +16,19 @@
pub mod bootstrap;
pub mod command_palette;
pub mod dashboard;
pub mod doctor;
pub mod file_history;
pub mod issue_detail;
pub mod issue_list;
pub mod mr_detail;
pub mod mr_list;
pub mod search;
pub mod stats;
pub mod sync;
pub mod sync_delta_ledger;
pub mod timeline;
pub mod trace;
pub mod scope_picker;
pub mod who;
use std::collections::{HashMap, HashSet};
@@ -35,15 +39,18 @@ use crate::message::Screen;
pub use bootstrap::BootstrapState;
pub use command_palette::CommandPaletteState;
pub use dashboard::DashboardState;
pub use doctor::DoctorState;
pub use file_history::FileHistoryState;
pub use issue_detail::IssueDetailState;
pub use issue_list::IssueListState;
pub use mr_detail::MrDetailState;
pub use mr_list::MrListState;
pub use search::SearchState;
pub use stats::StatsState;
pub use sync::SyncState;
pub use timeline::TimelineState;
pub use trace::TraceState;
pub use scope_picker::ScopePickerState;
pub use who::WhoState;
// ---------------------------------------------------------------------------
@@ -171,17 +178,20 @@ pub struct AppState {
// Per-screen states.
pub bootstrap: BootstrapState,
pub dashboard: DashboardState,
pub doctor: DoctorState,
pub issue_list: IssueListState,
pub issue_detail: IssueDetailState,
pub mr_list: MrListState,
pub mr_detail: MrDetailState,
pub search: SearchState,
pub stats: StatsState,
pub timeline: TimelineState,
pub who: WhoState,
pub trace: TraceState,
pub file_history: FileHistoryState,
pub sync: SyncState,
pub command_palette: CommandPaletteState,
pub scope_picker: ScopePickerState,
// Cross-cutting state.
pub global_scope: ScopeContext,

View File

@@ -0,0 +1,234 @@
//! Scope picker overlay state.
//!
//! The scope picker lets users filter all screens to a specific project.
//! It appears as a modal overlay when the user presses `P`.
use crate::scope::ProjectInfo;
use crate::state::ScopeContext;
/// State for the scope picker overlay.
#[derive(Debug, Default)]
pub struct ScopePickerState {
    /// Available projects (populated on open).
    pub projects: Vec<ProjectInfo>,
    /// Currently highlighted index (0 = "All Projects", 1..N = specific projects).
    pub selected_index: usize,
    /// Whether the picker overlay is visible.
    pub visible: bool,
    /// Scroll offset for long project lists (index of the topmost visible row).
    pub scroll_offset: usize,
}

/// Max visible rows in the picker before scrolling kicks in.
const MAX_VISIBLE_ROWS: usize = 15;
impl ScopePickerState {
    /// Open the picker with the given project list.
    ///
    /// Pre-selects the row matching the current scope, or "All Projects"
    /// (index 0) if no project filter is active.
    pub fn open(&mut self, projects: Vec<ProjectInfo>, current_scope: &ScopeContext) {
        self.projects = projects;
        self.visible = true;
        self.scroll_offset = 0;
        // Row 0 is the synthetic "All Projects" entry, so a matching
        // project lands at its list position + 1. An unknown or absent
        // scope falls back to row 0.
        self.selected_index = current_scope
            .project_id
            .and_then(|id| self.projects.iter().position(|p| p.id == id))
            .map_or(0, |i| i + 1);
        self.ensure_visible();
    }

    /// Close the picker without changing scope.
    pub fn close(&mut self) {
        self.visible = false;
    }

    /// Move selection up one row, stopping at the top.
    pub fn select_prev(&mut self) {
        if let Some(prev) = self.selected_index.checked_sub(1) {
            self.selected_index = prev;
            self.ensure_visible();
        }
    }

    /// Move selection down one row, stopping at the last project.
    pub fn select_next(&mut self) {
        // Valid indices run 0 ("All Projects") through projects.len().
        if self.selected_index < self.projects.len() {
            self.selected_index += 1;
            self.ensure_visible();
        }
    }

    /// Confirm the current selection and return the new scope.
    #[must_use]
    pub fn confirm(&self) -> ScopeContext {
        match self.selected_index.checked_sub(1) {
            // Index 0 is the "All Projects" row: clear the filter.
            None => ScopeContext {
                project_id: None,
                project_name: None,
            },
            Some(i) => {
                let project = &self.projects[i];
                ScopeContext {
                    project_id: Some(project.id),
                    project_name: Some(project.path.clone()),
                }
            }
        }
    }

    /// Total number of rows (1 for "All" + project count).
    #[must_use]
    pub fn row_count(&self) -> usize {
        self.projects.len() + 1
    }

    /// Scroll just enough to keep the selected row inside the
    /// `MAX_VISIBLE_ROWS`-tall window.
    fn ensure_visible(&mut self) {
        let window_top = self.scroll_offset;
        if self.selected_index < window_top {
            self.scroll_offset = self.selected_index;
        } else if self.selected_index >= window_top + MAX_VISIBLE_ROWS {
            self.scroll_offset = self.selected_index.saturating_sub(MAX_VISIBLE_ROWS - 1);
        }
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    /// Fixture: three projects with IDs 1, 2, 3 in display order.
    fn sample_projects() -> Vec<ProjectInfo> {
        vec![
            ProjectInfo {
                id: 1,
                path: "alpha/repo".into(),
            },
            ProjectInfo {
                id: 2,
                path: "beta/repo".into(),
            },
            ProjectInfo {
                id: 3,
                path: "gamma/repo".into(),
            },
        ]
    }
    #[test]
    fn test_open_no_scope_selects_all() {
        let mut picker = ScopePickerState::default();
        let scope = ScopeContext::default();
        picker.open(sample_projects(), &scope);
        assert!(picker.visible);
        assert_eq!(picker.selected_index, 0); // "All Projects"
        assert_eq!(picker.projects.len(), 3);
    }
    #[test]
    fn test_open_with_scope_preselects_project() {
        let mut picker = ScopePickerState::default();
        let scope = ScopeContext {
            project_id: Some(2),
            project_name: Some("beta/repo".into()),
        };
        picker.open(sample_projects(), &scope);
        assert_eq!(picker.selected_index, 2); // index 1 in projects = index 2 in picker
    }
    #[test]
    fn test_select_prev_and_next() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        picker.select_next();
        assert_eq!(picker.selected_index, 1);
        picker.select_next();
        assert_eq!(picker.selected_index, 2);
        picker.select_prev();
        assert_eq!(picker.selected_index, 1);
    }
    #[test]
    fn test_select_prev_at_zero_stays() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        picker.select_prev();
        assert_eq!(picker.selected_index, 0);
    }
    #[test]
    fn test_select_next_at_max_stays() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        // 4 total rows (All + 3 projects), max index = 3
        for _ in 0..10 {
            picker.select_next();
        }
        assert_eq!(picker.selected_index, 3);
    }
    #[test]
    fn test_confirm_all_projects() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        let scope = picker.confirm();
        assert!(scope.project_id.is_none());
        assert!(scope.project_name.is_none());
    }
    #[test]
    fn test_confirm_specific_project() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        picker.select_next(); // index 1 = first project (alpha/repo, id=1)
        let scope = picker.confirm();
        assert_eq!(scope.project_id, Some(1));
        assert_eq!(scope.project_name.as_deref(), Some("alpha/repo"));
    }
    #[test]
    fn test_close_hides_picker() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        assert!(picker.visible);
        picker.close();
        assert!(!picker.visible);
    }
    #[test]
    fn test_row_count() {
        let mut picker = ScopePickerState::default();
        picker.open(sample_projects(), &ScopeContext::default());
        assert_eq!(picker.row_count(), 4); // "All" + 3 projects
    }
    #[test]
    fn test_open_with_unknown_project_selects_all() {
        let mut picker = ScopePickerState::default();
        let scope = ScopeContext {
            project_id: Some(999), // Not in list
            project_name: Some("unknown".into()),
        };
        picker.open(sample_projects(), &scope);
        assert_eq!(picker.selected_index, 0); // Falls back to "All"
    }
}

View File

@@ -0,0 +1,153 @@
#![allow(dead_code)]
//! Stats screen state — database and index statistics.
//!
//! Shows entity counts, FTS coverage, embedding coverage, and queue
//! health. Data is produced by synchronous DB queries.
// ---------------------------------------------------------------------------
// StatsData
// ---------------------------------------------------------------------------
/// Database statistics for TUI display.
#[derive(Debug, Clone, Default)]
pub struct StatsData {
    /// Total documents in the database.
    pub total_documents: i64,
    /// Issues stored.
    pub issues: i64,
    /// Merge requests stored.
    pub merge_requests: i64,
    /// Discussions stored.
    pub discussions: i64,
    /// Notes stored.
    pub notes: i64,
    /// Documents indexed in FTS.
    pub fts_indexed: i64,
    /// Documents with embeddings.
    pub embedded_documents: i64,
    /// Total embedding chunks.
    pub total_chunks: i64,
    /// Embedding coverage percentage (0.0100.0).
    pub coverage_pct: f64,
    /// Pending queue items (dirty sources).
    pub queue_pending: i64,
    /// Failed queue items.
    pub queue_failed: i64,
}
impl StatsData {
    /// FTS coverage percentage relative to total documents.
    ///
    /// Returns 0.0 when the database holds no documents, avoiding a
    /// division by zero.
    #[must_use]
    pub fn fts_coverage_pct(&self) -> f64 {
        match self.total_documents {
            0 => 0.0,
            total => (self.fts_indexed as f64 / total as f64) * 100.0,
        }
    }
    /// Whether there are pending queue items that need processing.
    #[must_use]
    pub fn has_queue_work(&self) -> bool {
        self.queue_failed > 0 || self.queue_pending > 0
    }
}
// ---------------------------------------------------------------------------
// StatsState
// ---------------------------------------------------------------------------
/// State for the Stats screen.
#[derive(Debug, Default)]
pub struct StatsState {
    /// Statistics data (None until loaded).
    pub data: Option<StatsData>,
    /// Whether data has been loaded at least once.
    pub loaded: bool,
}
impl StatsState {
    /// Record freshly loaded stats and mark the screen as loaded.
    pub fn apply_data(&mut self, data: StatsData) {
        self.loaded = true;
        self.data = Some(data);
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    /// Fixture: 500 documents, 90% FTS coverage, queue with pending work.
    fn sample_stats() -> StatsData {
        StatsData {
            total_documents: 500,
            issues: 200,
            merge_requests: 150,
            discussions: 100,
            notes: 50,
            fts_indexed: 450,
            embedded_documents: 300,
            total_chunks: 1200,
            coverage_pct: 60.0,
            queue_pending: 5,
            queue_failed: 1,
        }
    }
    #[test]
    fn test_default_state() {
        let state = StatsState::default();
        assert!(state.data.is_none());
        assert!(!state.loaded);
    }
    #[test]
    fn test_apply_data() {
        let mut state = StatsState::default();
        state.apply_data(sample_stats());
        assert!(state.loaded);
        assert!(state.data.is_some());
    }
    #[test]
    fn test_fts_coverage_pct() {
        let stats = sample_stats();
        let pct = stats.fts_coverage_pct();
        assert!((pct - 90.0).abs() < 0.01); // 450/500 = 90%
    }
    #[test]
    fn test_fts_coverage_pct_zero_documents() {
        let stats = StatsData::default();
        assert_eq!(stats.fts_coverage_pct(), 0.0);
    }
    #[test]
    fn test_has_queue_work() {
        let stats = sample_stats();
        assert!(stats.has_queue_work());
    }
    #[test]
    fn test_no_queue_work() {
        let stats = StatsData {
            queue_pending: 0,
            queue_failed: 0,
            ..sample_stats()
        };
        assert!(!stats.has_queue_work());
    }
    #[test]
    fn test_stats_data_default() {
        let stats = StatsData::default();
        assert_eq!(stats.total_documents, 0);
        assert_eq!(stats.issues, 0);
        assert_eq!(stats.coverage_pct, 0.0);
    }
}

View File

@@ -1,15 +1,597 @@
#![allow(dead_code)]
//! Sync screen state: progress tracking, coalescing, and summary.
//!
//! The sync screen shows real-time progress during data synchronization
//! and transitions to a summary view when complete. A progress coalescer
//! prevents render thrashing from rapid progress updates.
use std::time::Instant;
// ---------------------------------------------------------------------------
// Sync lanes (entity types being synced)
// ---------------------------------------------------------------------------
/// Sync entity types that progress is tracked for.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SyncLane {
    Issues,
    MergeRequests,
    Discussions,
    Notes,
    Events,
    Statuses,
}
impl SyncLane {
    /// All lanes in display order.
    pub const ALL: &'static [SyncLane] = &[
        SyncLane::Issues,
        SyncLane::MergeRequests,
        SyncLane::Discussions,
        SyncLane::Notes,
        SyncLane::Events,
        SyncLane::Statuses,
    ];
    /// Short human-readable label shown beside this lane.
    #[must_use]
    pub fn label(self) -> &'static str {
        match self {
            SyncLane::Issues => "Issues",
            SyncLane::MergeRequests => "MRs",
            SyncLane::Discussions => "Discussions",
            SyncLane::Notes => "Notes",
            SyncLane::Events => "Events",
            SyncLane::Statuses => "Statuses",
        }
    }
}
// ---------------------------------------------------------------------------
// Per-lane progress
// ---------------------------------------------------------------------------
/// Progress for a single sync lane.
#[derive(Debug, Clone, Default)]
pub struct LaneProgress {
    /// Current items processed.
    pub current: u64,
    /// Total items expected (0 = unknown).
    pub total: u64,
    /// Whether this lane has completed.
    pub done: bool,
}
impl LaneProgress {
    /// Fraction complete in `0.0..=1.0`; 0.0 when the total is unknown.
    ///
    /// Clamped so an over-count (`current > total`) never exceeds 1.0.
    #[must_use]
    pub fn fraction(&self) -> f64 {
        match self.total {
            0 => 0.0,
            total => (self.current as f64 / total as f64).clamp(0.0, 1.0),
        }
    }
}
// ---------------------------------------------------------------------------
// Sync summary
// ---------------------------------------------------------------------------
/// Per-entity-type change counts after sync completes.
#[derive(Debug, Clone, Default)]
pub struct EntityChangeCounts {
    pub new: u64,
    pub updated: u64,
}
/// Summary of a completed sync run.
#[derive(Debug, Clone, Default)]
pub struct SyncSummary {
    pub issues: EntityChangeCounts,
    pub merge_requests: EntityChangeCounts,
    pub discussions: EntityChangeCounts,
    pub notes: EntityChangeCounts,
    pub elapsed_ms: u64,
    /// Per-project errors (project path -> error message).
    pub project_errors: Vec<(String, String)>,
}
impl SyncSummary {
    /// Total number of changes (new + updated) across all entity types.
    #[must_use]
    pub fn total_changes(&self) -> u64 {
        [
            &self.issues,
            &self.merge_requests,
            &self.discussions,
            &self.notes,
        ]
        .into_iter()
        .map(|counts| counts.new + counts.updated)
        .sum()
    }
    /// Whether any errors occurred during sync.
    #[must_use]
    pub fn has_errors(&self) -> bool {
        !self.project_errors.is_empty()
    }
}
// ---------------------------------------------------------------------------
// Sync screen mode
// ---------------------------------------------------------------------------
/// Display mode for the sync screen.
///
/// Defaults to `FullScreen`; `Inline` renders the same progress as a
/// single line for embedding inside another screen.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum SyncScreenMode {
    /// Full-screen sync progress with per-lane bars.
    #[default]
    FullScreen,
    /// Compact single-line progress for embedding in Bootstrap screen.
    Inline,
}
// ---------------------------------------------------------------------------
// Sync phase
// ---------------------------------------------------------------------------
/// Current phase of the sync operation.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum SyncPhase {
/// Sync hasn't started yet.
#[default]
Idle,
/// Sync is running.
Running,
/// Sync completed successfully.
Complete,
/// Sync was cancelled by user.
Cancelled,
/// Sync failed with an error.
Failed(String),
}
// ---------------------------------------------------------------------------
// Progress coalescer
// ---------------------------------------------------------------------------
/// Batches rapid progress updates to prevent render thrashing.
///
/// At most one update is emitted per `floor_ms`. Updates arriving faster
/// are coalesced — only the latest value survives.
#[derive(Debug)]
pub struct ProgressCoalescer {
    /// Minimum interval between emitted updates, in milliseconds.
    floor_ms: u64,
    /// Timestamp of the last emitted update (`None` until the first emit).
    last_emit: Option<Instant>,
    /// Number of updates coalesced (dropped) since last emit.
    coalesced_count: u64,
}
impl ProgressCoalescer {
    /// Create a new coalescer with the given floor interval in milliseconds.
    #[must_use]
    pub fn new(floor_ms: u64) -> Self {
        Self {
            floor_ms,
            last_emit: None,
            coalesced_count: 0,
        }
    }
    /// Default coalescer with 100ms floor (10 updates/second max).
    #[must_use]
    pub fn default_floor() -> Self {
        Self::new(100)
    }
    /// Should this update be emitted?
    ///
    /// Returns `true` on the very first call, or once at least `floor_ms`
    /// has elapsed since the last emit. The caller should only
    /// render/process the update when this returns true.
    pub fn should_emit(&mut self) -> bool {
        let now = Instant::now();
        // Compare Durations directly instead of the previous
        // `as_millis() as u64` round-trip; for a whole-millisecond floor
        // the threshold is identical, without the lossy u128→u64 cast.
        let floor = std::time::Duration::from_millis(self.floor_ms);
        let emit = self
            .last_emit
            .map_or(true, |last| now.duration_since(last) >= floor);
        if emit {
            self.last_emit = Some(now);
            self.coalesced_count = 0;
        } else {
            self.coalesced_count += 1;
        }
        emit
    }
    /// Number of updates that have been coalesced since the last emit.
    #[must_use]
    pub fn coalesced_count(&self) -> u64 {
        self.coalesced_count
    }
    /// Reset the coalescer (e.g., when sync restarts).
    pub fn reset(&mut self) {
        self.last_emit = None;
        self.coalesced_count = 0;
    }
}
// ---------------------------------------------------------------------------
// SyncState
// ---------------------------------------------------------------------------
/// State for the sync progress/summary screen.
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct SyncState {
/// Current sync phase.
pub phase: SyncPhase,
/// Display mode (full screen vs inline).
pub mode: SyncScreenMode,
/// Per-lane progress (updated during Running phase).
pub lanes: [LaneProgress; 6],
/// Current stage label (e.g., "Fetching issues...").
pub stage: String,
pub current: u64,
pub total: u64,
/// Log lines from the sync process.
pub log_lines: Vec<String>,
pub completed: bool,
pub elapsed_ms: Option<u64>,
pub error: Option<String>,
/// Stream throughput stats (items per second).
pub items_per_sec: f64,
/// Bytes synced.
pub bytes_synced: u64,
/// Total items synced.
pub items_synced: u64,
/// When the current sync run started (for throughput calculation).
pub started_at: Option<Instant>,
/// Progress coalescer for render throttling.
pub coalescer: ProgressCoalescer,
/// Summary (populated after sync completes).
pub summary: Option<SyncSummary>,
/// Scroll offset for log lines view.
pub log_scroll_offset: usize,
}
impl Default for SyncState {
    /// Manual `Default`: every field zeroed/empty except the coalescer,
    /// which needs its non-trivial default floor.
    ///
    /// Fix: the previous impl omitted `current`, `total`, `completed`,
    /// `elapsed_ms`, and `error`, which does not compile against the
    /// struct definition above.
    fn default() -> Self {
        Self {
            phase: SyncPhase::Idle,
            mode: SyncScreenMode::FullScreen,
            lanes: Default::default(),
            stage: String::new(),
            current: 0,
            total: 0,
            log_lines: Vec::new(),
            completed: false,
            elapsed_ms: None,
            error: None,
            items_per_sec: 0.0,
            bytes_synced: 0,
            items_synced: 0,
            started_at: None,
            coalescer: ProgressCoalescer::default_floor(),
            summary: None,
            log_scroll_offset: 0,
        }
    }
}
impl SyncState {
/// Reset state for a new sync run.
pub fn start(&mut self) {
self.phase = SyncPhase::Running;
self.lanes = Default::default();
self.stage.clear();
self.log_lines.clear();
self.items_per_sec = 0.0;
self.bytes_synced = 0;
self.items_synced = 0;
self.started_at = Some(Instant::now());
self.coalescer.reset();
self.summary = None;
self.log_scroll_offset = 0;
}
/// Apply a progress update for a specific lane.
pub fn update_progress(&mut self, stage: &str, current: u64, total: u64) {
self.stage = stage.to_string();
// Map stage name to lane index.
if let Some(lane) = self.lane_for_stage(stage) {
lane.current = current;
lane.total = total;
}
}
/// Apply a batch progress increment.
pub fn update_batch(&mut self, stage: &str, batch_size: u64) {
self.stage = stage.to_string();
if let Some(lane) = self.lane_for_stage(stage) {
lane.current += batch_size;
}
}
/// Mark sync as completed with summary.
pub fn complete(&mut self, elapsed_ms: u64) {
self.phase = SyncPhase::Complete;
// Mark all lanes as done.
for lane in &mut self.lanes {
lane.done = true;
}
// Build summary from lane data if not already set.
if self.summary.is_none() {
self.summary = Some(SyncSummary {
elapsed_ms,
..Default::default()
});
} else if let Some(ref mut summary) = self.summary {
summary.elapsed_ms = elapsed_ms;
}
}
/// Mark sync as cancelled.
pub fn cancel(&mut self) {
self.phase = SyncPhase::Cancelled;
}
/// Mark sync as failed.
pub fn fail(&mut self, error: String) {
self.phase = SyncPhase::Failed(error);
}
/// Add a log line.
pub fn add_log_line(&mut self, line: String) {
self.log_lines.push(line);
// Auto-scroll to bottom.
if self.log_lines.len() > 1 {
self.log_scroll_offset = self.log_lines.len().saturating_sub(20);
}
}
/// Update stream stats.
pub fn update_stream_stats(&mut self, bytes: u64, items: u64) {
self.bytes_synced = bytes;
self.items_synced = items;
// Compute actual throughput from elapsed time since sync start.
if items > 0 {
if let Some(started) = self.started_at {
let elapsed_secs = started.elapsed().as_secs_f64();
if elapsed_secs > 0.0 {
self.items_per_sec = items as f64 / elapsed_secs;
}
}
}
}
/// Whether sync is currently running.
#[must_use]
pub fn is_running(&self) -> bool {
self.phase == SyncPhase::Running
}
/// Overall progress fraction (average of all lanes).
#[must_use]
pub fn overall_progress(&self) -> f64 {
let active_lanes: Vec<&LaneProgress> =
self.lanes.iter().filter(|l| l.total > 0).collect();
if active_lanes.is_empty() {
return 0.0;
}
let sum: f64 = active_lanes.iter().map(|l| l.fraction()).sum();
sum / active_lanes.len() as f64
}
/// Map a stage name to the corresponding lane.
fn lane_for_stage(&mut self, stage: &str) -> Option<&mut LaneProgress> {
let lower = stage.to_lowercase();
let idx = if lower.contains("issue") {
Some(0)
} else if lower.contains("merge") || lower.contains("mr") {
Some(1)
} else if lower.contains("discussion") {
Some(2)
} else if lower.contains("note") {
Some(3)
} else if lower.contains("event") {
Some(4)
} else if lower.contains("status") {
Some(5)
} else {
None
};
idx.map(|i| &mut self.lanes[i])
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;
    // NOTE: the coalescer tests below depend on wall-clock timing; floors
    // are kept small so the suite stays fast.
    #[test]
    fn test_lane_progress_fraction() {
        let lane = LaneProgress {
            current: 50,
            total: 100,
            done: false,
        };
        assert!((lane.fraction() - 0.5).abs() < f64::EPSILON);
    }
    #[test]
    fn test_lane_progress_fraction_zero_total() {
        let lane = LaneProgress::default();
        assert!((lane.fraction()).abs() < f64::EPSILON);
    }
    #[test]
    fn test_sync_summary_total_changes() {
        let summary = SyncSummary {
            issues: EntityChangeCounts { new: 5, updated: 3 },
            merge_requests: EntityChangeCounts { new: 2, updated: 1 },
            ..Default::default()
        };
        assert_eq!(summary.total_changes(), 11);
    }
    #[test]
    fn test_sync_summary_has_errors() {
        let mut summary = SyncSummary::default();
        assert!(!summary.has_errors());
        summary
            .project_errors
            .push(("grp/repo".into(), "timeout".into()));
        assert!(summary.has_errors());
    }
    #[test]
    fn test_sync_state_start_resets() {
        let mut state = SyncState {
            stage: "old".into(),
            phase: SyncPhase::Complete,
            ..SyncState::default()
        };
        state.log_lines.push("old log".into());
        state.start();
        assert_eq!(state.phase, SyncPhase::Running);
        assert!(state.stage.is_empty());
        assert!(state.log_lines.is_empty());
    }
    #[test]
    fn test_sync_state_update_progress() {
        let mut state = SyncState::default();
        state.start();
        state.update_progress("Fetching issues", 10, 50);
        assert_eq!(state.lanes[0].current, 10);
        assert_eq!(state.lanes[0].total, 50);
        assert_eq!(state.stage, "Fetching issues");
    }
    #[test]
    fn test_sync_state_update_batch() {
        let mut state = SyncState::default();
        state.start();
        state.update_batch("MR processing", 5);
        state.update_batch("MR processing", 3);
        assert_eq!(state.lanes[1].current, 8); // MR lane
    }
    #[test]
    fn test_sync_state_complete() {
        let mut state = SyncState::default();
        state.start();
        state.complete(5000);
        assert_eq!(state.phase, SyncPhase::Complete);
        assert!(state.summary.is_some());
        assert_eq!(state.summary.as_ref().unwrap().elapsed_ms, 5000);
    }
    #[test]
    fn test_sync_state_overall_progress() {
        let mut state = SyncState::default();
        state.start();
        state.update_progress("issues", 50, 100);
        state.update_progress("merge requests", 25, 100);
        // Two active lanes: 0.5 and 0.25, average = 0.375
        assert!((state.overall_progress() - 0.375).abs() < 0.01);
    }
    #[test]
    fn test_sync_state_overall_progress_no_active_lanes() {
        let state = SyncState::default();
        assert!((state.overall_progress()).abs() < f64::EPSILON);
    }
    #[test]
    fn test_progress_coalescer_first_always_emits() {
        let mut coalescer = ProgressCoalescer::new(100);
        assert!(coalescer.should_emit());
    }
    #[test]
    fn test_progress_coalescer_rapid_updates_coalesced() {
        let mut coalescer = ProgressCoalescer::new(100);
        assert!(coalescer.should_emit()); // First always emits.
        // Rapid-fire updates within 100ms should be coalesced.
        let mut emitted = 0;
        for _ in 0..50 {
            if coalescer.should_emit() {
                emitted += 1;
            }
        }
        // With ~0ms between calls, at most 0-1 additional emits expected.
        assert!(
            emitted <= 1,
            "Expected at most 1 emit, got {emitted}"
        );
    }
    #[test]
    fn test_progress_coalescer_emits_after_floor() {
        let mut coalescer = ProgressCoalescer::new(50);
        assert!(coalescer.should_emit());
        // Wait longer than floor.
        thread::sleep(Duration::from_millis(60));
        assert!(coalescer.should_emit());
    }
    #[test]
    fn test_progress_coalescer_reset() {
        let mut coalescer = ProgressCoalescer::new(100);
        coalescer.should_emit();
        coalescer.should_emit(); // Coalesced.
        coalescer.reset();
        assert!(coalescer.should_emit()); // Fresh start.
    }
    #[test]
    fn test_sync_lane_labels() {
        assert_eq!(SyncLane::Issues.label(), "Issues");
        assert_eq!(SyncLane::MergeRequests.label(), "MRs");
        assert_eq!(SyncLane::Notes.label(), "Notes");
    }
    #[test]
    fn test_sync_state_add_log_line() {
        let mut state = SyncState::default();
        state.add_log_line("line 1".into());
        state.add_log_line("line 2".into());
        assert_eq!(state.log_lines.len(), 2);
        assert_eq!(state.log_lines[0], "line 1");
    }
    #[test]
    fn test_sync_state_cancel() {
        let mut state = SyncState::default();
        state.start();
        state.cancel();
        assert_eq!(state.phase, SyncPhase::Cancelled);
    }
    #[test]
    fn test_sync_state_fail() {
        let mut state = SyncState::default();
        state.start();
        state.fail("network timeout".into());
        assert!(matches!(state.phase, SyncPhase::Failed(_)));
    }
}

View File

@@ -0,0 +1,222 @@
#![allow(dead_code)]
//! Sync delta ledger — records entity changes during a sync run.
//!
//! After sync completes, the dashboard and list screens can query the
//! ledger to highlight "new since last sync" items. The ledger is
//! ephemeral (per-run, not persisted to disk).
use std::collections::HashSet;
/// Kind of change that occurred to an entity during sync.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ChangeKind {
    /// The entity did not exist locally before this sync run.
    New,
    /// The entity existed locally and was modified by this sync run.
    Updated,
}
/// Entity type for the ledger.
///
/// Issues and merge requests are tracked per-IID; discussions and notes
/// are only counted (see `SyncDeltaLedger`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LedgerEntityType {
    Issue,
    MergeRequest,
    Discussion,
    Note,
}
/// Per-run record of changed entity IDs during sync.
///
/// Used to highlight newly synced items in list/dashboard views.
#[derive(Debug, Default)]
pub struct SyncDeltaLedger {
    pub new_issue_iids: HashSet<i64>,
    pub updated_issue_iids: HashSet<i64>,
    pub new_mr_iids: HashSet<i64>,
    pub updated_mr_iids: HashSet<i64>,
    pub new_discussion_count: u64,
    pub updated_discussion_count: u64,
    pub new_note_count: u64,
}
impl SyncDeltaLedger {
    /// Record a change to an entity.
    ///
    /// IID-tracked entities (issues, MRs) are deduplicated via sets;
    /// discussions and notes are plain tallies. An "updated" note is
    /// deliberately ignored — there is no meaningful count for it.
    pub fn record_change(&mut self, entity_type: LedgerEntityType, iid: i64, kind: ChangeKind) {
        match entity_type {
            LedgerEntityType::Issue => {
                let set = match kind {
                    ChangeKind::New => &mut self.new_issue_iids,
                    ChangeKind::Updated => &mut self.updated_issue_iids,
                };
                set.insert(iid);
            }
            LedgerEntityType::MergeRequest => {
                let set = match kind {
                    ChangeKind::New => &mut self.new_mr_iids,
                    ChangeKind::Updated => &mut self.updated_mr_iids,
                };
                set.insert(iid);
            }
            LedgerEntityType::Discussion => match kind {
                ChangeKind::New => self.new_discussion_count += 1,
                ChangeKind::Updated => self.updated_discussion_count += 1,
            },
            LedgerEntityType::Note => {
                if kind == ChangeKind::New {
                    self.new_note_count += 1;
                }
            }
        }
    }
    /// Produce a summary of changes from this sync run.
    #[must_use]
    pub fn summary(&self) -> super::sync::SyncSummary {
        use super::sync::{EntityChangeCounts, SyncSummary};
        let issues = EntityChangeCounts {
            new: self.new_issue_iids.len() as u64,
            updated: self.updated_issue_iids.len() as u64,
        };
        let merge_requests = EntityChangeCounts {
            new: self.new_mr_iids.len() as u64,
            updated: self.updated_mr_iids.len() as u64,
        };
        let discussions = EntityChangeCounts {
            new: self.new_discussion_count,
            updated: self.updated_discussion_count,
        };
        let notes = EntityChangeCounts {
            new: self.new_note_count,
            updated: 0,
        };
        SyncSummary {
            issues,
            merge_requests,
            discussions,
            notes,
            ..Default::default()
        }
    }
    /// Whether `iid` is an issue that was newly added in this sync run.
    #[must_use]
    pub fn is_new_issue(&self, iid: i64) -> bool {
        self.new_issue_iids.contains(&iid)
    }
    /// Whether `iid` is a merge request that was newly added in this sync run.
    #[must_use]
    pub fn is_new_mr(&self, iid: i64) -> bool {
        self.new_mr_iids.contains(&iid)
    }
    /// Total changes recorded.
    #[must_use]
    pub fn total_changes(&self) -> u64 {
        let iid_changes = self.new_issue_iids.len()
            + self.updated_issue_iids.len()
            + self.new_mr_iids.len()
            + self.updated_mr_iids.len();
        iid_changes as u64
            + self.new_discussion_count
            + self.updated_discussion_count
            + self.new_note_count
    }
    /// Clear the ledger for a new sync run.
    pub fn clear(&mut self) {
        *self = Self::default();
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    // Ledger semantics under test: IID sets deduplicate, counters tally,
    // and "updated" notes are intentionally not counted.
    #[test]
    fn test_record_new_issues() {
        let mut ledger = SyncDeltaLedger::default();
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Issue, 2, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Issue, 3, ChangeKind::Updated);
        assert_eq!(ledger.new_issue_iids.len(), 2);
        assert_eq!(ledger.updated_issue_iids.len(), 1);
        assert!(ledger.is_new_issue(1));
        assert!(ledger.is_new_issue(2));
        assert!(!ledger.is_new_issue(3));
    }
    #[test]
    fn test_record_new_mrs() {
        let mut ledger = SyncDeltaLedger::default();
        ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::New);
        ledger.record_change(LedgerEntityType::MergeRequest, 20, ChangeKind::Updated);
        assert!(ledger.is_new_mr(10));
        assert!(!ledger.is_new_mr(20));
    }
    #[test]
    fn test_summary_counts() {
        let mut ledger = SyncDeltaLedger::default();
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Issue, 2, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Issue, 3, ChangeKind::Updated);
        ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Discussion, 0, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New);
        let summary = ledger.summary();
        assert_eq!(summary.issues.new, 2);
        assert_eq!(summary.issues.updated, 1);
        assert_eq!(summary.merge_requests.new, 1);
        assert_eq!(summary.discussions.new, 1);
        assert_eq!(summary.notes.new, 1);
    }
    #[test]
    fn test_total_changes() {
        let mut ledger = SyncDeltaLedger::default();
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::Updated);
        ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New);
        assert_eq!(ledger.total_changes(), 3);
    }
    #[test]
    fn test_dedup_same_iid() {
        let mut ledger = SyncDeltaLedger::default();
        // Recording same IID twice should deduplicate.
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        assert_eq!(ledger.new_issue_iids.len(), 1);
    }
    #[test]
    fn test_clear() {
        let mut ledger = SyncDeltaLedger::default();
        ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New);
        ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New);
        ledger.clear();
        assert_eq!(ledger.total_changes(), 0);
        assert!(ledger.new_issue_iids.is_empty());
    }
    #[test]
    fn test_empty_ledger_summary() {
        let ledger = SyncDeltaLedger::default();
        let summary = ledger.summary();
        assert_eq!(summary.total_changes(), 0);
        assert!(!summary.has_errors());
    }
}

View File

@@ -9,6 +9,8 @@ use std::collections::HashSet;
use lore::core::trace::TraceResult;
use crate::text_width::{next_char_boundary, prev_char_boundary};
// ---------------------------------------------------------------------------
// TraceState
// ---------------------------------------------------------------------------
@@ -18,7 +20,7 @@ use lore::core::trace::TraceResult;
pub struct TraceState {
/// User-entered file path (with optional :line suffix).
pub path_input: String,
/// Cursor position within `path_input` (byte offset).
pub path_cursor: usize,
/// Whether the path input field has keyboard focus.
pub path_focused: bool,
@@ -188,48 +190,35 @@ impl TraceState {
// --- Text editing helpers ---
/// Insert a character at the cursor position (byte offset).
pub fn insert_char(&mut self, ch: char) {
let byte_pos = self
.path_input
.char_indices()
.nth(self.path_cursor)
.map_or(self.path_input.len(), |(i, _)| i);
self.path_input.insert(byte_pos, ch);
self.path_cursor += 1;
self.path_input.insert(self.path_cursor, ch);
self.path_cursor += ch.len_utf8();
self.update_autocomplete();
}
/// Delete the character before the cursor (byte offset).
pub fn delete_char_before_cursor(&mut self) {
if self.path_cursor == 0 {
return;
}
self.path_cursor -= 1;
let byte_pos = self
.path_input
.char_indices()
.nth(self.path_cursor)
.map_or(self.path_input.len(), |(i, _)| i);
let end = self
.path_input
.char_indices()
.nth(self.path_cursor + 1)
.map_or(self.path_input.len(), |(i, _)| i);
self.path_input.drain(byte_pos..end);
let prev = prev_char_boundary(&self.path_input, self.path_cursor);
self.path_input.drain(prev..self.path_cursor);
self.path_cursor = prev;
self.update_autocomplete();
}
/// Move cursor left (byte offset).
pub fn cursor_left(&mut self) {
self.path_cursor = self.path_cursor.saturating_sub(1);
if self.path_cursor > 0 {
self.path_cursor = prev_char_boundary(&self.path_input, self.path_cursor);
}
}
/// Move cursor right (byte offset).
pub fn cursor_right(&mut self) {
let max = self.path_input.chars().count();
if self.path_cursor < max {
self.path_cursor += 1;
if self.path_cursor < self.path_input.len() {
self.path_cursor = next_char_boundary(&self.path_input, self.path_cursor);
}
}
@@ -266,7 +255,7 @@ impl TraceState {
pub fn accept_autocomplete(&mut self) {
if let Some(match_) = self.autocomplete_matches.get(self.autocomplete_index) {
self.path_input = match_.clone();
self.path_cursor = self.path_input.chars().count();
self.path_cursor = self.path_input.len();
self.autocomplete_matches.clear();
}
}

View File

@@ -5,6 +5,8 @@
use lore::core::who_types::WhoResult;
use crate::text_width::{next_char_boundary, prev_char_boundary};
// ---------------------------------------------------------------------------
// WhoMode
// ---------------------------------------------------------------------------
@@ -291,24 +293,6 @@ impl WhoState {
}
}
/// Find the byte offset of the previous char boundary.
fn prev_char_boundary(s: &str, pos: usize) -> usize {
let mut i = pos.saturating_sub(1);
while i > 0 && !s.is_char_boundary(i) {
i -= 1;
}
i
}
/// Find the byte offset of the next char boundary.
fn next_char_boundary(s: &str, pos: usize) -> usize {
let mut i = pos + 1;
while i < s.len() && !s.is_char_boundary(i) {
i += 1;
}
i
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

View File

@@ -0,0 +1,300 @@
//! Unicode-aware text width measurement and truncation.
//!
//! Terminal cells aren't 1:1 with bytes or even chars. CJK characters
//! occupy 2 cells, emoji ZWJ sequences are single grapheme clusters,
//! and combining marks have zero width. This module provides correct
//! measurement and truncation that never splits a grapheme cluster.
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;
/// Measure the display width of a string in terminal cells.
///
/// - ASCII characters: 1 cell each
/// - CJK characters: 2 cells each
/// - Emoji: varies (ZWJ sequences treated as grapheme clusters)
/// - Combining marks: 0 cells
#[must_use]
pub fn measure_display_width(s: &str) -> usize {
UnicodeWidthStr::width(s)
}
/// Shorten `s` so its rendered width fits within `max_width` terminal cells.
///
/// A string that already fits is returned unchanged. Otherwise the result
/// ends in `…` (which consumes 1 cell of the budget) and is assembled
/// grapheme cluster by grapheme cluster, so multi-cell clusters (CJK,
/// emoji ZWJ sequences) are never split: a cluster that would overflow
/// the remaining budget is dropped entirely.
///
/// A `max_width` of 0 yields an empty string (no room even for `…`).
#[must_use]
pub fn truncate_display_width(s: &str, max_width: usize) -> String {
    if measure_display_width(s) <= max_width {
        return s.to_string();
    }
    if max_width == 0 {
        return String::new();
    }
    // One cell is always reserved for the trailing ellipsis.
    let budget = max_width - 1;
    let mut out = String::new();
    let mut cells_used = 0usize;
    for cluster in s.graphemes(true) {
        let cluster_width = UnicodeWidthStr::width(cluster);
        if cells_used + cluster_width > budget {
            break;
        }
        cells_used += cluster_width;
        out.push_str(cluster);
    }
    out.push('\u{2026}'); // ellipsis
    out
}
/// Right-pad `s` with spaces until it spans `width` terminal cells.
///
/// Strings already measuring `width` cells or more come back untouched.
/// Padding is computed in display cells, not chars, so CJK text pads to
/// the correct column.
#[must_use]
pub fn pad_display_width(s: &str, width: usize) -> String {
    let measured = measure_display_width(s);
    if measured >= width {
        s.to_string()
    } else {
        let shortfall = width - measured;
        let mut padded = String::with_capacity(s.len() + shortfall);
        padded.push_str(s);
        padded.push_str(&" ".repeat(shortfall));
        padded
    }
}
// ---------------------------------------------------------------------------
// Cursor / char-boundary helpers
// ---------------------------------------------------------------------------
/// Byte offset of the last char boundary at or before `pos - 1`.
///
/// Scans downward from `pos - 1` until `str::is_char_boundary` accepts
/// the index. Index 0 is always a boundary, so the scan terminates and
/// `pos <= 1` yields 0.
pub(crate) fn prev_char_boundary(s: &str, pos: usize) -> usize {
    let start = pos.saturating_sub(1);
    // 0 is always a char boundary, so `find` cannot come up empty.
    (0..=start)
        .rev()
        .find(|&i| s.is_char_boundary(i))
        .unwrap_or(0)
}
/// Find the byte offset of the next char boundary after `pos`.
///
/// Walks forward from `pos + 1` until a valid char boundary is found.
/// NOTE: when `pos` is already at or past the last boundary this returns
/// `pos + 1`, which can exceed `s.len()` — e.g.
/// `next_char_boundary("hi", 2) == 3` (see `test_next_char_boundary_at_end`).
/// Callers must guard with `pos < s.len()` before using the result as an
/// index, as `cursor_right` does.
pub(crate) fn next_char_boundary(s: &str, pos: usize) -> usize {
    let mut i = pos + 1;
    // Skip over continuation bytes of a multi-byte char.
    while i < s.len() && !s.is_char_boundary(i) {
        i += 1;
    }
    i
}
/// Translate a byte-offset cursor into a character-column offset for
/// cursor rendering.
///
/// `cursor` is clamped to the string length and snapped down to the
/// nearest char boundary, then the characters preceding that point are
/// counted. The count is saturated into `u16` range.
///
/// NOTE(review): this counts chars, not display cells, so a wide (CJK)
/// char still advances the column by 1 — confirm callers only feed it
/// narrow text before relying on it for wide input.
pub(crate) fn cursor_cell_offset(text: &str, cursor: usize) -> u16 {
    let mut snapped = cursor.min(text.len());
    // Index 0 is always a boundary, so this loop terminates.
    while !text.is_char_boundary(snapped) {
        snapped -= 1;
    }
    let columns = text[..snapped].chars().count();
    columns.min(usize::from(u16::MAX)) as u16
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    //! Unit tests for cell-width measurement, truncation, padding, and
    //! the byte-offset cursor/boundary helpers.
    use super::*;
    // --- measure_display_width ---
    #[test]
    fn test_measure_ascii() {
        assert_eq!(measure_display_width("Hello"), 5);
    }
    #[test]
    fn test_measure_empty() {
        assert_eq!(measure_display_width(""), 0);
    }
    #[test]
    fn test_measure_cjk_width() {
        // TDD anchor from the bead spec
        assert_eq!(measure_display_width("Hello"), 5);
        assert_eq!(measure_display_width("\u{65E5}\u{672C}\u{8A9E}"), 6); // 日本語 = 3 chars * 2 cells
    }
    #[test]
    fn test_measure_mixed_ascii_cjk() {
        // "Hi日本" = 2 + 2 + 2 = 6
        assert_eq!(measure_display_width("Hi\u{65E5}\u{672C}"), 6);
    }
    #[test]
    fn test_measure_combining_marks() {
        // e + combining acute accent = 1 cell (combining mark is 0-width)
        assert_eq!(measure_display_width("e\u{0301}"), 1);
    }
    // --- truncate_display_width ---
    #[test]
    fn test_truncate_no_truncation_needed() {
        assert_eq!(truncate_display_width("Hello", 10), "Hello");
    }
    #[test]
    fn test_truncate_exact_fit() {
        // Exactly at the limit: returned verbatim, no ellipsis.
        assert_eq!(truncate_display_width("Hello", 5), "Hello");
    }
    #[test]
    fn test_truncate_ascii() {
        // "Hello World" is 11 cells. Truncate to 8: budget=7 for text + 1 for ellipsis
        let result = truncate_display_width("Hello World", 8);
        assert_eq!(measure_display_width(&result), 8); // 7 chars + ellipsis
        assert!(result.ends_with('\u{2026}'));
    }
    #[test]
    fn test_truncate_cjk_no_split() {
        // 日本語テスト = 6 chars * 2 cells = 12 cells
        // Truncate to 5: budget=4 for text + 1 for ellipsis
        // Can fit 2 CJK chars (4 cells), then ellipsis
        let result = truncate_display_width("\u{65E5}\u{672C}\u{8A9E}\u{30C6}\u{30B9}\u{30C8}", 5);
        assert!(result.ends_with('\u{2026}'));
        assert!(measure_display_width(&result) <= 5);
    }
    #[test]
    fn test_truncate_zero_width() {
        // No budget at all: empty string, not a lone ellipsis.
        assert_eq!(truncate_display_width("Hello", 0), "");
    }
    #[test]
    fn test_truncate_width_one() {
        // Only room for the ellipsis itself
        let result = truncate_display_width("Hello", 1);
        assert_eq!(result, "\u{2026}");
    }
    #[test]
    fn test_truncate_emoji() {
        // Family emoji (ZWJ sequence) — should not be split
        let family = "\u{1F468}\u{200D}\u{1F469}\u{200D}\u{1F467}"; // 👨‍👩‍👧
        let result = truncate_display_width(&format!("{family}Hello"), 3);
        // The emoji grapheme cluster is > 1 cell; if it doesn't fit in budget,
        // it should be skipped entirely, leaving just the ellipsis or less.
        assert!(measure_display_width(&result) <= 3);
    }
    // --- pad_display_width ---
    #[test]
    fn test_pad_basic() {
        let result = pad_display_width("Hi", 5);
        assert_eq!(result, "Hi   ");
        assert_eq!(measure_display_width(&result), 5);
    }
    #[test]
    fn test_pad_already_wide_enough() {
        // Wider than the target: returned unchanged, never clipped.
        assert_eq!(pad_display_width("Hello", 3), "Hello");
    }
    #[test]
    fn test_pad_exact_width() {
        assert_eq!(pad_display_width("Hello", 5), "Hello");
    }
    #[test]
    fn test_pad_cjk() {
        // 日本 = 4 cells, pad to 6 = 2 spaces
        let result = pad_display_width("\u{65E5}\u{672C}", 6);
        assert_eq!(measure_display_width(&result), 6);
        assert!(result.ends_with(" "));
    }
    // --- prev_char_boundary / next_char_boundary ---
    #[test]
    fn test_prev_char_boundary_ascii() {
        assert_eq!(prev_char_boundary("hello", 3), 2);
        assert_eq!(prev_char_boundary("hello", 1), 0);
    }
    #[test]
    fn test_prev_char_boundary_at_zero() {
        assert_eq!(prev_char_boundary("hello", 0), 0);
    }
    #[test]
    fn test_prev_char_boundary_multibyte() {
        // "aé" = 'a' (1 byte) + 'é' (2 bytes) = 3 bytes total
        let s = "a\u{00E9}b";
        // Position 3 = start of 'b', prev boundary = 1 (start of 'é')
        assert_eq!(prev_char_boundary(s, 3), 1);
        // Position 2 = mid-'é' byte, should snap to 1
        assert_eq!(prev_char_boundary(s, 2), 1);
    }
    #[test]
    fn test_next_char_boundary_ascii() {
        assert_eq!(next_char_boundary("hello", 0), 1);
        assert_eq!(next_char_boundary("hello", 3), 4);
    }
    #[test]
    fn test_next_char_boundary_multibyte() {
        // "aé" = 'a' (1 byte) + 'é' (2 bytes)
        let s = "a\u{00E9}b";
        // Position 1 = start of 'é', next boundary = 3 (start of 'b')
        assert_eq!(next_char_boundary(s, 1), 3);
    }
    #[test]
    fn test_next_char_boundary_at_end() {
        // Documents the past-the-end behavior: pos + 1, not s.len().
        assert_eq!(next_char_boundary("hi", 2), 3);
    }
    // --- cursor_cell_offset ---
    #[test]
    fn test_cursor_cell_offset_ascii() {
        assert_eq!(cursor_cell_offset("hello", 0), 0);
        assert_eq!(cursor_cell_offset("hello", 3), 3);
        assert_eq!(cursor_cell_offset("hello", 5), 5);
    }
    #[test]
    fn test_cursor_cell_offset_multibyte() {
        // "aéb" = byte offsets: a=0, é=1..3, b=3
        let s = "a\u{00E9}b";
        assert_eq!(cursor_cell_offset(s, 0), 0); // before 'a'
        assert_eq!(cursor_cell_offset(s, 1), 1); // after 'a', before 'é'
        assert_eq!(cursor_cell_offset(s, 2), 1); // mid-'é', snaps back to 1
        assert_eq!(cursor_cell_offset(s, 3), 2); // after 'é', before 'b'
        assert_eq!(cursor_cell_offset(s, 4), 3); // after 'b'
    }
    #[test]
    fn test_cursor_cell_offset_beyond_end() {
        // Out-of-range cursors clamp to the string length.
        assert_eq!(cursor_cell_offset("hi", 99), 2);
    }
}

View File

@@ -26,3 +26,18 @@ pub use filter_bar::{FilterBarColors, FilterBarState, render_filter_bar};
pub use help_overlay::render_help_overlay;
pub use loading::render_loading;
pub use status_bar::render_status_bar;
/// Clamp `s` to at most `max_chars` characters, ellipsizing on overflow.
///
/// When the text overflows and `max_chars > 1`, the final slot is spent on
/// a Unicode ellipsis `…`; for `max_chars <= 1` there is no room for an
/// ellipsis, so the text is simply cut. Counting is per `char`, not per
/// display cell.
pub fn truncate_str(s: &str, max_chars: usize) -> String {
    let char_total = s.chars().count();
    if char_total <= max_chars {
        return s.to_string();
    }
    if max_chars <= 1 {
        return s.chars().take(max_chars).collect();
    }
    // Keep max_chars - 1 chars, then append the ellipsis.
    let mut clipped: String = s.chars().take(max_chars - 1).collect();
    clipped.push('\u{2026}');
    clipped
}

View File

@@ -0,0 +1,289 @@
//! Doctor screen view — health check results.
//!
//! Renders a vertical list of health checks with colored status
//! indicators (green PASS, yellow WARN, red FAIL).
use ftui::core::geometry::Rect;
use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;
use crate::state::doctor::{DoctorState, HealthStatus};
use super::{TEXT, TEXT_MUTED};
/// Pass green.
const PASS_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39);
/// Warning yellow.
const WARN_FG: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15);
/// Fail red.
const FAIL_FG: PackedRgba = PackedRgba::rgb(0xD1, 0x4D, 0x41);
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------
/// Render the doctor screen.
///
/// Layout, top to bottom: a status-colored title, a pass/warn/fail summary
/// line, one row per health check (badge, padded name, truncated detail),
/// and a key hint on the bottom row. Rows that do not fit vertically are
/// clipped; nothing is drawn on terminals smaller than 10x3.
pub fn render_doctor(frame: &mut Frame<'_>, state: &DoctorState, area: Rect) {
    if area.width < 10 || area.height < 3 {
        return;
    }
    let max_x = area.right();
    if !state.loaded {
        // Not yet loaded — show centered prompt.
        let msg = "Loading health checks...";
        let x = area.x + area.width.saturating_sub(msg.len() as u16) / 2;
        let y = area.y + area.height / 2;
        frame.print_text_clipped(
            x,
            y,
            msg,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
        return;
    }
    // Title, colored by the worst overall status.
    let overall = state.overall_status();
    let title_fg = status_color(overall);
    let title = format!("Doctor — {}", overall.label());
    frame.print_text_clipped(
        area.x + 2,
        area.y + 1,
        &title,
        Cell {
            fg: title_fg,
            ..Cell::default()
        },
        max_x,
    );
    // Summary line.
    let pass_count = state.count_by_status(HealthStatus::Pass);
    let warn_count = state.count_by_status(HealthStatus::Warn);
    let fail_count = state.count_by_status(HealthStatus::Fail);
    let summary = format!(
        "{} passed, {} warnings, {} failed",
        pass_count, warn_count, fail_count
    );
    frame.print_text_clipped(
        area.x + 2,
        area.y + 2,
        &summary,
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
    // Health check rows.
    let rows_start_y = area.y + 4;
    let name_width = 16u16;
    for (i, check) in state.checks.iter().enumerate() {
        let y = rows_start_y + i as u16;
        // Stop before running into the hint area.
        if y >= area.bottom().saturating_sub(2) {
            break;
        }
        // Status badge.
        let badge = format!("[{}]", check.status.label());
        let badge_fg = status_color(check.status);
        frame.print_text_clipped(
            area.x + 2,
            y,
            &badge,
            Cell {
                fg: badge_fg,
                ..Cell::default()
            },
            max_x,
        );
        // Check name.
        let name_x = area.x + 2 + 7; // "[PASS] " = 7 chars
        let name = format!("{:<width$}", check.name, width = name_width as usize);
        frame.print_text_clipped(
            name_x,
            y,
            &name,
            Cell {
                fg: TEXT,
                ..Cell::default()
            },
            max_x,
        );
        // Detail text, clipped to the remaining columns. Truncation is done
        // per char using stable APIs only (`str::floor_char_boundary` is
        // nightly-only), so multi-byte characters are never split.
        let detail_x = name_x + name_width;
        let max_detail = area.right().saturating_sub(detail_x + 1) as usize;
        let detail = if check.detail.chars().count() > max_detail {
            let keep = max_detail.saturating_sub(3);
            let clipped: String = check.detail.chars().take(keep).collect();
            format!("{clipped}...")
        } else {
            check.detail.clone()
        };
        frame.print_text_clipped(
            detail_x,
            y,
            &detail,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
    }
    // Hint at bottom.
    let hint_y = area.bottom().saturating_sub(1);
    frame.print_text_clipped(
        area.x + 2,
        hint_y,
        "Esc: back | lore doctor (full check)",
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
}
/// Pick the foreground color that encodes a health status:
/// red for fail, yellow for warn, green for pass.
fn status_color(status: HealthStatus) -> PackedRgba {
    match status {
        HealthStatus::Fail => FAIL_FG,
        HealthStatus::Warn => WARN_FG,
        HealthStatus::Pass => PASS_FG,
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    //! Render smoke tests — each call must complete without panicking on
    //! the given frame size; no cell-level assertions are made.
    use super::*;
    use crate::state::doctor::HealthCheck;
    use ftui::render::grapheme_pool::GraphemePool;
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }
    // A representative mix of pass/warn/fail checks with varied detail text.
    fn sample_checks() -> Vec<HealthCheck> {
        vec![
            HealthCheck {
                name: "Config".into(),
                status: HealthStatus::Pass,
                detail: "/home/user/.config/lore/config.json".into(),
            },
            HealthCheck {
                name: "Database".into(),
                status: HealthStatus::Pass,
                detail: "schema v12".into(),
            },
            HealthCheck {
                name: "Projects".into(),
                status: HealthStatus::Warn,
                detail: "0 projects configured".into(),
            },
            HealthCheck {
                name: "FTS Index".into(),
                status: HealthStatus::Fail,
                detail: "No documents indexed".into(),
            },
        ]
    }
    #[test]
    fn test_render_not_loaded() {
        with_frame!(80, 24, |frame| {
            let state = DoctorState::default();
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
        });
    }
    #[test]
    fn test_render_with_checks() {
        with_frame!(80, 24, |frame| {
            let mut state = DoctorState::default();
            state.apply_checks(sample_checks());
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
        });
    }
    #[test]
    fn test_render_all_pass() {
        with_frame!(80, 24, |frame| {
            let mut state = DoctorState::default();
            state.apply_checks(vec![HealthCheck {
                name: "Config".into(),
                status: HealthStatus::Pass,
                detail: "ok".into(),
            }]);
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
        });
    }
    #[test]
    fn test_render_tiny_terminal() {
        with_frame!(8, 2, |frame| {
            let mut state = DoctorState::default();
            state.apply_checks(sample_checks());
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
            // Should not panic.
        });
    }
    #[test]
    fn test_render_narrow_terminal_truncates() {
        with_frame!(40, 20, |frame| {
            let mut state = DoctorState::default();
            state.apply_checks(vec![HealthCheck {
                name: "Database".into(),
                status: HealthStatus::Pass,
                detail: "This is a very long detail string that should be truncated".into(),
            }]);
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
        });
    }
    #[test]
    fn test_render_many_checks_clips() {
        with_frame!(80, 10, |frame| {
            let mut state = DoctorState::default();
            let mut checks = Vec::new();
            for i in 0..20 {
                checks.push(HealthCheck {
                    name: format!("Check {i}"),
                    status: HealthStatus::Pass,
                    detail: "ok".into(),
                });
            }
            state.apply_checks(checks);
            let area = frame.bounds();
            render_doctor(&mut frame, &state, area);
            // Should clip without panicking.
        });
    }
}

View File

@@ -22,6 +22,7 @@ use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;
use crate::state::file_history::{FileHistoryResult, FileHistoryState};
use super::common::truncate_str;
// ---------------------------------------------------------------------------
// Colors (Flexoki palette)
@@ -136,7 +137,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &FileHistoryState, x: u16, y:
// Cursor indicator.
if state.path_focused {
let cursor_x = after_label + state.path_cursor as u16;
let cursor_col = state.path_input[..state.path_cursor].chars().count() as u16;
let cursor_x = after_label + cursor_col;
if cursor_x < max_x {
let cursor_cell = Cell {
fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), // dark bg
@@ -446,16 +448,6 @@ fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) {
frame.print_text_clipped(x + 1, y, hints, style, max_x);
}
/// Truncate a string to at most `max_chars` display characters.
fn truncate_str(s: &str, max_chars: usize) -> String {
if s.chars().count() <= max_chars {
s.to_string()
} else {
let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect();
format!("{truncated}")
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

View File

@@ -10,6 +10,7 @@ pub mod bootstrap;
pub mod command_palette;
pub mod common;
pub mod dashboard;
pub mod doctor;
pub mod file_history;
pub mod issue_detail;
pub mod issue_list;
@@ -18,11 +19,13 @@ pub mod mr_list;
pub mod search;
pub mod timeline;
pub mod trace;
pub mod scope_picker;
pub mod stats;
pub mod sync;
pub mod who;
use ftui::layout::{Constraint, Flex};
use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::Draw;
use ftui::render::cell::PackedRgba;
use ftui::render::frame::Frame;
use crate::app::LoreApp;
@@ -34,6 +37,7 @@ use common::{
render_breadcrumb, render_error_toast, render_help_overlay, render_loading, render_status_bar,
};
use dashboard::render_dashboard;
use doctor::render_doctor;
use file_history::render_file_history;
use issue_detail::render_issue_detail;
use issue_list::render_issue_list;
@@ -42,6 +46,9 @@ use mr_list::render_mr_list;
use search::render_search;
use timeline::render_timeline;
use trace::render_trace;
use scope_picker::render_scope_picker;
use stats::render_stats;
use sync::render_sync;
use who::render_who;
// ---------------------------------------------------------------------------
@@ -56,41 +63,6 @@ const ERROR_BG: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red
const ERROR_FG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx
const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2
fn render_sync_placeholder(frame: &mut Frame<'_>, area: ftui::core::geometry::Rect) {
if area.width < 10 || area.height < 5 {
return;
}
let max_x = area.right();
let center_y = area.y + area.height / 2;
let title = "Sync";
let title_x = area.x + area.width.saturating_sub(title.len() as u16) / 2;
frame.print_text_clipped(
title_x,
center_y.saturating_sub(1),
title,
Cell {
fg: ACCENT,
..Cell::default()
},
max_x,
);
let body = "Run `lore sync` in another terminal.";
let body_x = area.x + area.width.saturating_sub(body.len() as u16) / 2;
frame.print_text_clipped(
body_x,
center_y + 1,
body,
Cell {
fg: TEXT_MUTED,
..Cell::default()
},
max_x,
);
}
// ---------------------------------------------------------------------------
// render_screen
// ---------------------------------------------------------------------------
@@ -144,7 +116,7 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) {
if screen == &Screen::Bootstrap {
render_bootstrap(frame, &app.state.bootstrap, content_area);
} else if screen == &Screen::Sync {
render_sync_placeholder(frame, content_area);
render_sync(frame, &app.state.sync, content_area);
} else if screen == &Screen::Dashboard {
render_dashboard(frame, &app.state.dashboard, content_area);
} else if screen == &Screen::IssueList {
@@ -165,6 +137,10 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) {
render_file_history(frame, &app.state.file_history, content_area);
} else if screen == &Screen::Trace {
render_trace(frame, &app.state.trace, content_area);
} else if screen == &Screen::Doctor {
render_doctor(frame, &app.state.doctor, content_area);
} else if screen == &Screen::Stats {
render_stats(frame, &app.state.stats, content_area);
}
// --- Status bar ---
@@ -189,6 +165,14 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) {
// Command palette overlay.
render_command_palette(frame, &app.state.command_palette, bounds);
// Scope picker overlay.
render_scope_picker(
frame,
&app.state.scope_picker,
&app.state.global_scope,
bounds,
);
// Help overlay.
if app.state.show_help {
render_help_overlay(
@@ -279,7 +263,7 @@ mod tests {
});
assert!(
has_content,
"Expected sync placeholder content in center area"
"Expected sync idle content in center area"
);
});
}

View File

@@ -0,0 +1,276 @@
//! Scope picker overlay — modal project filter selector.
//!
//! Renders a centered modal listing all available projects. The user
//! selects "All Projects" or a specific project to filter all screens.
use ftui::core::geometry::Rect;
use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::{BorderChars, Draw};
use ftui::render::frame::Frame;
use crate::state::scope_picker::ScopePickerState;
use crate::state::ScopeContext;
use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED};
/// Selection highlight background.
const SELECTION_BG: PackedRgba = PackedRgba::rgb(0x3A, 0x3A, 0x34);
// ---------------------------------------------------------------------------
// render_scope_picker
// ---------------------------------------------------------------------------
/// Render the scope picker overlay centered on the screen.
///
/// Only renders if `state.visible`. The modal is 50% width, clamped to
/// 25–40 columns and 5–20 rows, and lists "All Projects" followed by one
/// row per project. The active scope is marked with a `>` prefix and
/// accent color; `^`/`v` markers indicate off-screen rows.
pub fn render_scope_picker(
    frame: &mut Frame<'_>,
    state: &ScopePickerState,
    current_scope: &ScopeContext,
    area: Rect,
) {
    if !state.visible {
        return;
    }
    // Too small for a modal.
    if area.height < 5 || area.width < 20 {
        return;
    }
    // Modal dimensions.
    let modal_width = (area.width / 2).clamp(25, 40);
    let row_count = state.row_count();
    // +3 for border top, title gap, border bottom.
    let modal_height = ((row_count + 3) as u16).clamp(5, 20).min(area.height - 2);
    let modal_x = area.x + (area.width.saturating_sub(modal_width)) / 2;
    let modal_y = area.y + (area.height.saturating_sub(modal_height)) / 2;
    let modal_rect = Rect::new(modal_x, modal_y, modal_width, modal_height);
    // Clear background.
    let bg_cell = Cell {
        fg: TEXT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    for y in modal_rect.y..modal_rect.bottom() {
        for x in modal_rect.x..modal_rect.right() {
            frame.buffer.set(x, y, bg_cell);
        }
    }
    // Border.
    let border_cell = Cell {
        fg: BORDER,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    frame.draw_border(modal_rect, BorderChars::ROUNDED, border_cell);
    // Title.
    let title = " Project Scope ";
    let title_x = modal_x + (modal_width.saturating_sub(title.len() as u16)) / 2;
    let title_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    frame.print_text_clipped(title_x, modal_y, title, title_cell, modal_rect.right());
    // Content area (inside border).
    let content_x = modal_x + 1;
    let content_max_x = modal_rect.right().saturating_sub(1);
    let content_width = content_max_x.saturating_sub(content_x);
    let first_row_y = modal_y + 1;
    let max_rows = (modal_height.saturating_sub(2)) as usize; // Inside borders.
    // Render rows.
    let visible_end = (state.scroll_offset + max_rows).min(row_count);
    for vis_idx in 0..max_rows {
        let row_idx = state.scroll_offset + vis_idx;
        if row_idx >= row_count {
            break;
        }
        let y = first_row_y + vis_idx as u16;
        let selected = row_idx == state.selected_index;
        let bg = if selected { SELECTION_BG } else { BG_SURFACE };
        // Fill row background.
        if selected {
            let sel_cell = Cell {
                fg: TEXT,
                bg,
                ..Cell::default()
            };
            for x in content_x..content_max_x {
                frame.buffer.set(x, y, sel_cell);
            }
        }
        // Row content. Row 0 is the synthetic "All Projects" entry.
        let (label, is_active) = if row_idx == 0 {
            let active = current_scope.project_id.is_none();
            ("All Projects".to_string(), active)
        } else {
            let project = &state.projects[row_idx - 1];
            let active = current_scope.project_id == Some(project.id);
            (project.path.clone(), active)
        };
        // Active indicator.
        let prefix = if is_active { "> " } else { " " };
        let fg = if is_active { ACCENT } else { TEXT };
        let cell = Cell {
            fg,
            bg,
            ..Cell::default()
        };
        // Truncate label to fit. Char-based with stable APIs only
        // (`str::floor_char_boundary` is nightly-only), so multi-byte
        // paths are never split mid-character.
        let max_label_len = content_width.saturating_sub(2) as usize; // 2 for prefix
        let display = if label.chars().count() > max_label_len {
            let keep = max_label_len.saturating_sub(3);
            let clipped: String = label.chars().take(keep).collect();
            format!("{prefix}{clipped}...")
        } else {
            format!("{prefix}{label}")
        };
        frame.print_text_clipped(content_x, y, &display, cell, content_max_x);
    }
    // Scroll indicators.
    if state.scroll_offset > 0 {
        let arrow_cell = Cell {
            fg: TEXT_MUTED,
            bg: BG_SURFACE,
            ..Cell::default()
        };
        frame.print_text_clipped(
            content_max_x.saturating_sub(1),
            first_row_y,
            "^",
            arrow_cell,
            modal_rect.right(),
        );
    }
    if visible_end < row_count {
        let arrow_cell = Cell {
            fg: TEXT_MUTED,
            bg: BG_SURFACE,
            ..Cell::default()
        };
        let bottom_y = first_row_y + (max_rows as u16).saturating_sub(1);
        frame.print_text_clipped(
            content_max_x.saturating_sub(1),
            bottom_y,
            "v",
            arrow_cell,
            modal_rect.right(),
        );
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    //! Render smoke tests — each call must complete without panicking;
    //! no cell-level assertions are made.
    use super::*;
    use crate::scope::ProjectInfo;
    use ftui::render::grapheme_pool::GraphemePool;
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }
    // Two projects with distinct ids so active-scope matching is exercised.
    fn sample_projects() -> Vec<ProjectInfo> {
        vec![
            ProjectInfo {
                id: 1,
                path: "alpha/repo".into(),
            },
            ProjectInfo {
                id: 2,
                path: "beta/repo".into(),
            },
        ]
    }
    #[test]
    fn test_render_hidden_noop() {
        with_frame!(80, 24, |frame| {
            let state = ScopePickerState::default();
            let scope = ScopeContext::default();
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
            // Should not panic.
        });
    }
    #[test]
    fn test_render_visible_no_panic() {
        with_frame!(80, 24, |frame| {
            let mut state = ScopePickerState::default();
            let scope = ScopeContext::default();
            state.open(sample_projects(), &scope);
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
        });
    }
    #[test]
    fn test_render_with_selection() {
        with_frame!(80, 24, |frame| {
            let mut state = ScopePickerState::default();
            let scope = ScopeContext::default();
            state.open(sample_projects(), &scope);
            state.select_next(); // Move to first project
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
        });
    }
    #[test]
    fn test_render_tiny_terminal_noop() {
        with_frame!(15, 4, |frame| {
            let mut state = ScopePickerState::default();
            let scope = ScopeContext::default();
            state.open(sample_projects(), &scope);
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
            // Should not panic on tiny terminals.
        });
    }
    #[test]
    fn test_render_active_scope_highlighted() {
        with_frame!(80, 24, |frame| {
            let mut state = ScopePickerState::default();
            let scope = ScopeContext {
                project_id: Some(2),
                project_name: Some("beta/repo".into()),
            };
            state.open(sample_projects(), &scope);
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
        });
    }
    #[test]
    fn test_render_empty_project_list() {
        with_frame!(80, 24, |frame| {
            let mut state = ScopePickerState::default();
            let scope = ScopeContext::default();
            state.open(vec![], &scope);
            let area = frame.bounds();
            render_scope_picker(&mut frame, &state, &scope, area);
            // Only "All Projects" row, should not panic.
        });
    }
}

View File

@@ -0,0 +1,443 @@
//! Stats screen view — database and index statistics.
//!
//! Renders entity counts, FTS/embedding coverage, and queue health
//! as a simple table layout.
use ftui::core::geometry::Rect;
use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;
use crate::state::stats::StatsState;
use super::{ACCENT, TEXT, TEXT_MUTED};
/// Success green (for good coverage).
const GOOD_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39);
/// Warning yellow (for partial coverage).
const WARN_FG: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15);
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------
/// Render the stats screen.
///
/// Sections, top to bottom: title, entity counts plus a computed total,
/// index coverage (FTS / embeddings / chunks), an optional queue section
/// (only when `data.has_queue_work()`), and a key hint pinned to the
/// bottom row. Each row is bounds-checked against the hint area and
/// silently skipped once vertical space runs out.
pub fn render_stats(frame: &mut Frame<'_>, state: &StatsState, area: Rect) {
    // Bail out on terminals too small to draw anything meaningful.
    if area.width < 10 || area.height < 3 {
        return;
    }
    let max_x = area.right();
    if !state.loaded {
        // Stats not fetched yet — show a centered placeholder.
        let msg = "Loading statistics...";
        let x = area.x + area.width.saturating_sub(msg.len() as u16) / 2;
        let y = area.y + area.height / 2;
        frame.print_text_clipped(
            x,
            y,
            msg,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
        return;
    }
    // Loaded but no data — render nothing.
    let data = match &state.data {
        Some(d) => d,
        None => return,
    };
    // Title.
    frame.print_text_clipped(
        area.x + 2,
        area.y + 1,
        "Database Statistics",
        Cell {
            fg: ACCENT,
            ..Cell::default()
        },
        max_x,
    );
    // Two-column layout: labels padded to `label_width`, values at `value_x`.
    let mut y = area.y + 3;
    let label_width = 22u16;
    let value_x = area.x + 2 + label_width;
    // --- Entity Counts section ---
    if y < area.bottom().saturating_sub(2) {
        frame.print_text_clipped(
            area.x + 2,
            y,
            "Entities",
            Cell {
                fg: TEXT,
                ..Cell::default()
            },
            max_x,
        );
        y += 1;
    }
    let entity_rows: [(&str, i64); 4] = [
        (" Issues", data.issues),
        (" Merge Requests", data.merge_requests),
        (" Discussions", data.discussions),
        (" Notes", data.notes),
    ];
    for (label, count) in &entity_rows {
        if y >= area.bottom().saturating_sub(2) {
            break;
        }
        render_stat_row(frame, area.x + 2, y, label, &format_count(*count), label_width, max_x);
        y += 1;
    }
    // Total.
    if y < area.bottom().saturating_sub(2) {
        let total = data.issues + data.merge_requests + data.discussions + data.notes;
        render_stat_row(
            frame,
            area.x + 2,
            y,
            " Total",
            &format_count(total),
            label_width,
            max_x,
        );
        y += 1;
    }
    y += 1; // Blank line.
    // --- Index Coverage section ---
    if y < area.bottom().saturating_sub(2) {
        frame.print_text_clipped(
            area.x + 2,
            y,
            "Index Coverage",
            Cell {
                fg: TEXT,
                ..Cell::default()
            },
            max_x,
        );
        y += 1;
    }
    // FTS row — value colored by coverage percentage.
    if y < area.bottom().saturating_sub(2) {
        let fts_pct = data.fts_coverage_pct();
        let fts_text = format!("{} ({:.0}%)", format_count(data.fts_indexed), fts_pct);
        let fg = coverage_color(fts_pct);
        frame.print_text_clipped(
            area.x + 2,
            y,
            &format!("{:<width$}", " FTS Indexed", width = label_width as usize),
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            value_x,
        );
        frame.print_text_clipped(
            value_x,
            y,
            &fts_text,
            Cell {
                fg,
                ..Cell::default()
            },
            max_x,
        );
        y += 1;
    }
    // Embeddings row — same colored-value treatment as FTS.
    if y < area.bottom().saturating_sub(2) {
        let embed_text = format!(
            "{} ({:.0}%)",
            format_count(data.embedded_documents),
            data.coverage_pct
        );
        let fg = coverage_color(data.coverage_pct);
        frame.print_text_clipped(
            area.x + 2,
            y,
            &format!("{:<width$}", " Embeddings", width = label_width as usize),
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            value_x,
        );
        frame.print_text_clipped(
            value_x,
            y,
            &embed_text,
            Cell {
                fg,
                ..Cell::default()
            },
            max_x,
        );
        y += 1;
    }
    // Chunks.
    if y < area.bottom().saturating_sub(2) {
        render_stat_row(
            frame,
            area.x + 2,
            y,
            " Chunks",
            &format_count(data.total_chunks),
            label_width,
            max_x,
        );
        y += 1;
    }
    y += 1; // Blank line.
    // --- Queue section (only when there is pending/failed work) ---
    if data.has_queue_work() && y < area.bottom().saturating_sub(2) {
        frame.print_text_clipped(
            area.x + 2,
            y,
            "Queue",
            Cell {
                fg: TEXT,
                ..Cell::default()
            },
            max_x,
        );
        y += 1;
        if y < area.bottom().saturating_sub(2) {
            render_stat_row(
                frame,
                area.x + 2,
                y,
                " Pending",
                &format_count(data.queue_pending),
                label_width,
                max_x,
            );
            y += 1;
        }
        if data.queue_failed > 0 && y < area.bottom().saturating_sub(2) {
            // Failed counts render in warning yellow to stand out.
            let failed_cell = Cell {
                fg: WARN_FG,
                ..Cell::default()
            };
            frame.print_text_clipped(
                area.x + 2,
                y,
                &format!("{:<width$}", " Failed", width = label_width as usize),
                Cell {
                    fg: TEXT_MUTED,
                    ..Cell::default()
                },
                value_x,
            );
            frame.print_text_clipped(
                value_x,
                y,
                &format_count(data.queue_failed),
                failed_cell,
                max_x,
            );
        }
    }
    // Hint at bottom.
    let hint_y = area.bottom().saturating_sub(1);
    frame.print_text_clipped(
        area.x + 2,
        hint_y,
        "Esc: back | lore stats (full report)",
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
}
/// Render a two-column stat row: a muted, left-padded label followed by a value.
///
/// The label is padded to `label_width` columns and clipped where the value
/// column begins; the value itself is clipped at `max_x`.
fn render_stat_row(
    frame: &mut Frame<'_>,
    x: u16,
    y: u16,
    label: &str,
    value: &str,
    label_width: u16,
    max_x: u16,
) {
    let value_x = x + label_width;
    let padded_label = format!("{label:<width$}", width = label_width as usize);
    let label_cell = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };
    let value_cell = Cell {
        fg: TEXT,
        ..Cell::default()
    };
    frame.print_text_clipped(x, y, &padded_label, label_cell, value_x);
    frame.print_text_clipped(value_x, y, value, value_cell, max_x);
}
/// Map a coverage percentage to a foreground color.
///
/// Thresholds are inclusive: >= 90% reads as healthy, >= 50% as a warning,
/// and anything lower falls back to the plain text color.
fn coverage_color(pct: f64) -> PackedRgba {
    match pct {
        p if p >= 90.0 => GOOD_FG,
        p if p >= 50.0 => WARN_FG,
        _ => TEXT,
    }
}
/// Format a count with comma separators for readability
/// (e.g. `1234567` -> `"1,234,567"`).
///
/// Negative values are grouped the same way with a leading minus sign;
/// the previous implementation returned them ungrouped because its
/// fast-path test (`n < 1_000`) matched every negative number.
fn format_count(n: i64) -> String {
    // Group only the digits; `unsigned_abs` avoids overflow on i64::MIN.
    let digits = n.unsigned_abs().to_string();
    if digits.len() <= 3 {
        // Fewer than four digits: no separators needed either way.
        return n.to_string();
    }
    // Capacity: digits + one comma per group + optional sign.
    let mut result = String::with_capacity(digits.len() + digits.len() / 3 + 1);
    if n < 0 {
        result.push('-');
    }
    for (i, c) in digits.chars().enumerate() {
        // Insert a comma whenever a whole group of three remains.
        if i > 0 && (digits.len() - i) % 3 == 0 {
            result.push(',');
        }
        result.push(c);
    }
    result
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::state::stats::StatsData;
    use ftui::render::grapheme_pool::GraphemePool;

    /// Build a `Frame` backed by a fresh `GraphemePool` and run `$body`
    /// with the frame bound to `$frame`.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    /// Representative stats fixture with non-zero counts in every field,
    /// including pending/failed queue work so the queue section renders.
    fn sample_data() -> StatsData {
        StatsData {
            total_documents: 500,
            issues: 200,
            merge_requests: 150,
            discussions: 100,
            notes: 50,
            fts_indexed: 450,
            embedded_documents: 300,
            total_chunks: 1200,
            coverage_pct: 60.0,
            queue_pending: 5,
            queue_failed: 1,
        }
    }

    // Rendering smoke tests: these only assert that rendering does not
    // panic for the given state / terminal-size combination.

    #[test]
    fn test_render_not_loaded() {
        with_frame!(80, 24, |frame| {
            let state = StatsState::default();
            let area = frame.bounds();
            render_stats(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_with_data() {
        with_frame!(80, 24, |frame| {
            let mut state = StatsState::default();
            state.apply_data(sample_data());
            let area = frame.bounds();
            render_stats(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_no_queue_work() {
        // With zero pending/failed work the queue section is skipped.
        with_frame!(80, 24, |frame| {
            let mut state = StatsState::default();
            state.apply_data(StatsData {
                queue_pending: 0,
                queue_failed: 0,
                ..sample_data()
            });
            let area = frame.bounds();
            render_stats(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_tiny_terminal() {
        with_frame!(8, 2, |frame| {
            let mut state = StatsState::default();
            state.apply_data(sample_data());
            let area = frame.bounds();
            render_stats(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_short_terminal() {
        with_frame!(80, 8, |frame| {
            let mut state = StatsState::default();
            state.apply_data(sample_data());
            let area = frame.bounds();
            render_stats(&mut frame, &state, area);
            // Should clip without panicking.
        });
    }

    #[test]
    fn test_format_count_small() {
        assert_eq!(format_count(0), "0");
        assert_eq!(format_count(42), "42");
        assert_eq!(format_count(999), "999");
    }

    #[test]
    fn test_format_count_thousands() {
        assert_eq!(format_count(1_000), "1,000");
        assert_eq!(format_count(12_345), "12,345");
        assert_eq!(format_count(1_234_567), "1,234,567");
    }

    #[test]
    fn test_coverage_color_thresholds() {
        // Boundaries are inclusive: exactly 90 is "good", exactly 50 "warn".
        assert_eq!(coverage_color(100.0), GOOD_FG);
        assert_eq!(coverage_color(90.0), GOOD_FG);
        assert_eq!(coverage_color(89.9), WARN_FG);
        assert_eq!(coverage_color(50.0), WARN_FG);
        assert_eq!(coverage_color(49.9), TEXT);
    }
}

View File

@@ -0,0 +1,575 @@
//! Sync screen view — progress bars, summary table, and log.
//!
//! Renders the sync screen in different phases:
//! - **Idle**: prompt to start sync
//! - **Running**: per-lane progress bars with throughput stats
//! - **Complete**: summary table with change counts
//! - **Cancelled/Failed**: status message with retry hint
use ftui::core::geometry::Rect;
use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;
use crate::state::sync::{SyncLane, SyncPhase, SyncState};
use super::{ACCENT, TEXT, TEXT_MUTED};
/// Progress bar fill color (orange), used for the filled portion of a lane bar.
const PROGRESS_FG: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange
/// Progress bar background, used for the unfilled track of a lane bar.
const PROGRESS_BG: PackedRgba = PackedRgba::rgb(0x34, 0x34, 0x30);
/// Success green, used for the "Sync Complete" title.
const SUCCESS_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39);
/// Error red, used for the "Sync Failed" title and per-project error lines.
const ERROR_FG: PackedRgba = PackedRgba::rgb(0xD1, 0x4D, 0x41);
// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------
/// Render the sync screen, dispatching on the current [`SyncPhase`].
///
/// Terminals narrower than 10 columns or shorter than 3 rows are left
/// untouched — there is no room to draw anything meaningful.
pub fn render_sync(frame: &mut Frame<'_>, state: &SyncState, area: Rect) {
    let too_small = area.width < 10 || area.height < 3;
    if too_small {
        return;
    }
    match &state.phase {
        SyncPhase::Idle => render_idle(frame, area),
        SyncPhase::Running => render_running(frame, state, area),
        SyncPhase::Complete => render_summary(frame, state, area),
        SyncPhase::Cancelled => render_cancelled(frame, area),
        SyncPhase::Failed(err) => render_failed(frame, area, err),
    }
}
// ---------------------------------------------------------------------------
// Idle view
// ---------------------------------------------------------------------------
/// Render the idle view: a horizontally centered title and a hint line
/// explaining how to start a sync.
fn render_idle(frame: &mut Frame<'_>, area: Rect) {
    let max_x = area.right();
    let mid_y = area.y + area.height / 2;
    // Center a string within the area. Labels here are ASCII, so byte
    // length equals display width.
    let centered = |text: &str| area.x + area.width.saturating_sub(text.len() as u16) / 2;

    let title = "Sync";
    let title_cell = Cell {
        fg: ACCENT,
        ..Cell::default()
    };
    frame.print_text_clipped(
        centered(title),
        mid_y.saturating_sub(1),
        title,
        title_cell,
        max_x,
    );

    let hint = "Press Enter to start sync, or run `lore sync` externally.";
    let hint_cell = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };
    frame.print_text_clipped(centered(hint), mid_y + 1, hint, hint_cell, max_x);
}
// ---------------------------------------------------------------------------
// Running view — per-lane progress bars
// ---------------------------------------------------------------------------
/// Render the running view: stage label, one progress bar per sync lane,
/// an aggregate throughput line, and a cancel hint pinned to the bottom.
///
/// Horizontal layout per lane row:
/// `[2-col margin][label column][bar][space][count text]`, where the bar
/// takes whatever width remains after the margins, the label column, and
/// 12 columns reserved for the count text.
fn render_running(frame: &mut Frame<'_>, state: &SyncState, area: Rect) {
    let max_x = area.right();
    // Title.
    let title = "Syncing...";
    let title_x = area.x + 2;
    frame.print_text_clipped(
        title_x,
        area.y + 1,
        title,
        Cell {
            fg: ACCENT,
            ..Cell::default()
        },
        max_x,
    );
    // Stage label (skipped while the state has not reported a stage yet).
    if !state.stage.is_empty() {
        let stage_cell = Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        };
        frame.print_text_clipped(title_x, area.y + 2, &state.stage, stage_cell, max_x);
    }
    // Per-lane progress bars.
    let bar_start_y = area.y + 4;
    let label_width = 14u16; // "Discussions " is the longest
    let bar_x = area.x + 2 + label_width;
    let bar_width = area.width.saturating_sub(4 + label_width + 12); // 12 for count text
    for (i, lane) in SyncLane::ALL.iter().enumerate() {
        let y = bar_start_y + i as u16;
        // Stop before the throughput/hint rows rather than overlap them.
        if y >= area.bottom().saturating_sub(3) {
            break;
        }
        // Lanes are indexed in the same order as SyncLane::ALL.
        let lane_progress = &state.lanes[i];
        // Lane label.
        let label = format!("{:<12}", lane.label());
        frame.print_text_clipped(
            area.x + 2,
            y,
            &label,
            Cell {
                fg: TEXT,
                ..Cell::default()
            },
            bar_x,
        );
        // Progress bar.
        if bar_width > 2 {
            render_progress_bar(frame, bar_x, y, bar_width, lane_progress.fraction());
        }
        // Count text: "current/total" when the total is known, a bare count
        // while items stream in without a total, or "--" before any progress.
        let count_x = bar_x + bar_width + 1;
        let count_text = if lane_progress.total > 0 {
            format!("{}/{}", lane_progress.current, lane_progress.total)
        } else if lane_progress.current > 0 {
            format!("{}", lane_progress.current)
        } else {
            "--".to_string()
        };
        frame.print_text_clipped(
            count_x,
            y,
            &count_text,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
    }
    // Throughput stats (only once at least one item has synced).
    let stats_y = bar_start_y + SyncLane::ALL.len() as u16 + 1;
    if stats_y < area.bottom().saturating_sub(2) && state.items_synced > 0 {
        let stats = format!(
            "{} items synced ({:.0} items/sec)",
            state.items_synced, state.items_per_sec
        );
        frame.print_text_clipped(
            area.x + 2,
            stats_y,
            &stats,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
    }
    // Cancel hint at bottom.
    let hint_y = area.bottom().saturating_sub(1);
    frame.print_text_clipped(
        area.x + 2,
        hint_y,
        "Esc: cancel sync",
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
}
/// Render a horizontal progress bar at (`x`, `y`) spanning `width` cells.
///
/// `fraction` is the filled portion in `[0.0, 1.0]`. It is clamped
/// defensively: the previous code computed `x + filled` with an unclamped
/// fraction, so a value above 1.0 could make `filled` exceed `width` and
/// overflow the `u16` addition (a panic in debug builds). A NaN fraction
/// survives `clamp` but saturates to 0 in the float-to-int cast.
fn render_progress_bar(frame: &mut Frame<'_>, x: u16, y: u16, width: u16, fraction: f64) {
    let clamped = fraction.clamp(0.0, 1.0);
    let filled = ((width as f64) * clamped).round() as u16;
    let fill_end = x.saturating_add(filled);
    let max_x = x.saturating_add(width);
    for col in x..max_x {
        // Filled and unfilled cells use fg == bg so the bar reads as a
        // solid block regardless of the glyph in the cell.
        let color = if col < fill_end { PROGRESS_FG } else { PROGRESS_BG };
        let cell = Cell {
            fg: color,
            bg: color,
            ..Cell::default()
        };
        frame.buffer.set(col, y, cell);
    }
}
// ---------------------------------------------------------------------------
// Summary view
// ---------------------------------------------------------------------------
/// Render the completed-sync view: title, elapsed duration, a per-entity
/// change table (new/updated counts), the total change count, any
/// per-project errors, and a navigation hint pinned to the bottom row.
///
/// If `state.summary` is `None` only the title and hint are drawn.
fn render_summary(frame: &mut Frame<'_>, state: &SyncState, area: Rect) {
    let max_x = area.right();
    // Title.
    let title = "Sync Complete";
    let title_x = area.x + 2;
    frame.print_text_clipped(
        title_x,
        area.y + 1,
        title,
        Cell {
            fg: SUCCESS_FG,
            ..Cell::default()
        },
        max_x,
    );
    if let Some(ref summary) = state.summary {
        // Duration.
        let duration = format_duration(summary.elapsed_ms);
        frame.print_text_clipped(
            title_x,
            area.y + 2,
            &format!("Duration: {duration}"),
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
        // Summary table header. Column widths here must match the row
        // format string below so the columns line up.
        let table_y = area.y + 4;
        let header = format!("{:<16} {:>6} {:>8}", "Entity", "New", "Updated");
        frame.print_text_clipped(
            area.x + 2,
            table_y,
            &header,
            Cell {
                fg: TEXT_MUTED,
                ..Cell::default()
            },
            max_x,
        );
        // Summary rows, one per entity kind.
        let rows = [
            ("Issues", summary.issues.new, summary.issues.updated),
            ("MRs", summary.merge_requests.new, summary.merge_requests.updated),
            ("Discussions", summary.discussions.new, summary.discussions.updated),
            ("Notes", summary.notes.new, summary.notes.updated),
        ];
        for (i, (label, new, updated)) in rows.iter().enumerate() {
            let row_y = table_y + 1 + i as u16;
            // Stop before colliding with the total/hint rows.
            if row_y >= area.bottom().saturating_sub(3) {
                break;
            }
            let row = format!("{label:<16} {new:>6} {updated:>8}");
            // Rows with no changes are dimmed so active rows stand out.
            let fg = if *new > 0 || *updated > 0 {
                TEXT
            } else {
                TEXT_MUTED
            };
            frame.print_text_clipped(
                area.x + 2,
                row_y,
                &row,
                Cell {
                    fg,
                    ..Cell::default()
                },
                max_x,
            );
        }
        // Total.
        let total_y = table_y + 1 + rows.len() as u16;
        if total_y < area.bottom().saturating_sub(2) {
            let total = format!("Total changes: {}", summary.total_changes());
            frame.print_text_clipped(
                area.x + 2,
                total_y,
                &total,
                Cell {
                    fg: ACCENT,
                    ..Cell::default()
                },
                max_x,
            );
        }
        // Per-project errors, one indented line per (project, error) pair.
        if summary.has_errors() {
            let err_y = total_y + 2;
            if err_y < area.bottom().saturating_sub(1) {
                frame.print_text_clipped(
                    area.x + 2,
                    err_y,
                    "Errors:",
                    Cell {
                        fg: ERROR_FG,
                        ..Cell::default()
                    },
                    max_x,
                );
                for (i, (project, err)) in summary.project_errors.iter().enumerate() {
                    let y = err_y + 1 + i as u16;
                    // Never overwrite the hint row; drop overflowing errors.
                    if y >= area.bottom().saturating_sub(1) {
                        break;
                    }
                    let line = format!(" {project}: {err}");
                    frame.print_text_clipped(
                        area.x + 2,
                        y,
                        &line,
                        Cell {
                            fg: ERROR_FG,
                            ..Cell::default()
                        },
                        max_x,
                    );
                }
            }
        }
    }
    // Navigation hint at bottom.
    let hint_y = area.bottom().saturating_sub(1);
    frame.print_text_clipped(
        area.x + 2,
        hint_y,
        "Esc: back | Enter: sync again",
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
}
// ---------------------------------------------------------------------------
// Cancelled / Failed views
// ---------------------------------------------------------------------------
/// Render the cancelled view: a status line plus a retry/back hint,
/// both placed around the vertical center of the area.
fn render_cancelled(frame: &mut Frame<'_>, area: Rect) {
    let max_x = area.right();
    let mid_y = area.y + area.height / 2;
    let lines = [
        ("Sync Cancelled", ACCENT, mid_y.saturating_sub(1)),
        (
            "Press Enter to retry, or Esc to go back.",
            TEXT_MUTED,
            mid_y + 1,
        ),
    ];
    for (text, fg, y) in lines {
        let cell = Cell {
            fg,
            ..Cell::default()
        };
        frame.print_text_clipped(area.x + 2, y, text, cell, max_x);
    }
}
/// Render the failed view: an error title, the (possibly truncated) error
/// message, and a retry/back hint.
///
/// The previous implementation truncated with `str::floor_char_boundary`,
/// which is an unstable (nightly-only) API, and compared the *byte* length
/// `error.len()` against a column budget — over-truncating multi-byte
/// UTF-8 messages. This version counts chars instead.
fn render_failed(frame: &mut Frame<'_>, area: Rect, error: &str) {
    let max_x = area.right();
    let center_y = area.y + area.height / 2;
    frame.print_text_clipped(
        area.x + 2,
        center_y.saturating_sub(2),
        "Sync Failed",
        Cell {
            fg: ERROR_FG,
            ..Cell::default()
        },
        max_x,
    );
    // Truncate the error to fit the screen, keeping UTF-8 intact.
    // NOTE(review): char count approximates display width; wide graphemes
    // could still overrun, but `print_text_clipped` clips at `max_x` anyway.
    let max_len = area.width.saturating_sub(4) as usize;
    let display_err = if error.chars().count() > max_len {
        let kept: String = error.chars().take(max_len.saturating_sub(3)).collect();
        format!("{kept}...")
    } else {
        error.to_string()
    };
    frame.print_text_clipped(
        area.x + 2,
        center_y,
        &display_err,
        Cell {
            fg: TEXT,
            ..Cell::default()
        },
        max_x,
    );
    frame.print_text_clipped(
        area.x + 2,
        center_y + 2,
        "Press Enter to retry, or Esc to go back.",
        Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        },
        max_x,
    );
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/// Format a millisecond duration as `"Xs"` (under a minute) or `"Xm Ys"`.
/// Sub-second precision is dropped.
fn format_duration(ms: u64) -> String {
    let total_secs = ms / 1000;
    match (total_secs / 60, total_secs % 60) {
        (0, s) => format!("{s}s"),
        (m, s) => format!("{m}m {s}s"),
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::state::sync::{EntityChangeCounts, SyncSummary};
    use ftui::render::grapheme_pool::GraphemePool;

    /// Build a `Frame` backed by a fresh `GraphemePool` and run `$body`
    /// with the frame bound to `$frame`.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Rendering smoke tests: these only assert that rendering each phase
    // does not panic for the given state / terminal-size combination.

    #[test]
    fn test_render_idle_no_panic() {
        with_frame!(80, 24, |frame| {
            let state = SyncState::default();
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_running_no_panic() {
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.update_progress("issues", 25, 100);
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_complete_no_panic() {
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.complete(5000);
            state.summary = Some(SyncSummary {
                issues: EntityChangeCounts { new: 5, updated: 3 },
                merge_requests: EntityChangeCounts { new: 2, updated: 1 },
                elapsed_ms: 5000,
                ..Default::default()
            });
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_cancelled_no_panic() {
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.cancel();
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_failed_no_panic() {
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.fail("network timeout".into());
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_render_tiny_terminal() {
        // Below the 10x3 minimum: render_sync should bail out early.
        with_frame!(8, 2, |frame| {
            let state = SyncState::default();
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
            // Should not panic.
        });
    }

    #[test]
    fn test_render_complete_with_errors() {
        // Exercises the per-project error listing in the summary view.
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.complete(3000);
            state.summary = Some(SyncSummary {
                elapsed_ms: 3000,
                project_errors: vec![
                    ("grp/repo".into(), "timeout".into()),
                ],
                ..Default::default()
            });
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }

    #[test]
    fn test_format_duration_seconds() {
        // Sub-second precision is dropped, not rounded.
        assert_eq!(format_duration(3500), "3s");
    }

    #[test]
    fn test_format_duration_minutes() {
        assert_eq!(format_duration(125_000), "2m 5s");
    }

    #[test]
    fn test_render_running_with_stats() {
        // Exercises the throughput line below the lane bars.
        with_frame!(80, 24, |frame| {
            let mut state = SyncState::default();
            state.start();
            state.update_progress("issues", 50, 200);
            state.update_stream_stats(1024, 50);
            let area = frame.bounds();
            render_sync(&mut frame, &state, area);
        });
    }
}

View File

@@ -24,16 +24,16 @@ use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;
use crate::state::trace::TraceState;
use crate::text_width::cursor_cell_offset;
use lore::core::trace::TraceResult;
use super::common::truncate_str;
use super::{ACCENT, BG_SURFACE, TEXT, TEXT_MUTED};
// ---------------------------------------------------------------------------
// Colors (Flexoki palette)
// Colors (Flexoki palette — extras not in parent module)
// ---------------------------------------------------------------------------
const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2
const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange
const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green
const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan
const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow
@@ -135,7 +135,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16,
// Cursor.
if state.path_focused {
let cursor_x = after_label + state.path_cursor as u16;
let cursor_col = state.path_input[..state.path_cursor].chars().count() as u16;
let cursor_x = after_label + cursor_col;
if cursor_x < max_x {
let cursor_cell = Cell {
fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
@@ -144,8 +145,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16,
};
let ch = state
.path_input
.chars()
.nth(state.path_cursor)
.get(state.path_cursor..)
.and_then(|s| s.chars().next())
.unwrap_or(' ');
frame.print_text_clipped(cursor_x, y, &ch.to_string(), cursor_cell, max_x);
}
@@ -457,16 +458,6 @@ fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) {
frame.print_text_clipped(x + 1, y, hints, style, max_x);
}
/// Truncate a string to at most `max_chars` display characters.
fn truncate_str(s: &str, max_chars: usize) -> String {
if s.chars().count() <= max_chars {
s.to_string()
} else {
let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect();
format!("{truncated}")
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

View File

@@ -25,6 +25,7 @@ use lore::core::who_types::{
use crate::state::who::{WhoMode, WhoState};
use super::common::truncate_str;
use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED};
/// Muted accent for inactive mode tabs.
@@ -915,20 +916,6 @@ fn render_truncation_footer(
frame.print_text_clipped(footer_x, footer_y, &footer, cell, max_x);
}
/// Truncate a string to at most `max_chars` display characters.
fn truncate_str(s: &str, max_chars: usize) -> String {
let chars: Vec<char> = s.chars().collect();
if chars.len() <= max_chars {
s.to_string()
} else if max_chars <= 3 {
chars[..max_chars].iter().collect()
} else {
let mut result: String = chars[..max_chars - 3].iter().collect();
result.push_str("...");
result
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
@@ -1029,7 +1016,7 @@ mod tests {
#[test]
fn test_truncate_str() {
assert_eq!(truncate_str("hello", 10), "hello");
assert_eq!(truncate_str("hello world", 8), "hello...");
assert_eq!(truncate_str("hello world", 8), "hello w\u{2026}");
assert_eq!(truncate_str("hi", 2), "hi");
assert_eq!(truncate_str("abc", 3), "abc");
}