refactor(core): Rename GiError to LoreError and add search infrastructure

Mechanical rename of GiError -> LoreError across the core module to
match the project's rebranding from gitlab-inbox to gitlore/lore.
Updates the error enum name, all From impls, and the Result type alias.

Additionally introduces:

- New error variants for embedding pipeline: OllamaUnavailable,
  OllamaModelNotFound, EmbeddingFailed, EmbeddingsNotBuilt. Each
  includes actionable suggestions (e.g., "ollama serve", "ollama pull
  nomic-embed-text") to guide users through recovery.

- New error codes 14-16 for programmatic handling of Ollama failures.

- Savepoint-based migration execution in db.rs: each migration now
  runs inside a SQLite SAVEPOINT so a failed migration rolls back
  cleanly without corrupting the schema_version tracking. Previously
  a partial migration could leave the database in an inconsistent
  state.

- core::backoff module: exponential backoff with jitter utility for
  retry loops in the embedding pipeline and discussion queues.

- core::project module: helper for resolving project IDs and paths
  from the local database, used by the document regenerator and
  search filters.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Taylor Eernisse
2026-01-30 15:45:54 -05:00
parent 4270603da4
commit 6e22f120d0
8 changed files with 361 additions and 22 deletions

99
src/core/backoff.rs Normal file
View File

@@ -0,0 +1,99 @@
use rand::Rng;
/// Compute next_attempt_at with exponential backoff and jitter.
///
/// Formula: now + min(3600000, 1000 * 2^attempt_count) * (0.9 to 1.1)
/// - Capped at 1 hour to prevent runaway delays
/// - ±10% jitter prevents synchronized retries after outages
///
/// Used by:
/// - `dirty_sources` retry scheduling (document regeneration failures)
/// - `pending_discussion_fetches` retry scheduling (API fetch failures)
///
/// Having one implementation prevents subtle divergence between queues
/// (e.g., different caps or jitter ranges).
pub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> i64 {
    // Clamp to [0, 30]: the upper bound prevents shift overflow (2^30
    // seconds already far exceeds the 1-hour cap), and the lower bound
    // guards against a negative attempt_count (e.g. from bad DB state),
    // which `as u32` would otherwise wrap into a huge shift amount and
    // panic on the shift below.
    let capped_attempts = attempt_count.clamp(0, 30) as u32;
    let base_delay_ms = 1000_i64.saturating_mul(1 << capped_attempts);
    let capped_delay_ms = base_delay_ms.min(3_600_000); // 1 hour cap
    // ±10% multiplicative jitter to de-synchronize retry storms.
    let jitter_factor = rand::thread_rng().gen_range(0.9..=1.1);
    let delay_with_jitter = (capped_delay_ms as f64 * jitter_factor) as i64;
    // Saturate rather than wrap if `now` is pathologically close to i64::MAX.
    now.saturating_add(delay_with_jitter)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Hard ceiling on the pre-jitter delay: one hour in milliseconds.
    const MAX_DELAY_MS: i64 = 3_600_000;

    // Convenience: the delay (ms) scheduled for a given attempt.
    fn delay_for(now: i64, attempt: i64) -> i64 {
        compute_next_attempt_at(now, attempt) - now
    }

    #[test]
    fn test_exponential_curve() {
        // Delays should roughly double per attempt; bounds are widened to
        // 0.89/1.11 (vs the nominal 0.9/1.1 jitter) for float truncation.
        let now = 1_000_000_000_i64;
        for attempt in 1..=10 {
            let delay = delay_for(now, attempt);
            let expected_base = 1000_i64 * (1 << attempt);
            let lo = (expected_base as f64 * 0.89) as i64;
            let hi = (expected_base as f64 * 1.11) as i64;
            assert!(
                (lo..=hi).contains(&delay),
                "attempt {attempt}: delay {delay} not in [{lo}, {hi}]"
            );
        }
    }

    #[test]
    fn test_cap_at_one_hour() {
        // Past the cap, every attempt count yields at most ~1h plus jitter.
        let now = 1_000_000_000_i64;
        let ceiling = (MAX_DELAY_MS as f64 * 1.11) as i64;
        for attempt in [20, 25, 30, 50, 100] {
            let delay = delay_for(now, attempt);
            assert!(
                delay <= ceiling,
                "attempt {attempt}: delay {delay} exceeds cap {ceiling}"
            );
        }
    }

    #[test]
    fn test_jitter_range() {
        // Sample repeatedly: every draw must land inside the jitter window.
        let now = 1_000_000_000_i64;
        let attempt = 5; // base = 32000
        let base = 1000_i64 * (1 << attempt);
        let lo = (base as f64 * 0.89) as i64;
        let hi = (base as f64 * 1.11) as i64;
        for _ in 0..100 {
            let delay = delay_for(now, attempt);
            assert!(
                (lo..=hi).contains(&delay),
                "delay {delay} not in jitter range [{lo}, {hi}]"
            );
        }
    }

    #[test]
    fn test_first_retry_is_about_two_seconds() {
        // attempt 1: base = 2000ms, so with jitter: 1800-2200ms.
        let now = 1_000_000_000_i64;
        let delay = delay_for(now, 1);
        assert!((1800..=2200).contains(&delay), "first retry delay: {delay}ms");
    }

    #[test]
    fn test_overflow_safety() {
        // Extreme attempt counts combined with a large `now` must not panic.
        let now = i64::MAX / 2;
        let result = compute_next_attempt_at(now, i64::MAX);
        assert!(result > now);
    }
}