perf: Configurable rate limit, 429 auto-retry, concurrent project ingestion

The sync pipeline was bottlenecked at 10 req/s (hardcoded) with
sequential project processing and no retry on rate limiting. These
changes target 3-5x throughput improvement.

Rate limit configuration:
- Add requestsPerSecond to SyncConfig (default 30.0, was hardcoded 10)
- Pass configured rate through to GitLabClient::new from ingest
- Floor rate at 0.1 rps in RateLimiter::new to prevent panic on
  Duration::from_secs_f64(1.0 / 0.0) — now reachable via user config

429 auto-retry:
- Both request() and request_with_headers() retry up to 3 times on
  HTTP 429, respecting the retry-after header (default 60s)
- Extract parse_retry_after helper, reused by handle_response fallback
- After exhausting retries, the 429 error propagates as before
- Improved JSON decode errors now include a response body preview

Concurrent project ingestion:
- Derive Clone on GitLabClient (cheap: clones share the Arc<Mutex<RateLimiter>>,
  and reqwest::Client is internally reference-counted, so the connection
  pool is shared too)
- Restructure project loop to use futures::stream::buffer_unordered
  with primary_concurrency (default 4) as the parallelism bound
- Each project gets its own SQLite connection (WAL mode + busy_timeout
  handles concurrent writes)
- Add show_spinner field to IngestDisplay to separate the per-project
  spinner from the sync-level stage spinner
- Error aggregation defers failures: all successful projects get their
  summaries printed and results counted before returning the first error
- Bump dependentConcurrency default from 2 to 8 for discussion prefetch

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Taylor Eernisse
2026-02-03 17:37:06 -05:00
parent 4ee99c1677
commit f5b4a765b7
3 changed files with 319 additions and 190 deletions

View File

@@ -44,6 +44,19 @@ pub struct IngestResult {
pub resource_events_failed: usize, pub resource_events_failed: usize,
} }
/// Outcome of ingesting a single project, used to aggregate results
/// from concurrent project processing.
///
/// Each concurrent task returns one of these; the aggregation loop then
/// prints the per-project summary line and folds the result's counters
/// into the run-wide totals after all tasks complete.
enum ProjectIngestOutcome {
/// A project whose issues were synced.
Issues {
// Project path, used only for the printed summary line.
path: String,
// Per-project issue counters folded into the aggregate totals.
result: IngestProjectResult,
},
/// A project whose merge requests were synced.
Mrs {
// Project path, used only for the printed summary line.
path: String,
// Per-project MR counters folded into the aggregate totals.
result: IngestMrProjectResult,
},
}
/// Controls what interactive UI elements `run_ingest` displays. /// Controls what interactive UI elements `run_ingest` displays.
/// ///
/// Separates progress indicators (spinners, bars) from text output (headers, /// Separates progress indicators (spinners, bars) from text output (headers,
@@ -53,6 +66,9 @@ pub struct IngestResult {
pub struct IngestDisplay { pub struct IngestDisplay {
/// Show animated spinners and progress bars. /// Show animated spinners and progress bars.
pub show_progress: bool, pub show_progress: bool,
/// Show the per-project spinner. When called from `sync`, the stage
/// spinner already covers this, so a second spinner causes flashing.
pub show_spinner: bool,
/// Show text headers ("Ingesting...") and per-project summary lines. /// Show text headers ("Ingesting...") and per-project summary lines.
pub show_text: bool, pub show_text: bool,
} }
@@ -62,6 +78,7 @@ impl IngestDisplay {
pub fn interactive() -> Self { pub fn interactive() -> Self {
Self { Self {
show_progress: true, show_progress: true,
show_spinner: true,
show_text: true, show_text: true,
} }
} }
@@ -70,14 +87,17 @@ impl IngestDisplay {
pub fn silent() -> Self { pub fn silent() -> Self {
Self { Self {
show_progress: false, show_progress: false,
show_spinner: false,
show_text: false, show_text: false,
} }
} }
/// Progress only (used by sync in interactive mode). /// Progress bars only, no spinner or text (used by sync which provides its
/// own stage spinner).
pub fn progress_only() -> Self { pub fn progress_only() -> Self {
Self { Self {
show_progress: true, show_progress: true,
show_spinner: false,
show_text: false, show_text: false,
} }
} }
@@ -123,7 +143,11 @@ pub async fn run_ingest(
})?; })?;
// Create GitLab client // Create GitLab client
let client = GitLabClient::new(&config.gitlab.base_url, &token, None); let client = GitLabClient::new(
&config.gitlab.base_url,
&token,
Some(config.sync.requests_per_second),
);
// Get projects to sync // Get projects to sync
let projects = get_projects_to_sync(&conn, &config.projects, project_filter)?; let projects = get_projects_to_sync(&conn, &config.projects, project_filter)?;
@@ -188,13 +212,32 @@ pub async fn run_ingest(
println!(); println!();
} }
// Sync each project // Process projects concurrently. Each project gets its own DB connection
for (local_project_id, gitlab_project_id, path) in &projects { // while sharing the rate limiter through the cloned GitLabClient.
// Show spinner while fetching (only in interactive mode) let concurrency = config.sync.primary_concurrency as usize;
let spinner = if !display.show_progress { let resource_type_owned = resource_type.to_string();
use futures::stream::{self, StreamExt};
let project_results: Vec<Result<ProjectIngestOutcome>> = stream::iter(projects.iter())
.map(|(local_project_id, gitlab_project_id, path)| {
let client = client.clone();
let db_path = db_path.clone();
let config = config.clone();
let resource_type = resource_type_owned.clone();
let path = path.clone();
let local_project_id = *local_project_id;
let gitlab_project_id = *gitlab_project_id;
async move {
let proj_conn = create_connection(&db_path)?;
let multi = crate::cli::progress::multi();
let spinner = if !display.show_spinner {
ProgressBar::hidden() ProgressBar::hidden()
} else { } else {
let s = ProgressBar::new_spinner(); let s = multi.add(ProgressBar::new_spinner());
s.set_style( s.set_style(
ProgressStyle::default_spinner() ProgressStyle::default_spinner()
.template("{spinner:.blue} {msg}") .template("{spinner:.blue} {msg}")
@@ -205,11 +248,10 @@ pub async fn run_ingest(
s s
}; };
// Progress bar for discussion sync (hidden until needed, or always hidden in robot mode)
let disc_bar = if !display.show_progress { let disc_bar = if !display.show_progress {
ProgressBar::hidden() ProgressBar::hidden()
} else { } else {
let b = ProgressBar::new(0); let b = multi.add(ProgressBar::new(0));
b.set_style( b.set_style(
ProgressStyle::default_bar() ProgressStyle::default_bar()
.template( .template(
@@ -221,14 +263,12 @@ pub async fn run_ingest(
b b
}; };
// Create progress callback (no-op in robot mode)
let spinner_clone = spinner.clone(); let spinner_clone = spinner.clone();
let disc_bar_clone = disc_bar.clone(); let disc_bar_clone = disc_bar.clone();
let progress_callback: crate::ingestion::ProgressCallback = if !display.show_progress { let progress_callback: crate::ingestion::ProgressCallback = if !display.show_progress {
Box::new(|_| {}) Box::new(|_| {})
} else { } else {
Box::new(move |event: ProgressEvent| match event { Box::new(move |event: ProgressEvent| match event {
// Issue events
ProgressEvent::DiscussionSyncStarted { total } => { ProgressEvent::DiscussionSyncStarted { total } => {
spinner_clone.finish_and_clear(); spinner_clone.finish_and_clear();
disc_bar_clone.set_length(total as u64); disc_bar_clone.set_length(total as u64);
@@ -240,7 +280,6 @@ pub async fn run_ingest(
ProgressEvent::DiscussionSyncComplete => { ProgressEvent::DiscussionSyncComplete => {
disc_bar_clone.finish_and_clear(); disc_bar_clone.finish_and_clear();
} }
// MR events
ProgressEvent::MrDiscussionSyncStarted { total } => { ProgressEvent::MrDiscussionSyncStarted { total } => {
spinner_clone.finish_and_clear(); spinner_clone.finish_and_clear();
disc_bar_clone.set_length(total as u64); disc_bar_clone.set_length(total as u64);
@@ -273,13 +312,13 @@ pub async fn run_ingest(
}) })
}; };
if resource_type == "issues" { let outcome = if resource_type == "issues" {
let result = ingest_project_issues_with_progress( let result = ingest_project_issues_with_progress(
&conn, &proj_conn,
&client, &client,
config, &config,
*local_project_id, local_project_id,
*gitlab_project_id, gitlab_project_id,
Some(progress_callback), Some(progress_callback),
) )
.await?; .await?;
@@ -287,12 +326,50 @@ pub async fn run_ingest(
spinner.finish_and_clear(); spinner.finish_and_clear();
disc_bar.finish_and_clear(); disc_bar.finish_and_clear();
// Print per-project summary (only in interactive mode) ProjectIngestOutcome::Issues { path, result }
if display.show_text { } else {
print_issue_project_summary(path, &result); let result = ingest_project_merge_requests_with_progress(
} &proj_conn,
&client,
&config,
local_project_id,
gitlab_project_id,
full,
Some(progress_callback),
)
.await?;
// Aggregate totals spinner.finish_and_clear();
disc_bar.finish_and_clear();
ProjectIngestOutcome::Mrs { path, result }
};
Ok(outcome)
}
})
.buffer_unordered(concurrency)
.collect()
.await;
// Aggregate results and print per-project summaries.
// Process all successes first, then return the first error (if any)
// so that successful project summaries are always printed.
let mut first_error: Option<LoreError> = None;
for project_result in project_results {
match project_result {
Err(e) => {
if first_error.is_none() {
first_error = Some(e);
}
}
Ok(ProjectIngestOutcome::Issues {
ref path,
ref result,
}) => {
if display.show_text {
print_issue_project_summary(path, result);
}
total.projects_synced += 1; total.projects_synced += 1;
total.issues_fetched += result.issues_fetched; total.issues_fetched += result.issues_fetched;
total.issues_upserted += result.issues_upserted; total.issues_upserted += result.issues_upserted;
@@ -303,27 +380,14 @@ pub async fn run_ingest(
total.issues_skipped_discussion_sync += result.issues_skipped_discussion_sync; total.issues_skipped_discussion_sync += result.issues_skipped_discussion_sync;
total.resource_events_fetched += result.resource_events_fetched; total.resource_events_fetched += result.resource_events_fetched;
total.resource_events_failed += result.resource_events_failed; total.resource_events_failed += result.resource_events_failed;
} else {
let result = ingest_project_merge_requests_with_progress(
&conn,
&client,
config,
*local_project_id,
*gitlab_project_id,
full,
Some(progress_callback),
)
.await?;
spinner.finish_and_clear();
disc_bar.finish_and_clear();
// Print per-project summary (only in interactive mode)
if display.show_text {
print_mr_project_summary(path, &result);
} }
Ok(ProjectIngestOutcome::Mrs {
// Aggregate totals ref path,
ref result,
}) => {
if display.show_text {
print_mr_project_summary(path, result);
}
total.projects_synced += 1; total.projects_synced += 1;
total.mrs_fetched += result.mrs_fetched; total.mrs_fetched += result.mrs_fetched;
total.mrs_upserted += result.mrs_upserted; total.mrs_upserted += result.mrs_upserted;
@@ -339,6 +403,11 @@ pub async fn run_ingest(
total.resource_events_failed += result.resource_events_failed; total.resource_events_failed += result.resource_events_failed;
} }
} }
}
if let Some(e) = first_error {
return Err(e);
}
// Lock is released on drop // Lock is released on drop
Ok(total) Ok(total)

View File

@@ -51,6 +51,9 @@ pub struct SyncConfig {
#[serde(rename = "dependentConcurrency")] #[serde(rename = "dependentConcurrency")]
pub dependent_concurrency: u32, pub dependent_concurrency: u32,
#[serde(rename = "requestsPerSecond")]
pub requests_per_second: f64,
#[serde(rename = "fetchResourceEvents", default = "default_true")] #[serde(rename = "fetchResourceEvents", default = "default_true")]
pub fetch_resource_events: bool, pub fetch_resource_events: bool,
} }
@@ -67,7 +70,8 @@ impl Default for SyncConfig {
heartbeat_interval_seconds: 30, heartbeat_interval_seconds: 30,
cursor_rewind_seconds: 2, cursor_rewind_seconds: 2,
primary_concurrency: 4, primary_concurrency: 4,
dependent_concurrency: 2, dependent_concurrency: 8,
requests_per_second: 30.0,
fetch_resource_events: true, fetch_resource_events: true,
} }
} }

View File

@@ -26,9 +26,11 @@ struct RateLimiter {
impl RateLimiter { impl RateLimiter {
fn new(requests_per_second: f64) -> Self { fn new(requests_per_second: f64) -> Self {
// Floor at 0.1 rps: 1.0 / 0.0 yields infinity, and Duration::from_secs_f64 panics on non-finite values
let rps = requests_per_second.max(0.1);
Self { Self {
last_request: Instant::now() - Duration::from_secs(1), // Allow immediate first request last_request: Instant::now() - Duration::from_secs(1), // Allow immediate first request
min_interval: Duration::from_secs_f64(1.0 / requests_per_second), min_interval: Duration::from_secs_f64(1.0 / rps),
} }
} }
@@ -67,6 +69,10 @@ fn rand_jitter() -> u64 {
} }
/// GitLab API client with rate limiting. /// GitLab API client with rate limiting.
///
/// Cloning shares the underlying HTTP client and rate limiter,
/// making it cheap and safe for concurrent use across projects.
#[derive(Clone)]
pub struct GitLabClient { pub struct GitLabClient {
client: Client, client: Client,
base_url: String, base_url: String,
@@ -112,15 +118,20 @@ impl GitLabClient {
self.request("/api/v4/version").await self.request("/api/v4/version").await
} }
/// Make an authenticated API request. /// Maximum number of retries on 429 Too Many Requests.
const MAX_RETRIES: u32 = 3;
/// Make an authenticated API request with automatic 429 retry.
async fn request<T: serde::de::DeserializeOwned>(&self, path: &str) -> Result<T> { async fn request<T: serde::de::DeserializeOwned>(&self, path: &str) -> Result<T> {
let url = format!("{}{}", self.base_url, path);
for attempt in 0..=Self::MAX_RETRIES {
let delay = self.rate_limiter.lock().await.check_delay(); let delay = self.rate_limiter.lock().await.check_delay();
if let Some(d) = delay { if let Some(d) = delay {
sleep(d).await; sleep(d).await;
} }
let url = format!("{}{}", self.base_url, path); debug!(url = %url, attempt, "GitLab request");
debug!(url = %url, "GitLab request");
let response = self let response = self
.client .client
@@ -133,7 +144,32 @@ impl GitLabClient {
source: Some(e), source: Some(e),
})?; })?;
self.handle_response(response, path).await if response.status() == StatusCode::TOO_MANY_REQUESTS && attempt < Self::MAX_RETRIES {
let retry_after = Self::parse_retry_after(&response);
tracing::warn!(
retry_after_secs = retry_after,
attempt,
path,
"Rate limited by GitLab, retrying"
);
sleep(Duration::from_secs(retry_after)).await;
continue;
}
return self.handle_response(response, path).await;
}
unreachable!("loop always returns")
}
/// Parse retry-after header from a 429 response, defaulting to 60s.
fn parse_retry_after(response: &Response) -> u64 {
response
.headers()
.get("retry-after")
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse().ok())
.unwrap_or(60)
} }
/// Handle API response, converting errors appropriately. /// Handle API response, converting errors appropriately.
@@ -150,19 +186,22 @@ impl GitLabClient {
}), }),
StatusCode::TOO_MANY_REQUESTS => { StatusCode::TOO_MANY_REQUESTS => {
let retry_after = response let retry_after = Self::parse_retry_after(&response);
.headers()
.get("retry-after")
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse().ok())
.unwrap_or(60);
Err(LoreError::GitLabRateLimited { retry_after }) Err(LoreError::GitLabRateLimited { retry_after })
} }
status if status.is_success() => { status if status.is_success() => {
let body = response.json().await?; let text = response.text().await?;
Ok(body) serde_json::from_str(&text).map_err(|e| {
let preview = if text.len() > 500 {
&text[..500]
} else {
&text
};
LoreError::Other(format!(
"Failed to decode response from {path}: {e}\nResponse preview: {preview}"
))
})
} }
status => Err(LoreError::Other(format!( status => Err(LoreError::Other(format!(
@@ -498,18 +537,21 @@ impl GitLabClient {
} }
/// Make an authenticated API request with query parameters, returning headers. /// Make an authenticated API request with query parameters, returning headers.
/// Automatically retries on 429 Too Many Requests.
async fn request_with_headers<T: serde::de::DeserializeOwned>( async fn request_with_headers<T: serde::de::DeserializeOwned>(
&self, &self,
path: &str, path: &str,
params: &[(&str, String)], params: &[(&str, String)],
) -> Result<(T, HeaderMap)> { ) -> Result<(T, HeaderMap)> {
let url = format!("{}{}", self.base_url, path);
for attempt in 0..=Self::MAX_RETRIES {
let delay = self.rate_limiter.lock().await.check_delay(); let delay = self.rate_limiter.lock().await.check_delay();
if let Some(d) = delay { if let Some(d) = delay {
sleep(d).await; sleep(d).await;
} }
let url = format!("{}{}", self.base_url, path); debug!(url = %url, ?params, attempt, "GitLab paginated request");
debug!(url = %url, ?params, "GitLab paginated request");
let response = self let response = self
.client .client
@@ -523,10 +565,24 @@ impl GitLabClient {
source: Some(e), source: Some(e),
})?; })?;
if response.status() == StatusCode::TOO_MANY_REQUESTS && attempt < Self::MAX_RETRIES {
let retry_after = Self::parse_retry_after(&response);
tracing::warn!(
retry_after_secs = retry_after,
attempt,
path,
"Rate limited by GitLab, retrying"
);
sleep(Duration::from_secs(retry_after)).await;
continue;
}
let headers = response.headers().clone(); let headers = response.headers().clone();
let body = self.handle_response(response, path).await?; let body = self.handle_response(response, path).await?;
return Ok((body, headers));
}
Ok((body, headers)) unreachable!("loop always returns")
} }
} }