2 Commits

Author SHA1 Message Date
teernisse
5c44ee91fb fix(robot): propagate JSON serialization errors instead of silent failure
Three robot-mode print functions used `serde_json::to_string().unwrap_or_default()`
which silently outputs an empty string on failure (exit 0, no error). This
diverged from the codebase standard in handlers.rs which uses `?` propagation.

Changed to return Result<()> with proper LoreError::Other mapping:
- explain.rs: print_explain_json()
- file_history.rs: print_file_history_json()
- trace.rs: print_trace_json()

Updated callers in handlers.rs and explain.rs to propagate with `?`.

While serde_json::to_string on a json!() Value is unlikely to fail in practice
(only non-finite floats trigger it), the unwrap_or_default pattern violates the
robot mode contract: callers expect either valid JSON on stdout or a structured
error on stderr with a non-zero exit code, never empty output with exit 0.
2026-03-10 17:11:03 -04:00
teernisse
6aff96d32f fix(sql): add ORDER BY to all LIMIT queries for deterministic results
SQLite does not guarantee row order without ORDER BY, even with LIMIT.
This was a systemic issue found during a multi-pass bug hunt:

Production queries (explain.rs):
- Outgoing reference query: ORDER BY target_entity_type, target_entity_iid
- Incoming reference query: ORDER BY source_entity_type, COALESCE(i.iid, mr.iid)
  Without these, robot mode output was non-deterministic across calls,
  breaking clients expecting stable ordering.

Test helper queries (5 locations across 3 files):
- discussions_tests.rs: get_discussion_id()
- mr_discussions.rs: get_mr_discussion_id()
- queue.rs: setup_db_with_job(), release_all_locked_jobs_clears_locks()
  Currently safe (single-row inserts) but would break silently if tests
  expanded to multi-row fixtures.
2026-03-10 17:10:52 -04:00
7 changed files with 53 additions and 19 deletions

View File

@@ -1328,7 +1328,7 @@ fn handle_file_history(
if robot_mode {
let elapsed_ms = start.elapsed().as_millis() as u64;
print_file_history_json(&result, elapsed_ms);
print_file_history_json(&result, elapsed_ms)?;
} else {
print_file_history(&result);
}
@@ -1384,7 +1384,7 @@ fn handle_trace(
if robot_mode {
let elapsed_ms = start.elapsed().as_millis() as u64;
print_trace_json(&result, elapsed_ms, line_requested);
print_trace_json(&result, elapsed_ms, line_requested)?;
} else {
print_trace(&result);
}

View File

@@ -821,7 +821,8 @@ fn fetch_related_entities(
LEFT JOIN merge_requests mr ON er.target_entity_type = 'merge_request' AND mr.id = er.target_entity_id \
WHERE er.source_entity_type = ?1 AND er.source_entity_id = ?2 \
AND er.reference_type != 'closes' \
AND er.target_entity_iid IS NOT NULL",
AND er.target_entity_iid IS NOT NULL \
ORDER BY er.target_entity_type, er.target_entity_iid",
)?;
let outgoing: Vec<RelatedEntityInfo> = out_stmt
@@ -845,7 +846,8 @@ fn fetch_related_entities(
LEFT JOIN merge_requests mr ON er.source_entity_type = 'merge_request' AND mr.id = er.source_entity_id \
WHERE er.target_entity_type = ?1 AND er.target_entity_id = ?2 \
AND er.reference_type != 'closes' \
AND COALESCE(i.iid, mr.iid) IS NOT NULL",
AND COALESCE(i.iid, mr.iid) IS NOT NULL \
ORDER BY er.source_entity_type, COALESCE(i.iid, mr.iid)",
)?;
let incoming: Vec<RelatedEntityInfo> = in_stmt
@@ -1020,7 +1022,7 @@ pub fn handle_explain(
let elapsed_ms = start.elapsed().as_millis() as u64;
if robot_mode {
print_explain_json(&result, elapsed_ms);
print_explain_json(&result, elapsed_ms)?;
} else {
print_explain(&result);
}
@@ -1032,13 +1034,18 @@ pub fn handle_explain(
// Output rendering (Task 5 fills these in fully)
// ---------------------------------------------------------------------------
pub fn print_explain_json(result: &ExplainResult, elapsed_ms: u64) {
pub fn print_explain_json(result: &ExplainResult, elapsed_ms: u64) -> Result<()> {
let response = serde_json::json!({
"ok": true,
"data": result,
"meta": { "elapsed_ms": elapsed_ms }
});
println!("{}", serde_json::to_string(&response).unwrap_or_default());
println!(
"{}",
serde_json::to_string(&response)
.map_err(|e| LoreError::Other(format!("JSON serialization failed: {e}")))?
);
Ok(())
}
pub fn print_explain(result: &ExplainResult) {

View File

@@ -5,7 +5,7 @@ use tracing::info;
use crate::Config;
use crate::cli::render::{self, Icons, Theme};
use crate::core::db::create_connection;
use crate::core::error::Result;
use crate::core::error::{LoreError, Result};
use crate::core::file_history::resolve_rename_chain;
use crate::core::paths::get_db_path;
use crate::core::project::resolve_project;
@@ -391,7 +391,7 @@ pub fn print_file_history(result: &FileHistoryResult) {
// ── Robot (JSON) output ─────────────────────────────────────────────────────
pub fn print_file_history_json(result: &FileHistoryResult, elapsed_ms: u64) {
pub fn print_file_history_json(result: &FileHistoryResult, elapsed_ms: u64) -> Result<()> {
let output = serde_json::json!({
"ok": true,
"data": {
@@ -409,5 +409,10 @@ pub fn print_file_history_json(result: &FileHistoryResult, elapsed_ms: u64) {
}
});
println!("{}", serde_json::to_string(&output).unwrap_or_default());
println!(
"{}",
serde_json::to_string(&output)
.map_err(|e| LoreError::Other(format!("JSON serialization failed: {e}")))?
);
Ok(())
}

View File

@@ -1,4 +1,5 @@
use crate::cli::render::{Icons, Theme};
use crate::core::error::{LoreError, Result};
use crate::core::trace::{TraceChain, TraceResult};
/// Parse a path with optional `:line` suffix.
@@ -152,7 +153,11 @@ fn truncate_body(body: &str, max: usize) -> String {
format!("{}...", &body[..boundary])
}
pub fn print_trace_json(result: &TraceResult, elapsed_ms: u64, line_requested: Option<u32>) {
pub fn print_trace_json(
result: &TraceResult,
elapsed_ms: u64,
line_requested: Option<u32>,
) -> Result<()> {
// Truncate discussion bodies for token efficiency in robot mode
let chains: Vec<serde_json::Value> = result
.trace_chains
@@ -205,7 +210,12 @@ pub fn print_trace_json(result: &TraceResult, elapsed_ms: u64, line_requested: O
}
});
println!("{}", serde_json::to_string(&output).unwrap_or_default());
println!(
"{}",
serde_json::to_string(&output)
.map_err(|e| LoreError::Other(format!("JSON serialization failed: {e}")))?
);
Ok(())
}
#[cfg(test)]

View File

@@ -40,8 +40,12 @@ fn setup() -> Connection {
}
fn get_discussion_id(conn: &Connection) -> i64 {
conn.query_row("SELECT id FROM discussions LIMIT 1", [], |row| row.get(0))
.unwrap()
conn.query_row(
"SELECT id FROM discussions ORDER BY id LIMIT 1",
[],
|row| row.get(0),
)
.unwrap()
}
#[allow(clippy::too_many_arguments)]

View File

@@ -786,8 +786,12 @@ mod tests {
}
fn get_mr_discussion_id(conn: &Connection) -> i64 {
conn.query_row("SELECT id FROM discussions LIMIT 1", [], |row| row.get(0))
.unwrap()
conn.query_row(
"SELECT id FROM discussions ORDER BY id LIMIT 1",
[],
|row| row.get(0),
)
.unwrap()
}
#[allow(clippy::too_many_arguments)]

View File

@@ -242,14 +242,16 @@ mod tests {
.unwrap();
let project_id: i64 = conn
.query_row("SELECT id FROM projects LIMIT 1", [], |row| row.get(0))
.query_row("SELECT id FROM projects ORDER BY id LIMIT 1", [], |row| {
row.get(0)
})
.unwrap();
enqueue_job(&conn, project_id, "issue", 42, 100, "resource_events", None).unwrap();
let job_id: i64 = conn
.query_row(
"SELECT id FROM pending_dependent_fetches LIMIT 1",
"SELECT id FROM pending_dependent_fetches ORDER BY id LIMIT 1",
[],
|row| row.get(0),
)
@@ -301,7 +303,9 @@ mod tests {
let (conn, _job_id) = setup_db_with_job();
let project_id: i64 = conn
.query_row("SELECT id FROM projects LIMIT 1", [], |row| row.get(0))
.query_row("SELECT id FROM projects ORDER BY id LIMIT 1", [], |row| {
row.get(0)
})
.unwrap();
let jobs = claim_jobs(&conn, "resource_events", project_id, 10).unwrap();
assert_eq!(jobs.len(), 1);