feat(who): expand expert + overlap queries with mr_file_changes and mr_reviewers

Chain: bd-jec (config flag) -> bd-2yo (fetch MR diffs) -> bd-3qn6 (rewrite who queries)

- Add fetch_mr_file_changes config option and --no-file-changes CLI flag
- Add GitLab MR diffs API fetch pipeline with watermark-based sync
- Create migration 020 for diffs_synced_for_updated_at watermark column
- Rewrite query_expert() and query_overlap() to use 4-signal UNION ALL:
  DiffNote reviewers, DiffNote MR authors, file-change authors, file-change reviewers
- Deduplicate across signal types via COUNT(DISTINCT CASE WHEN ... THEN mr_id END)
- Add insert_file_change test helper, 8 new who tests, all 397 tests pass
- Also includes: list performance migration 019, autocorrect module, README updates

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Taylor Eernisse
2026-02-08 13:35:14 -05:00
parent 435a208c93
commit 95b7183add
19 changed files with 2139 additions and 291 deletions

802
src/cli/autocorrect.rs Normal file
View File

@@ -0,0 +1,802 @@
use serde::Serialize;
use strsim::jaro_winkler;
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/// A single correction applied to one argument.
#[derive(Debug, Clone, Serialize)]
pub struct Correction {
    /// The argument exactly as the user typed it.
    pub original: String,
    /// The replacement argument that is substituted into the arg list.
    pub corrected: String,
    /// Which rule produced this correction.
    pub rule: CorrectionRule,
    /// Heuristic confidence in the 0.0–1.0 range; fuzzy corrections carry
    /// their Jaro-Winkler similarity score (possibly discounted).
    pub confidence: f64,
}
/// Which rule triggered the correction.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum CorrectionRule {
    /// `-robot` style single-dash long flag promoted to `--robot`.
    SingleDashLongFlag,
    /// `--Robot` style casing normalized to the lowercase flag.
    CaseNormalization,
    /// Misspelled flag matched to the closest valid flag via Jaro-Winkler.
    FuzzyFlag,
}
/// Result of the correction pass over raw args.
#[derive(Debug, Clone)]
pub struct CorrectionResult {
    /// The args to hand on to clap, with corrections already substituted.
    pub args: Vec<String>,
    /// Every correction that was applied, in argument order.
    pub corrections: Vec<Correction>,
}
// ---------------------------------------------------------------------------
// Flag registry
// ---------------------------------------------------------------------------
/// Global flags accepted by every command (from `Cli` struct).
///
/// The `registry_covers_global_flags` test cross-checks this list against
/// clap introspection, so drift surfaces as a test failure.
const GLOBAL_FLAGS: &[&str] = &[
    "--config",
    "--robot",
    "--json",
    "--color",
    "--quiet",
    "--no-quiet",
    "--verbose",
    "--no-verbose",
    "--log-format",
];
/// Per-subcommand flags. Each entry is `(command_name, &[flags])`.
/// Hidden `--no-*` variants are included so they can be fuzzy-matched too.
///
/// The `registry_covers_command_flags` test cross-checks every entry against
/// clap introspection; adding a long flag to a subcommand without updating
/// this table fails that test.
const COMMAND_FLAGS: &[(&str, &[&str])] = &[
    (
        "issues",
        &[
            "--limit",
            "--fields",
            "--state",
            "--project",
            "--author",
            "--assignee",
            "--label",
            "--milestone",
            "--since",
            "--due-before",
            "--has-due",
            "--no-has-due",
            "--sort",
            "--asc",
            "--no-asc",
            "--open",
            "--no-open",
        ],
    ),
    (
        "mrs",
        &[
            "--limit",
            "--fields",
            "--state",
            "--project",
            "--author",
            "--assignee",
            "--reviewer",
            "--label",
            "--since",
            "--draft",
            "--no-draft",
            "--target",
            "--source",
            "--sort",
            "--asc",
            "--no-asc",
            "--open",
            "--no-open",
        ],
    ),
    (
        "ingest",
        &[
            "--project",
            "--force",
            "--no-force",
            "--full",
            "--no-full",
            "--dry-run",
            "--no-dry-run",
        ],
    ),
    (
        "sync",
        &[
            "--full",
            "--no-full",
            "--force",
            "--no-force",
            "--no-embed",
            "--no-docs",
            "--no-events",
            "--no-file-changes",
            "--dry-run",
            "--no-dry-run",
        ],
    ),
    (
        "search",
        &[
            "--mode",
            "--type",
            "--author",
            "--project",
            "--label",
            "--path",
            "--after",
            "--updated-after",
            "--limit",
            "--explain",
            "--no-explain",
            "--fts-mode",
        ],
    ),
    (
        "embed",
        &["--full", "--no-full", "--retry-failed", "--no-retry-failed"],
    ),
    (
        "stats",
        &[
            "--check",
            "--no-check",
            "--repair",
            "--dry-run",
            "--no-dry-run",
        ],
    ),
    ("count", &["--for"]),
    (
        "timeline",
        &[
            "--project",
            "--since",
            "--depth",
            "--expand-mentions",
            "--limit",
            "--max-seeds",
            "--max-entities",
            "--max-evidence",
        ],
    ),
    (
        "who",
        &[
            "--path",
            "--active",
            "--overlap",
            "--reviews",
            "--since",
            "--project",
            "--limit",
        ],
    ),
    (
        "init",
        &[
            "--force",
            "--non-interactive",
            "--gitlab-url",
            "--token-env-var",
            "--projects",
        ],
    ),
    ("generate-docs", &["--full", "--project"]),
    // `completions` takes only positionals, so its flag list is empty.
    ("completions", &[]),
    (
        "list",
        &[
            "--limit",
            "--project",
            "--state",
            "--author",
            "--assignee",
            "--label",
            "--milestone",
            "--since",
            "--due-before",
            "--has-due-date",
            "--sort",
            "--order",
            "--open",
            "--draft",
            "--no-draft",
            "--reviewer",
            "--target-branch",
            "--source-branch",
        ],
    ),
    ("show", &["--project"]),
    ("reset", &["--yes"]),
];
/// Valid values for enum-like flags, used for post-clap error enhancement.
/// Looked up case-insensitively by [`valid_values_for_flag`].
pub const ENUM_VALUES: &[(&str, &[&str])] = &[
    ("--state", &["opened", "closed", "merged", "locked", "all"]),
    ("--mode", &["lexical", "hybrid", "semantic"]),
    ("--sort", &["updated", "created", "iid"]),
    ("--type", &["issue", "mr", "discussion"]),
    ("--fts-mode", &["safe", "raw"]),
    ("--color", &["auto", "always", "never"]),
    ("--log-format", &["text", "json"]),
    ("--for", &["issue", "mr"]),
];
// ---------------------------------------------------------------------------
// Correction thresholds
// ---------------------------------------------------------------------------
/// Minimum Jaro-Winkler similarity for a fuzzy flag correction to apply;
/// below this the arg is left untouched and clap reports the error.
const FUZZY_FLAG_THRESHOLD: f64 = 0.8;
// ---------------------------------------------------------------------------
// Core logic
// ---------------------------------------------------------------------------
/// Detect which subcommand is being invoked by finding the first positional
/// arg (not a flag, not a flag value).
fn detect_subcommand(args: &[String]) -> Option<&str> {
    // args[0] is the binary name; scan the remainder for the first positional.
    let mut rest = args.iter().skip(1);
    while let Some(arg) = rest.next() {
        // The first non-flag arg is the subcommand.
        if !arg.starts_with('-') {
            return Some(arg.as_str());
        }
        // `--flag=value` carries its value inline — nothing extra to consume.
        if arg.contains('=') {
            continue;
        }
        // Known value-taking global flags consume the following arg.
        if matches!(arg.as_str(), "--config" | "-c" | "--color" | "--log-format") {
            rest.next();
        }
    }
    None
}
/// Build the set of valid long flags for the detected subcommand.
fn valid_flags_for(subcommand: Option<&str>) -> Vec<&'static str> {
    let mut flags: Vec<&'static str> = GLOBAL_FLAGS.to_vec();
    match subcommand {
        Some(cmd) => {
            // Known subcommand: append its registered flags, if any.
            if let Some((_, cmd_flags)) = COMMAND_FLAGS.iter().find(|(name, _)| *name == cmd) {
                flags.extend_from_slice(cmd_flags);
            }
        }
        None => {
            // No subcommand detected — include all flags for maximum matching,
            // deduplicating as we go.
            for (_, cmd_flags) in COMMAND_FLAGS {
                for flag in *cmd_flags {
                    if !flags.contains(flag) {
                        flags.push(flag);
                    }
                }
            }
        }
    }
    flags
}
/// Run the pre-clap correction pass on raw args.
///
/// Returns the (possibly modified) args and any corrections applied.
pub fn correct_args(raw: Vec<String>) -> CorrectionResult {
    let valid = valid_flags_for(detect_subcommand(&raw));
    let mut corrections = Vec::new();
    // Map each arg through the corrector, recording every applied correction.
    let args: Vec<String> = raw
        .into_iter()
        .map(|arg| match try_correct(&arg, &valid) {
            Some(correction) => {
                let replacement = correction.corrected.clone();
                corrections.push(correction);
                replacement
            }
            None => arg,
        })
        .collect();
    CorrectionResult { args, corrections }
}
/// Try to correct a single arg. Returns `None` if no correction needed.
///
/// Correction rules, in order:
/// 1. Single-dash long flag — `-robot` -> `--robot`, with case-insensitive
///    and fuzzy fallbacks on the promoted candidate.
/// 2. Case normalization — `--Robot` -> `--robot`.
/// 3. Fuzzy match against the valid flag set — `--staate` -> `--state`.
///
/// An inline `=value` suffix is split off first and re-attached verbatim,
/// so only the flag-name portion is ever matched or rewritten (previously
/// the single-dash path matched on the whole `name=value` string and a
/// fuzzy hit silently dropped the value).
fn try_correct(arg: &str, valid_flags: &[&str]) -> Option<Correction> {
    // Only attempt correction on flag-like args (starts with `-`)
    if !arg.starts_with('-') {
        return None;
    }
    // Bare `-` (conventional stdin placeholder) and `--` (end-of-options
    // marker) are not flags; fuzzy-matching them can produce spurious
    // corrections (`--` scores ~0.84 against `--asc`, above the 0.8
    // threshold), so bail out before any matching.
    if arg == "-" || arg == "--" {
        return None;
    }
    // Split an inline `=value` off up front; every rule below corrects only
    // the flag-name part and re-attaches the value untouched.
    let (flag_part, value_suffix) = match arg.find('=') {
        Some(eq_pos) => (&arg[..eq_pos], Some(&arg[eq_pos..])),
        None => (arg, None),
    };
    // Re-attach the `=value` suffix (if any) to a corrected flag name.
    let attach = |flag: &str| match value_suffix {
        Some(suffix) => format!("{flag}{suffix}"),
        None => flag.to_string(),
    };
    if !arg.starts_with("--") {
        // Rule 1: Single-dash long flag — e.g. `-robot` (len > 2, not a valid
        // short flag). A short flag is `-` + single char, optionally stacked
        // (-vvv); anything else is likely a single-dash long flag.
        let after_dash = &flag_part[1..];
        // Degenerate `-=value` form: nothing to correct.
        if after_dash.is_empty() {
            return None;
        }
        // Stacked short flags like -vvv (all the same char) are left alone.
        let first = after_dash.chars().next().unwrap_or('\0');
        let all_same_char = after_dash.len() > 1 && after_dash.chars().all(|c| c == first);
        if all_same_char {
            return None;
        }
        // Single char = valid short flag, don't touch
        if after_dash.len() == 1 {
            return None;
        }
        // It looks like a single-dash long flag (e.g. `-robot`, `-state`)
        let candidate = format!("--{after_dash}");
        // Check exact match first (case-sensitive)
        if valid_flags.contains(&candidate.as_str()) {
            return Some(Correction {
                original: arg.to_string(),
                corrected: attach(&candidate),
                rule: CorrectionRule::SingleDashLongFlag,
                confidence: 0.95,
            });
        }
        // Check case-insensitive exact match
        let lower = candidate.to_lowercase();
        if let Some(&flag) = valid_flags.iter().find(|f| f.to_lowercase() == lower) {
            return Some(Correction {
                original: arg.to_string(),
                corrected: attach(flag),
                rule: CorrectionRule::SingleDashLongFlag,
                confidence: 0.95,
            });
        }
        // Try fuzzy on the single-dash candidate
        if let Some((best_flag, score)) = best_fuzzy_match(&lower, valid_flags)
            && score >= FUZZY_FLAG_THRESHOLD
        {
            return Some(Correction {
                original: arg.to_string(),
                corrected: attach(best_flag),
                rule: CorrectionRule::SingleDashLongFlag,
                confidence: score * 0.95, // discount slightly for compound correction
            });
        }
        return None;
    }
    // Already valid? No correction needed.
    if valid_flags.contains(&flag_part) {
        return None;
    }
    // Rule 2: Case normalization — `--Robot` -> `--robot`
    let lower = flag_part.to_lowercase();
    if lower != flag_part
        && let Some(&flag) = valid_flags.iter().find(|f| f.to_lowercase() == lower)
    {
        return Some(Correction {
            original: arg.to_string(),
            corrected: attach(flag),
            rule: CorrectionRule::CaseNormalization,
            confidence: 0.9,
        });
    }
    // Rule 3: Fuzzy flag match — `--staate` -> `--state`
    if let Some((best_flag, score)) = best_fuzzy_match(&lower, valid_flags)
        && score >= FUZZY_FLAG_THRESHOLD
    {
        return Some(Correction {
            original: arg.to_string(),
            corrected: attach(best_flag),
            rule: CorrectionRule::FuzzyFlag,
            confidence: score,
        });
    }
    None
}
/// Find the best fuzzy match among valid flags for a given (lowercased) input.
fn best_fuzzy_match<'a>(input: &str, valid_flags: &[&'a str]) -> Option<(&'a str, f64)> {
    let mut best: Option<(&'a str, f64)> = None;
    for &flag in valid_flags {
        let score = jaro_winkler(input, flag);
        // `>=` mirrors `max_by`, which keeps the LAST of equally-scored flags.
        let take = match best {
            None => true,
            Some((_, best_score)) => score >= best_score,
        };
        if take {
            best = Some((flag, score));
        }
    }
    best
}
// ---------------------------------------------------------------------------
// Post-clap suggestion helpers
// ---------------------------------------------------------------------------
/// Given an unrecognized flag (from a clap error), suggest the most similar
/// valid flag for the detected subcommand.
pub fn suggest_similar_flag(invalid_flag: &str, raw_args: &[String]) -> Option<String> {
    let valid = valid_flags_for(detect_subcommand(raw_args));
    // Lower threshold than the pre-clap pass (0.6 vs 0.8): we are only
    // suggesting, not rewriting.
    best_fuzzy_match(&invalid_flag.to_lowercase(), &valid)
        .filter(|&(_, score)| score >= 0.6)
        .map(|(flag, _)| flag.to_string())
}
/// Given a flag name, return its valid enum values (if known).
pub fn valid_values_for_flag(flag: &str) -> Option<&'static [&'static str]> {
    let lower = flag.to_lowercase();
    // Case-insensitive scan of the enum registry.
    for (name, vals) in ENUM_VALUES {
        if name.to_lowercase() == lower {
            return Some(*vals);
        }
    }
    None
}
/// Format a human/robot teaching note for a correction.
pub fn format_teaching_note(correction: &Correction) -> String {
    // Each rule only changes the leading phrase; the corrected/original
    // tail is identical across all three.
    let lead = match correction.rule {
        CorrectionRule::SingleDashLongFlag => "Use double-dash for long flags",
        CorrectionRule::CaseNormalization => "Flags are lowercase",
        CorrectionRule::FuzzyFlag => "Correct spelling",
    };
    format!(
        "{lead}: {} (not {})",
        correction.corrected, correction.original
    )
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    /// Split a whitespace-delimited command line into owned args.
    fn args(s: &str) -> Vec<String> {
        s.split_whitespace().map(String::from).collect()
    }

    // ---- Single-dash long flag ----
    #[test]
    fn single_dash_robot() {
        let result = correct_args(args("lore -robot issues -n 5"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].original, "-robot");
        assert_eq!(result.corrections[0].corrected, "--robot");
        assert_eq!(
            result.corrections[0].rule,
            CorrectionRule::SingleDashLongFlag
        );
        assert_eq!(result.args, args("lore --robot issues -n 5"));
    }

    #[test]
    fn single_dash_state() {
        // `-state` is a per-command `issues` flag, not a global one.
        let result = correct_args(args("lore --robot issues -state opened"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--state");
    }

    // ---- Case normalization ----
    #[test]
    fn case_robot() {
        let result = correct_args(args("lore --Robot issues"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--robot");
        assert_eq!(
            result.corrections[0].rule,
            CorrectionRule::CaseNormalization
        );
    }

    #[test]
    fn case_state_upper() {
        let result = correct_args(args("lore --robot issues --State opened"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--state");
        assert_eq!(
            result.corrections[0].rule,
            CorrectionRule::CaseNormalization
        );
    }

    #[test]
    fn case_all_upper() {
        // Corrections are reported in argument order.
        let result = correct_args(args("lore --ROBOT issues --STATE opened"));
        assert_eq!(result.corrections.len(), 2);
        assert_eq!(result.corrections[0].corrected, "--robot");
        assert_eq!(result.corrections[1].corrected, "--state");
    }

    // ---- Fuzzy flag match ----
    #[test]
    fn fuzzy_staate() {
        let result = correct_args(args("lore --robot issues --staate opened"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--state");
        assert_eq!(result.corrections[0].rule, CorrectionRule::FuzzyFlag);
    }

    #[test]
    fn fuzzy_projct() {
        let result = correct_args(args("lore --robot issues --projct group/repo"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--project");
        assert_eq!(result.corrections[0].rule, CorrectionRule::FuzzyFlag);
    }

    // ---- No corrections ----
    #[test]
    fn already_correct() {
        let original = args("lore --robot issues --state opened -n 10");
        let result = correct_args(original.clone());
        assert!(result.corrections.is_empty());
        assert_eq!(result.args, original);
    }

    #[test]
    fn short_flags_untouched() {
        let original = args("lore -J issues -n 10 -s opened -p group/repo");
        let result = correct_args(original.clone());
        assert!(result.corrections.is_empty());
    }

    #[test]
    fn stacked_short_flags_untouched() {
        // -vvv: all-same-char stacked short flag must not be "promoted".
        let original = args("lore -vvv issues");
        let result = correct_args(original.clone());
        assert!(result.corrections.is_empty());
    }

    #[test]
    fn positional_args_untouched() {
        let result = correct_args(args("lore --robot search authentication"));
        assert!(result.corrections.is_empty());
    }

    #[test]
    fn wildly_wrong_flag_not_corrected() {
        // `--xyzzy` shouldn't match anything above 0.8
        let result = correct_args(args("lore --robot issues --xyzzy foo"));
        assert!(result.corrections.is_empty());
    }

    // ---- Flag with = value ----
    #[test]
    fn flag_eq_value_case_correction() {
        // The `=value` suffix must survive the correction untouched.
        let result = correct_args(args("lore --robot issues --State=opened"));
        assert_eq!(result.corrections.len(), 1);
        assert_eq!(result.corrections[0].corrected, "--state=opened");
    }

    // ---- Multiple corrections in one invocation ----
    #[test]
    fn multiple_corrections() {
        let result = correct_args(args(
            "lore -robot issues --State opened --projct group/repo",
        ));
        assert_eq!(result.corrections.len(), 3);
        assert_eq!(result.args[1], "--robot");
        assert_eq!(result.args[3], "--state");
        assert_eq!(result.args[5], "--project");
    }

    // ---- Teaching notes ----
    #[test]
    fn teaching_note_single_dash() {
        let c = Correction {
            original: "-robot".to_string(),
            corrected: "--robot".to_string(),
            rule: CorrectionRule::SingleDashLongFlag,
            confidence: 0.95,
        };
        let note = format_teaching_note(&c);
        assert!(note.contains("double-dash"));
        assert!(note.contains("--robot"));
    }

    #[test]
    fn teaching_note_case() {
        let c = Correction {
            original: "--State".to_string(),
            corrected: "--state".to_string(),
            rule: CorrectionRule::CaseNormalization,
            confidence: 0.9,
        };
        let note = format_teaching_note(&c);
        assert!(note.contains("lowercase"));
    }

    #[test]
    fn teaching_note_fuzzy() {
        let c = Correction {
            original: "--staate".to_string(),
            corrected: "--state".to_string(),
            rule: CorrectionRule::FuzzyFlag,
            confidence: 0.85,
        };
        let note = format_teaching_note(&c);
        assert!(note.contains("spelling"));
    }

    // ---- Post-clap suggestion helpers ----
    #[test]
    fn suggest_similar_flag_works() {
        let raw = args("lore --robot issues --xstat opened");
        let suggestion = suggest_similar_flag("--xstat", &raw);
        // Should suggest --state (close enough with lower threshold 0.6)
        assert!(suggestion.is_some());
    }

    #[test]
    fn valid_values_for_state() {
        let vals = valid_values_for_flag("--state");
        assert!(vals.is_some());
        let vals = vals.unwrap();
        assert!(vals.contains(&"opened"));
        assert!(vals.contains(&"closed"));
    }

    #[test]
    fn valid_values_unknown_flag() {
        assert!(valid_values_for_flag("--xyzzy").is_none());
    }

    // ---- Subcommand detection ----
    #[test]
    fn detect_subcommand_basic() {
        assert_eq!(
            detect_subcommand(&args("lore issues -n 10")),
            Some("issues")
        );
    }

    #[test]
    fn detect_subcommand_with_globals() {
        // `--config` consumes its value, so `mrs` is the first positional.
        assert_eq!(
            detect_subcommand(&args("lore --robot --config /tmp/c.json mrs")),
            Some("mrs")
        );
    }

    #[test]
    fn detect_subcommand_with_color() {
        assert_eq!(
            detect_subcommand(&args("lore --color never issues")),
            Some("issues")
        );
    }

    #[test]
    fn detect_subcommand_none() {
        assert_eq!(detect_subcommand(&args("lore --robot")), None);
    }

    // ---- Registry drift test ----
    // This test uses clap introspection to verify our static registry covers
    // all long flags defined in the Cli struct.
    #[test]
    fn registry_covers_global_flags() {
        use clap::CommandFactory;
        let cmd = crate::cli::Cli::command();
        let clap_globals: Vec<String> = cmd
            .get_arguments()
            .filter_map(|a| a.get_long().map(|l| format!("--{l}")))
            .collect();
        for flag in &clap_globals {
            // Skip help/version — clap adds these automatically
            if flag == "--help" || flag == "--version" {
                continue;
            }
            assert!(
                GLOBAL_FLAGS.contains(&flag.as_str()),
                "Clap global flag {flag} is missing from GLOBAL_FLAGS registry. \
                Add it to GLOBAL_FLAGS in autocorrect.rs."
            );
        }
    }

    #[test]
    fn registry_covers_command_flags() {
        use clap::CommandFactory;
        let cmd = crate::cli::Cli::command();
        for sub in cmd.get_subcommands() {
            let sub_name = sub.get_name().to_string();
            // Find our registry entry
            let registry_entry = COMMAND_FLAGS.iter().find(|(name, _)| *name == sub_name);
            // Not all subcommands need entries (e.g., version, auth, status
            // with no subcommand-specific flags)
            let clap_flags: Vec<String> = sub
                .get_arguments()
                .filter_map(|a| a.get_long().map(|l| format!("--{l}")))
                .filter(|f| !GLOBAL_FLAGS.contains(&f.as_str()))
                .filter(|f| f != "--help" && f != "--version")
                .collect();
            if clap_flags.is_empty() {
                continue;
            }
            let registry_flags = registry_entry.map(|(_, flags)| *flags);
            let registry_flags = registry_flags.unwrap_or_else(|| {
                panic!(
                    "Subcommand '{sub_name}' has clap flags {clap_flags:?} but no COMMAND_FLAGS \
                    registry entry. Add it to COMMAND_FLAGS in autocorrect.rs."
                )
            });
            for flag in &clap_flags {
                assert!(
                    registry_flags.contains(&flag.as_str()),
                    "Clap flag {flag} on subcommand '{sub_name}' is missing from \
                    COMMAND_FLAGS registry. Add it to the '{sub_name}' entry in autocorrect.rs."
                );
            }
        }
    }
}

View File

@@ -501,6 +501,20 @@ async fn run_ingest_inner(
ProgressEvent::ClosesIssuesFetchComplete { .. } => {
disc_bar_clone.finish_and_clear();
}
ProgressEvent::MrDiffsFetchStarted { total } => {
disc_bar_clone.reset();
disc_bar_clone.set_length(total as u64);
disc_bar_clone.enable_steady_tick(std::time::Duration::from_millis(100));
stage_bar_clone.set_message(
"Fetching MR file changes...".to_string()
);
}
ProgressEvent::MrDiffFetched { current, total: _ } => {
disc_bar_clone.set_position(current as u64);
}
ProgressEvent::MrDiffsFetchComplete { .. } => {
disc_bar_clone.finish_and_clear();
}
})
};

View File

@@ -335,18 +335,12 @@ fn query_issues(conn: &Connection, filters: &ListFilters) -> Result<ListResult>
(SELECT GROUP_CONCAT(ia.username, X'1F')
FROM issue_assignees ia
WHERE ia.issue_id = i.id) AS assignees_csv,
COALESCE(d.total, 0) AS discussion_count,
COALESCE(d.unresolved, 0) AS unresolved_count
(SELECT COUNT(*) FROM discussions d
WHERE d.issue_id = i.id) AS discussion_count,
(SELECT COUNT(*) FROM discussions d
WHERE d.issue_id = i.id AND d.resolvable = 1 AND d.resolved = 0) AS unresolved_count
FROM issues i
JOIN projects p ON i.project_id = p.id
LEFT JOIN (
SELECT issue_id,
COUNT(*) as total,
SUM(CASE WHEN resolvable = 1 AND resolved = 0 THEN 1 ELSE 0 END) as unresolved
FROM discussions
WHERE issue_id IS NOT NULL
GROUP BY issue_id
) d ON d.issue_id = i.id
{where_sql}
ORDER BY {sort_column} {order}
LIMIT ?"
@@ -528,18 +522,12 @@ fn query_mrs(conn: &Connection, filters: &MrListFilters) -> Result<MrListResult>
(SELECT GROUP_CONCAT(mr.username, X'1F')
FROM mr_reviewers mr
WHERE mr.merge_request_id = m.id) AS reviewers_csv,
COALESCE(d.total, 0) AS discussion_count,
COALESCE(d.unresolved, 0) AS unresolved_count
(SELECT COUNT(*) FROM discussions d
WHERE d.merge_request_id = m.id) AS discussion_count,
(SELECT COUNT(*) FROM discussions d
WHERE d.merge_request_id = m.id AND d.resolvable = 1 AND d.resolved = 0) AS unresolved_count
FROM merge_requests m
JOIN projects p ON m.project_id = p.id
LEFT JOIN (
SELECT merge_request_id,
COUNT(*) as total,
SUM(CASE WHEN resolvable = 1 AND resolved = 0 THEN 1 ELSE 0 END) as unresolved
FROM discussions
WHERE merge_request_id IS NOT NULL
GROUP BY merge_request_id
) d ON d.merge_request_id = m.id
{where_sql}
ORDER BY {sort_column} {order}
LIMIT ?"

View File

@@ -433,7 +433,7 @@ fn build_path_query(conn: &Connection, path: &str, project_id: Option<i64>) -> R
// Heuristic is now only a fallback; probes decide first when ambiguous.
let looks_like_file = !forced_dir && (is_root || last_segment.contains('.'));
// Probe 1: exact file exists (project-scoped via nullable binding)
// Probe 1: exact file exists in DiffNotes OR mr_file_changes (project-scoped)
let exact_exists = conn
.query_row(
"SELECT 1 FROM notes
@@ -445,9 +445,19 @@ fn build_path_query(conn: &Connection, path: &str, project_id: Option<i64>) -> R
rusqlite::params![trimmed, project_id],
|_| Ok(()),
)
.is_ok();
.is_ok()
|| conn
.query_row(
"SELECT 1 FROM mr_file_changes
WHERE new_path = ?1
AND (?2 IS NULL OR project_id = ?2)
LIMIT 1",
rusqlite::params![trimmed, project_id],
|_| Ok(()),
)
.is_ok();
// Probe 2: directory prefix exists (project-scoped)
// Probe 2: directory prefix exists in DiffNotes OR mr_file_changes (project-scoped)
let prefix_exists = if !forced_dir && !exact_exists {
let escaped = escape_like(trimmed);
let pat = format!("{escaped}/%");
@@ -462,6 +472,16 @@ fn build_path_query(conn: &Connection, path: &str, project_id: Option<i64>) -> R
|_| Ok(()),
)
.is_ok()
|| conn
.query_row(
"SELECT 1 FROM mr_file_changes
WHERE new_path LIKE ?1 ESCAPE '\\'
AND (?2 IS NULL OR project_id = ?2)
LIMIT 1",
rusqlite::params![pat, project_id],
|_| Ok(()),
)
.is_ok()
} else {
false
};
@@ -513,125 +533,117 @@ fn query_expert(
let pq = build_path_query(conn, path, project_id)?;
let limit_plus_one = (limit + 1) as i64;
let sql_prefix = "
WITH activity AS (
SELECT
n.author_username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS mr_cnt,
COUNT(*) AS note_cnt,
MAX(n.created_at) AS last_seen_at
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.position_new_path LIKE ?1 ESCAPE '\\'
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY n.author_username
UNION ALL
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS mr_cnt,
0 AS note_cnt,
MAX(n.created_at) AS last_seen_at
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND m.author_username IS NOT NULL
AND n.position_new_path LIKE ?1 ESCAPE '\\'
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username
)
SELECT
username,
SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) AS review_mr_count,
SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) AS review_note_count,
SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) AS author_mr_count,
MAX(last_seen_at) AS last_seen_at,
(
(SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) * 20) +
(SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) * 12) +
(SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) * 1)
) AS score
FROM activity
GROUP BY username
ORDER BY score DESC, last_seen_at DESC, username ASC
LIMIT ?4
";
let sql_exact = "
WITH activity AS (
SELECT
n.author_username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS mr_cnt,
COUNT(*) AS note_cnt,
MAX(n.created_at) AS last_seen_at
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.position_new_path = ?1
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY n.author_username
UNION ALL
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS mr_cnt,
0 AS note_cnt,
MAX(n.created_at) AS last_seen_at
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND m.author_username IS NOT NULL
AND n.position_new_path = ?1
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username
)
SELECT
username,
SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) AS review_mr_count,
SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) AS review_note_count,
SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) AS author_mr_count,
MAX(last_seen_at) AS last_seen_at,
(
(SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) * 20) +
(SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) * 12) +
(SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) * 1)
) AS score
FROM activity
GROUP BY username
ORDER BY score DESC, last_seen_at DESC, username ASC
LIMIT ?4
";
let mut stmt = if pq.is_prefix {
conn.prepare_cached(sql_prefix)?
// Build SQL with 4 signal sources (UNION ALL), deduplicating via COUNT(DISTINCT mr_id):
// 1. DiffNote reviewer — left inline review comments (not self-review)
// 2. DiffNote MR author — authored MR that has DiffNotes on this path
// 3. File-change author — authored MR that touched this path (mr_file_changes)
// 4. File-change reviewer — assigned reviewer on MR that touched this path
let path_op = if pq.is_prefix {
"LIKE ?1 ESCAPE '\\'"
} else {
conn.prepare_cached(sql_exact)?
"= ?1"
};
let sql = format!(
"
WITH signals AS (
-- 1. DiffNote reviewer (individual notes for note_cnt)
SELECT
n.author_username AS username,
'diffnote_reviewer' AS signal,
m.id AS mr_id,
n.id AS note_id,
n.created_at AS seen_at
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.position_new_path {path_op}
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
UNION ALL
-- 2. DiffNote MR author
SELECT DISTINCT
m.author_username AS username,
'diffnote_author' AS signal,
m.id AS mr_id,
NULL AS note_id,
MAX(n.created_at) AS seen_at
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
WHERE n.note_type = 'DiffNote'
AND n.is_system = 0
AND m.author_username IS NOT NULL
AND m.state IN ('opened','merged')
AND n.position_new_path {path_op}
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username, m.id
UNION ALL
-- 3. MR author via file changes
SELECT
m.author_username AS username,
'file_author' AS signal,
m.id AS mr_id,
NULL AS note_id,
m.updated_at AS seen_at
FROM mr_file_changes fc
JOIN merge_requests m ON fc.merge_request_id = m.id
WHERE m.author_username IS NOT NULL
AND m.state IN ('opened','merged')
AND fc.new_path {path_op}
AND m.updated_at >= ?2
AND (?3 IS NULL OR fc.project_id = ?3)
UNION ALL
-- 4. MR reviewer via file changes + mr_reviewers
SELECT
r.username AS username,
'file_reviewer' AS signal,
m.id AS mr_id,
NULL AS note_id,
m.updated_at AS seen_at
FROM mr_file_changes fc
JOIN merge_requests m ON fc.merge_request_id = m.id
JOIN mr_reviewers r ON r.merge_request_id = m.id
WHERE r.username IS NOT NULL
AND m.state IN ('opened','merged')
AND fc.new_path {path_op}
AND m.updated_at >= ?2
AND (?3 IS NULL OR fc.project_id = ?3)
)
SELECT
username,
COUNT(DISTINCT CASE WHEN signal IN ('diffnote_reviewer', 'file_reviewer')
THEN mr_id END) AS review_mr_count,
COUNT(CASE WHEN signal = 'diffnote_reviewer' THEN note_id END) AS review_note_count,
COUNT(DISTINCT CASE WHEN signal IN ('diffnote_author', 'file_author')
THEN mr_id END) AS author_mr_count,
MAX(seen_at) AS last_seen_at,
(
(COUNT(DISTINCT CASE WHEN signal IN ('diffnote_reviewer', 'file_reviewer')
THEN mr_id END) * 20) +
(COUNT(DISTINCT CASE WHEN signal IN ('diffnote_author', 'file_author')
THEN mr_id END) * 12) +
(COUNT(CASE WHEN signal = 'diffnote_reviewer' THEN note_id END) * 1)
) AS score
FROM signals
GROUP BY username
ORDER BY score DESC, last_seen_at DESC, username ASC
LIMIT ?4
"
);
let mut stmt = conn.prepare_cached(&sql)?;
let experts: Vec<Expert> = stmt
.query_map(
@@ -1160,97 +1172,100 @@ fn query_overlap(
) -> Result<OverlapResult> {
let pq = build_path_query(conn, path, project_id)?;
let sql_prefix = "SELECT username, role, touch_count, last_seen_at, mr_refs FROM (
SELECT
n.author_username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path LIKE ?1 ESCAPE '\\'
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY n.author_username
UNION ALL
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path LIKE ?1 ESCAPE '\\'
AND n.is_system = 0
AND m.state IN ('opened', 'merged')
AND m.author_username IS NOT NULL
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username
)";
let sql_exact = "SELECT username, role, touch_count, last_seen_at, mr_refs FROM (
SELECT
n.author_username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path = ?1
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY n.author_username
UNION ALL
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path = ?1
AND n.is_system = 0
AND m.state IN ('opened', 'merged')
AND m.author_username IS NOT NULL
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username
)";
let mut stmt = if pq.is_prefix {
conn.prepare_cached(sql_prefix)?
    // Build SQL with 4 signal sources, matching the expert query expansion.
    // Each UNION ALL branch aggregates per username in SQL, yielding
    // (username, role, touch_count, last_seen_at, mr_refs) rows.
let path_op = if pq.is_prefix {
"LIKE ?1 ESCAPE '\\'"
} else {
conn.prepare_cached(sql_exact)?
"= ?1"
};
let sql = format!(
"SELECT username, role, touch_count, last_seen_at, mr_refs FROM (
-- 1. DiffNote reviewer
SELECT
n.author_username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM notes n
JOIN discussions d ON n.discussion_id = d.id
JOIN merge_requests m ON d.merge_request_id = m.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path {path_op}
AND n.is_system = 0
AND n.author_username IS NOT NULL
AND (m.author_username IS NULL OR n.author_username != m.author_username)
AND m.state IN ('opened','merged')
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY n.author_username
UNION ALL
-- 2. DiffNote MR author
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(n.created_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM merge_requests m
JOIN discussions d ON d.merge_request_id = m.id
JOIN notes n ON n.discussion_id = d.id
JOIN projects p ON m.project_id = p.id
WHERE n.note_type = 'DiffNote'
AND n.position_new_path {path_op}
AND n.is_system = 0
AND m.state IN ('opened', 'merged')
AND m.author_username IS NOT NULL
AND n.created_at >= ?2
AND (?3 IS NULL OR n.project_id = ?3)
GROUP BY m.author_username
UNION ALL
-- 3. MR author via file changes
SELECT
m.author_username AS username,
'author' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(m.updated_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM mr_file_changes fc
JOIN merge_requests m ON fc.merge_request_id = m.id
JOIN projects p ON m.project_id = p.id
WHERE m.author_username IS NOT NULL
AND m.state IN ('opened','merged')
AND fc.new_path {path_op}
AND m.updated_at >= ?2
AND (?3 IS NULL OR fc.project_id = ?3)
GROUP BY m.author_username
UNION ALL
-- 4. MR reviewer via file changes + mr_reviewers
SELECT
r.username AS username,
'reviewer' AS role,
COUNT(DISTINCT m.id) AS touch_count,
MAX(m.updated_at) AS last_seen_at,
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
FROM mr_file_changes fc
JOIN merge_requests m ON fc.merge_request_id = m.id
JOIN projects p ON m.project_id = p.id
JOIN mr_reviewers r ON r.merge_request_id = m.id
WHERE r.username IS NOT NULL
AND m.state IN ('opened','merged')
AND fc.new_path {path_op}
AND m.updated_at >= ?2
AND (?3 IS NULL OR fc.project_id = ?3)
GROUP BY r.username
)"
);
let mut stmt = conn.prepare_cached(&sql)?;
let rows: Vec<(String, String, u32, i64, Option<String>)> = stmt
.query_map(rusqlite::params![pq.value, since_ms, project_id], |row| {
Ok((
@@ -2117,7 +2132,6 @@ mod tests {
.unwrap();
}
#[allow(dead_code)]
fn insert_reviewer(conn: &Connection, mr_id: i64, username: &str) {
conn.execute(
"INSERT INTO mr_reviewers (merge_request_id, username) VALUES (?1, ?2)",
@@ -2126,6 +2140,21 @@ mod tests {
.unwrap();
}
/// Test fixture: record one `mr_file_changes` row tying an MR to a path.
fn insert_file_change(
    conn: &Connection,
    mr_id: i64,
    project_id: i64,
    new_path: &str,
    change_type: &str,
) {
    let sql = "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type)
             VALUES (?1, ?2, ?3, ?4)";
    conn.execute(sql, rusqlite::params![mr_id, project_id, new_path, change_type])
        .unwrap();
}
#[test]
fn test_is_file_path_discrimination() {
// Contains '/' -> file path
@@ -2678,4 +2707,142 @@ mod tests {
let result = query_expert(&conn, "src/auth/", None, 0, 10).unwrap();
assert!(!result.truncated);
}
#[test]
fn test_expert_file_changes_only() {
    // An MR author must surface purely from mr_file_changes data,
    // even when no DiffNote rows exist for the path.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "file_author", "merged");
    insert_file_change(&conn, 1, 1, "src/auth/login.rs", "modified");

    let result = query_expert(&conn, "src/auth/login.rs", None, 0, 20).unwrap();

    assert_eq!(result.experts.len(), 1);
    let expert = &result.experts[0];
    assert_eq!(expert.username, "file_author");
    assert!(expert.author_mr_count > 0);
    assert_eq!(expert.review_mr_count, 0);
}
#[test]
fn test_expert_mr_reviewer_via_file_changes() {
    // Reviewers recorded only in mr_reviewers must still be surfaced when
    // their MR touched the queried file (connected via mr_file_changes).
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    insert_file_change(&conn, 1, 1, "src/auth/login.rs", "modified");
    insert_reviewer(&conn, 1, "assigned_reviewer");

    let result = query_expert(&conn, "src/auth/login.rs", None, 0, 20).unwrap();

    let reviewer = result
        .experts
        .iter()
        .find(|e| e.username == "assigned_reviewer");
    assert!(reviewer.is_some(), "assigned_reviewer should appear");
    assert!(reviewer.unwrap().review_mr_count > 0);
}
#[test]
fn test_expert_deduplicates_across_signals() {
    // When one user is both a DiffNote reviewer and an mr_reviewers entry
    // on the same MR, that MR must be counted exactly once.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    insert_discussion(&conn, 1, 1, Some(1), None, true, false);
    insert_diffnote(&conn, 1, 1, 1, "reviewer_b", "src/auth/login.rs", "looks good");

    // Duplicate signal: the same user is also an assigned reviewer, and the
    // MR carries matching file-change rows.
    insert_file_change(&conn, 1, 1, "src/auth/login.rs", "modified");
    insert_reviewer(&conn, 1, "reviewer_b");

    let result = query_expert(&conn, "src/auth/login.rs", None, 0, 20).unwrap();
    let reviewer = result
        .experts
        .iter()
        .find(|e| e.username == "reviewer_b")
        .unwrap();

    // One distinct MR across both signal types, never two.
    assert_eq!(reviewer.review_mr_count, 1);
}
#[test]
fn test_expert_combined_diffnote_and_file_changes() {
    // A directory-prefix query should credit an author for MRs discovered
    // via DiffNotes as well as MRs discovered only via file changes.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");

    // MR 1 carries a DiffNote on src/auth/login.rs.
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    insert_discussion(&conn, 1, 1, Some(1), None, true, false);
    insert_diffnote(&conn, 1, 1, 1, "reviewer_b", "src/auth/login.rs", "note");

    // MR 2 only has a file-change row on src/auth/session.rs.
    insert_mr(&conn, 2, 1, 200, "author_a", "merged");
    insert_file_change(&conn, 2, 1, "src/auth/session.rs", "added");

    let result = query_expert(&conn, "src/auth/", None, 0, 20).unwrap();
    let author = result
        .experts
        .iter()
        .find(|e| e.username == "author_a")
        .unwrap();

    // Both MRs count toward authorship: one per signal source.
    assert_eq!(author.author_mr_count, 2);
}
#[test]
fn test_expert_file_changes_prefix_match() {
    // A directory prefix query must pick up mr_file_changes rows whose
    // paths live under that directory.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    for (path, kind) in [
        ("src/auth/login.rs", "modified"),
        ("src/auth/session.rs", "added"),
    ] {
        insert_file_change(&conn, 1, 1, path, kind);
    }

    let result = query_expert(&conn, "src/auth/", None, 0, 20).unwrap();

    assert_eq!(result.path_match, "prefix");
    assert_eq!(result.experts.len(), 1);
    assert_eq!(result.experts[0].username, "author_a");
}
#[test]
fn test_overlap_file_changes_only() {
    // Overlap mode must surface users discovered through mr_file_changes
    // (authors) and mr_reviewers (assigned reviewers) with zero DiffNotes.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    insert_file_change(&conn, 1, 1, "src/auth/login.rs", "modified");
    insert_reviewer(&conn, 1, "reviewer_x");

    let result = query_overlap(&conn, "src/auth/", None, 0, 20).unwrap();

    let has_user = |name: &str| result.users.iter().any(|u| u.username == name);
    assert!(
        has_user("author_a"),
        "author_a should appear via file_changes"
    );
    assert!(
        has_user("reviewer_x"),
        "reviewer_x should appear via mr_reviewers + file_changes"
    );
}
#[test]
fn test_build_path_query_resolves_via_file_changes() {
    // The DB probe must classify "src/Dockerfile" as an exact file match
    // from mr_file_changes alone — no DiffNote rows exist for the path.
    let conn = setup_test_db();
    insert_project(&conn, 1, "team/backend");
    insert_mr(&conn, 1, 1, 100, "author_a", "merged");
    insert_file_change(&conn, 1, 1, "src/Dockerfile", "modified");

    let pq = build_path_query(&conn, "src/Dockerfile", None).unwrap();

    assert_eq!(pq.value, "src/Dockerfile");
    assert!(!pq.is_prefix);
}
}

View File

@@ -1,3 +1,4 @@
pub mod autocorrect;
pub mod commands;
pub mod progress;
pub mod robot;
@@ -81,13 +82,18 @@ impl Cli {
/// Detect robot mode from environment before parsing succeeds.
/// Used for structured error output when clap parsing fails.
/// Also catches common agent typos like `-robot` and `--Robot`.
pub fn detect_robot_mode_from_env() -> bool {
let args: Vec<String> = std::env::args().collect();
args.iter()
.any(|a| a == "--robot" || a == "-J" || a == "--json")
|| std::env::var("LORE_ROBOT")
.ok()
.is_some_and(|v| !v.is_empty() && v != "0" && v != "false")
args.iter().any(|a| {
a == "-J"
|| a.eq_ignore_ascii_case("--robot")
|| a.eq_ignore_ascii_case("-robot")
|| a.eq_ignore_ascii_case("--json")
|| a.eq_ignore_ascii_case("-json")
}) || std::env::var("LORE_ROBOT")
.ok()
.is_some_and(|v| !v.is_empty() && v != "0" && v != "false")
|| !std::io::stdout().is_terminal()
}
}
@@ -608,6 +614,10 @@ pub struct SyncArgs {
#[arg(long = "no-events")]
pub no_events: bool,
/// Skip MR file change fetching (overrides config)
#[arg(long = "no-file-changes")]
pub no_file_changes: bool,
/// Preview what would be synced without making changes
#[arg(long, overrides_with = "no_dry_run")]
pub dry_run: bool,