Files
claude-statusline/src/transcript.rs
Taylor Eernisse e0c4a0fa9a feat: add colorgrad, transcript parser, terminal palette detection, and expanded color/input systems
Infrastructure layer for the TUI visual overhaul. Introduces foundational
modules and capabilities that the section-level features build on:

colorgrad (0.7) dependency:
  OKLab gradient interpolation for per-character color transitions in
  sparklines and context bars. Adds ~100K to binary (929K -> 1.0M).

color.rs expansion:
  - parse_hex(): #RRGGBB and #RGB -> (u8, u8, u8) conversion
  - fg_rgb()/bg_rgb(): 24-bit true-color ANSI escape generation
  - gradient_fg(): two-point interpolation via colorgrad
  - make_gradient()/sample_fg(): multi-stop gradient construction and sampling
  - resolve_color() now supports: hex (#FF6B35), bg:color, bg:#hex,
    italic, underline, strikethrough, and palette refs (p:success)
  - Named background constants (BG_RED through BG_WHITE)

transcript.rs (new module):
  Parses Claude Code transcript JSONL files to derive tool use counts,
  turn counts, and per-tool breakdowns. Claude Code doesn't include
  total_tool_uses or total_turns in its JSON — we compute them by scanning
  the transcript. Includes compact cache serialization format and
  skip_lines support for /clear offset handling.

terminal.rs (new module):
  Auto-detects the terminal's ANSI color palette for theme-aware tool
  coloring. Priority chain: WezTerm config > Kitty config > Alacritty
  config > OSC 4 escape sequence query. Parses Lua (WezTerm), key-value
  (Kitty), and TOML/YAML (Alacritty) config formats. OSC 4 queries
  use raw /dev/tty I/O with termios to avoid pipe interference. Includes
  cache serialization helpers for 1-hour TTL caching.

input.rs updates:
  - All structs now derive Serialize (for --dump-state diagnostics)
  - New fields: transcript_path, session_id, cwd, vim.mode, agent.name,
    exceeds_200k_tokens, cost.total_api_duration_ms
  - CurrentUsage: added input_tokens and output_tokens fields
  - #[serde(flatten)] extras on InputData and CostInfo for forward compat

cache.rs:
  Added flush_prefix() for /clear detection — removes all cache entries
  matching a key prefix (e.g., "trend_" to reset all sparkline history).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 23:41:50 -05:00

133 lines
4.5 KiB
Rust

use serde::Serialize;
use std::collections::HashMap;
use std::io::BufRead;
use std::path::Path;
/// Statistics derived from a Claude Code transcript JSONL file.
///
/// Claude Code's status-line input does not include tool-use or turn totals,
/// so these are computed by scanning the transcript (see `parse_transcript`).
#[derive(Debug, Default, Clone, Serialize)]
pub struct TranscriptStats {
    /// Total number of `tool_use` content blocks across all assistant entries.
    pub total_tool_uses: u64,
    /// Number of `"type": "user"` entries seen (one per user turn).
    pub total_turns: u64,
    /// Name of the most recently seen tool, `None` if no tool was used.
    pub last_tool_name: Option<String>,
    /// Per-tool counts sorted by count descending.
    pub tool_counts: Vec<(String, u64)>,
}
/// Cache (de)serialization.
/// Format: `"tools:N,turns:N[,last:Name][;ToolA=C,ToolB=C,...]"`
impl TranscriptStats {
    /// Serialize to the compact cache format above.
    ///
    /// The `,last:` field is omitted when `last_tool_name` is `None`, and the
    /// `;`-separated breakdown is omitted when `tool_counts` is empty.
    /// NOTE(review): tool names are embedded verbatim, so a name containing
    /// `,`, `;`, `:`, or `=` would corrupt the format — Claude Code tool
    /// names are plain identifiers, so this is not hit in practice.
    pub fn to_cache_string(&self) -> String {
        // write! appends in place; writing to a String is infallible, so the
        // Results are ignored. Avoids the intermediate allocation of
        // push_str(&format!(...)) (clippy::format_push_string).
        use std::fmt::Write as _;
        let mut s = format!("tools:{},turns:{}", self.total_tool_uses, self.total_turns);
        if let Some(name) = &self.last_tool_name {
            let _ = write!(s, ",last:{name}");
        }
        for (i, (name, count)) in self.tool_counts.iter().enumerate() {
            // ';' opens the breakdown section; ',' separates later entries.
            let sep = if i == 0 { ';' } else { ',' };
            let _ = write!(s, "{sep}{name}={count}");
        }
        s
    }

    /// Deserialize from the format produced by [`Self::to_cache_string`].
    ///
    /// Parsing is lenient: unknown keys are ignored and unparseable numbers
    /// fall back to 0, so a stale or partially corrupt cache entry degrades
    /// to zeroed stats rather than an error. Always returns `Some`; the
    /// `Option` keeps the signature uniform with fallible cache readers.
    pub fn from_cache_string(s: &str) -> Option<Self> {
        let mut stats = Self::default();
        // Split on ';' — first part is the summary, the rest the per-tool counts.
        let (summary, breakdown) = s.split_once(';').unwrap_or((s, ""));
        for part in summary.split(',') {
            if let Some((key, val)) = part.split_once(':') {
                match key {
                    "tools" => stats.total_tool_uses = val.parse().unwrap_or(0),
                    "turns" => stats.total_turns = val.parse().unwrap_or(0),
                    // Empty "last:" means no tool was recorded; leave it None.
                    "last" if !val.is_empty() => stats.last_tool_name = Some(val.to_string()),
                    _ => {}
                }
            }
        }
        // An empty breakdown yields one empty segment with no '=', which the
        // split_once filter drops — no explicit is_empty() guard needed.
        for part in breakdown.split(',') {
            if let Some((name, count_str)) = part.split_once('=') {
                let count: u64 = count_str.parse().unwrap_or(0);
                // Zero counts carry no information; dropping them keeps
                // round-trips stable.
                if count > 0 {
                    stats.tool_counts.push((name.to_string(), count));
                }
            }
        }
        Some(stats)
    }
}
/// Parse a Claude Code transcript JSONL file and extract tool use and turn counts.
/// `skip_lines` skips that many lines from the start (used after /clear to ignore
/// pre-clear entries in the same transcript file).
///
/// Transcript format (one JSON object per line):
/// - `{"type": "user", ...}` — a user turn
/// - `{"type": "assistant", "message": {"content": [{"type": "tool_use", "name": "Read"}, ...]}}` — tool uses
pub fn parse_transcript(path: &Path, skip_lines: usize) -> Option<TranscriptStats> {
    let file = std::fs::File::open(path).ok()?;
    let reader = std::io::BufReader::new(file);
    let mut stats = TranscriptStats::default();
    let mut per_tool: HashMap<String, u64> = HashMap::new();

    // Lazily yield one parsed JSON value per usable line. Read errors, blank
    // lines, and malformed JSON are all silently dropped — a transcript that
    // is being appended to concurrently may have a truncated final line.
    let entries = reader
        .lines()
        .skip(skip_lines)
        .filter_map(Result::ok)
        .filter(|line| !line.is_empty())
        .filter_map(|line| serde_json::from_str::<serde_json::Value>(&line).ok());

    for entry in entries {
        match entry.get("type").and_then(|t| t.as_str()) {
            // Each user-typed entry is one turn.
            Some("user") => stats.total_turns += 1,
            // Assistant entries may carry zero or more tool_use content blocks.
            Some("assistant") => {
                let blocks = entry
                    .get("message")
                    .and_then(|m| m.get("content"))
                    .and_then(|c| c.as_array());
                for block in blocks.into_iter().flatten() {
                    if block.get("type").and_then(|t| t.as_str()) != Some("tool_use") {
                        continue;
                    }
                    stats.total_tool_uses += 1;
                    if let Some(tool) = block.get("name").and_then(|n| n.as_str()) {
                        stats.last_tool_name = Some(tool.to_string());
                        *per_tool.entry(tool.to_string()).or_insert(0) += 1;
                    }
                }
            }
            // Missing or unrecognized entry types contribute nothing.
            _ => {}
        }
    }

    // Sort the per-tool breakdown by count descending.
    let mut ranked: Vec<(String, u64)> = per_tool.into_iter().collect();
    ranked.sort_by(|lhs, rhs| rhs.1.cmp(&lhs.1));
    stats.tool_counts = ranked;
    Some(stats)
}