diff --git a/.claude/memory/MEMORY.md b/.claude/memory/MEMORY.md new file mode 100644 index 0000000..461c064 --- /dev/null +++ b/.claude/memory/MEMORY.md @@ -0,0 +1,85 @@ +# netfelix-audio-fix — Project Memory + +## What it does +Bun + Hono JSON REST API + React 19 SPA to scan a Jellyfin library, compute which audio/subtitle +tracks to remove from each file, let you review and edit those decisions, then +execute FFmpeg (copy mode) to strip/reorder streams. Remote nodes via SSH. + +## Key technical decisions +- **Runtime**: Bun + Hono JSON REST API backend; React 19 SPA frontend via Vite +- **DB**: bun:sqlite WAL mode → `data/netfelix.db` (server-side; PGlite not applicable) +- **Frontend stack**: React 19 + TanStack Router + Zustand + Tailwind v4 + cn() utilities +- **Code quality**: Biome (formatting + linting) — no ESLint/Prettier +- **Path alias**: `~/` → `src/` (vite.config.ts resolve.alias + tsconfig.json paths) +- **Dev**: `bun run dev` runs Hono API (:3000) + Vite (:5173) concurrently; Vite proxies /api/* +- **Prod**: `bun run build` → Vite builds to `dist/`; Hono serves dist/ + /api/* routes +- **SSH keys**: uploaded via file input, stored as PEM text in `nodes.private_key` +- Two tsconfigs: `tsconfig.json` (frontend, DOM lib), `tsconfig.server.json` (backend, bun-types) + +## Project structure +``` +server/ ← Backend (Bun + Hono, JSON API at /api/*) + index.tsx ← entry point, Bun.serve, CORS for dev, static SPA serve + types.ts ← server-side interfaces + db/index.ts ← getDb(), getConfig(), setConfig(), getEnvLockedKeys() + db/schema.ts ← SCHEMA DDL string + DEFAULT_CONFIG + services/ + jellyfin.ts / radarr.ts / sonarr.ts / analyzer.ts / ffmpeg.ts / ssh.ts + api/ + dashboard.ts / scan.ts / review.ts / execute.ts / nodes.ts / setup.ts / subtitles.ts +src/ ← Frontend (React SPA, built with Vite) + main.tsx ← entry, RouterProvider + index.css ← Tailwind v4 @import + routeTree.gen.ts ← auto-generated by TanStack Router (gitignored) + routes/ + __root.tsx ← nav 
layout with Link components + index.tsx / scan.tsx / execute.tsx / nodes.tsx / setup.tsx + review.tsx ← layout route with Audio/Subtitles tab bar + Outlet + review/index.tsx (redirect → /review/audio) + review/audio/index.tsx ($filter) / review/audio/$id.tsx + review/subtitles/index.tsx ($filter) / review/subtitles/$id.tsx + features/ + dashboard/DashboardPage.tsx + scan/ScanPage.tsx (SSE for live progress) + review/AudioListPage.tsx / AudioDetailPage.tsx + subtitles/SubtitleListPage.tsx / SubtitleDetailPage.tsx + execute/ExecutePage.tsx (SSE for job updates) + nodes/NodesPage.tsx + setup/SetupPage.tsx + shared/ + lib/utils.ts (cn()) / api.ts (typed fetch) / types.ts / lang.ts (LANG_NAMES) + components/ui/badge.tsx / button.tsx / input.tsx / select.tsx / textarea.tsx / alert.tsx +biome.json / vite.config.ts / tsconfig.json / tsconfig.server.json / index.html +``` + +## Rules: what gets kept + +- Video/Data streams: always keep +- Audio: keep original_language + configured `audio_languages` (if OG unknown → keep all, flag needs_review) +- Audio order: OG first, then additional languages in `audio_languages` config order +- Subtitles: ALL removed from container, extracted to sidecar files on disk +- `subtitle_files` table tracks extracted sidecar files (file manager UI) +- `review_plans.subs_extracted` flag tracks extraction status +- `is_noop` only considers audio changes (subtitle extraction is implicit) + +## Scan flow +1. Jellyfin paginated API → upsert media_items + media_streams +2. Cross-check with Radarr (movies) / Sonarr (episodes) for language +3. analyzeItem() → upsert review_plans + stream_decisions +4. 
SSE events stream progress to browser (React EventSource in ScanPage) + +## Running locally +```fish +mise exec bun -- bun run dev # concurrent: Hono API :3000 + Vite :5173 +mise exec bun -- bun run build # build frontend to dist/ +mise exec bun -- bun start # production: Hono serves dist/ + API on :3000 +``` + +## Workflow rules +- **Always bump version** in `package.json` before committing/pushing. CalVer format: `YYYY.MM.DD` (append `.N` suffix for same-day releases). + +## Docker deploy (Unraid) +```fish +docker compose up -d # port 3000, data volume at ./data/ +``` +Note: Docker must serve the built dist/ — run `bun run build` before building the Docker image. diff --git a/.project.toml b/.project.toml new file mode 100644 index 0000000..3dab605 --- /dev/null +++ b/.project.toml @@ -0,0 +1,13 @@ +[project] +name = "Netfelix Audio Fix" +description = "Web-App mit Bun-Backend und Vite-Frontend für Audio-Verarbeitung/Korrektur." +status = "aktiv" +priority = "mittel" +location = "~/Developer/netfelix-audio-fix" + +[dates] +created = "2026-02-26" +last_activity = "2026-03-28" + +[notes] +next_steps = "Aktive Entwicklung fortsetzen" diff --git a/docs/superpowers/plans/2026-03-27-unified-pipeline.md b/docs/superpowers/plans/2026-03-27-unified-pipeline.md new file mode 100644 index 0000000..784452e --- /dev/null +++ b/docs/superpowers/plans/2026-03-27-unified-pipeline.md @@ -0,0 +1,1585 @@ +# Unified Media Processing Pipeline — Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Unify subtitle extraction, audio cleanup, and Apple compatibility transcoding into a single pipeline per file with a Kanban board UI, confidence-based auto-approval, and job scheduling. 
+ +**Architecture:** The analyzer evaluates all three pipeline steps and produces one review plan with one FFmpeg command per file. A new Apple compatibility service maps codecs to transcode targets. The Kanban board replaces the separate scan/review/execute pages. A scheduler controls job timing (sleep between jobs, time window). + +**Tech Stack:** Bun + Hono (backend), React 19 + TanStack Router + Tailwind v4 (frontend), bun:sqlite, FFmpeg + +--- + +## File Structure + +### New files +| File | Responsibility | +|------|---------------| +| `server/services/apple-compat.ts` | Codec compatibility checks, transcode target mapping | +| `server/services/scheduler.ts` | Job sleep and schedule window logic | +| `src/routes/pipeline.tsx` | TanStack Router route for pipeline page | +| `src/features/pipeline/PipelinePage.tsx` | Kanban board layout with 5 columns | +| `src/features/pipeline/ReviewColumn.tsx` | Review column with series grouping, batch approve | +| `src/features/pipeline/QueueColumn.tsx` | Queued jobs waiting for execution | +| `src/features/pipeline/ProcessingColumn.tsx` | Active job with progress bar, schedule status | +| `src/features/pipeline/DoneColumn.tsx` | Completed items | +| `src/features/pipeline/PipelineCard.tsx` | Card component for movies/episodes | +| `src/features/pipeline/SeriesCard.tsx` | Collapsible series card with episode list | +| `src/features/pipeline/ScheduleControls.tsx` | Sleep/schedule config UI | + +### Modified files +| File | Changes | +|------|---------| +| `server/db/schema.ts` | New columns on review_plans, stream_decisions; new config defaults | +| `server/db/index.ts` | Migrations for new columns | +| `server/types.ts` | Updated interfaces: ReviewPlan, StreamDecision, Job, new AppleCompat types | +| `server/services/analyzer.ts` | Unified 3-step analysis with confidence scoring | +| `server/services/ffmpeg.ts` | Per-stream codec args (copy vs transcode), combined sub+audio command | +| `server/api/scan.ts` | Confidence 
scoring during scan | +| `server/api/review.ts` | Batch "approve up to here", series-level language | +| `server/api/execute.ts` | Scheduler integration, FFmpeg progress parsing | +| `server/index.tsx` | Add pipeline API route | +| `src/shared/lib/types.ts` | Mirror backend type changes | +| `src/routes/__root.tsx` | Replace nav links with Pipeline tab | + +--- + +### Task 1: Schema Migrations and Type Updates + +**Files:** +- Modify: `server/db/schema.ts` +- Modify: `server/db/index.ts` +- Modify: `server/types.ts` +- Modify: `src/shared/lib/types.ts` + +- [ ] **Step 1: Add new columns to schema migrations in `server/db/index.ts`** + +Add migrations after the existing ones (around line 40 in `getDb()`). Find the existing migration block that adds `subs_extracted` and append: + +```typescript +// Apple compat pipeline columns +db.run(`ALTER TABLE review_plans ADD COLUMN confidence TEXT NOT NULL DEFAULT 'low'`); +db.run(`ALTER TABLE review_plans ADD COLUMN apple_compat TEXT`); +db.run(`ALTER TABLE review_plans ADD COLUMN job_type TEXT NOT NULL DEFAULT 'copy'`); +db.run(`ALTER TABLE stream_decisions ADD COLUMN transcode_codec TEXT`); +``` + +Wrap each in try/catch like the existing migrations (SQLite throws if column already exists). 
+ +- [ ] **Step 2: Add new config defaults in `server/db/schema.ts`** + +Add to `DEFAULT_CONFIG` (after `scan_running`): + +```typescript +job_sleep_seconds: '0', +schedule_enabled: '0', +schedule_start: '01:00', +schedule_end: '07:00', +``` + +- [ ] **Step 3: Update `server/types.ts` — ReviewPlan interface** + +Add new fields to `ReviewPlan` (around line 46): + +```typescript +export interface ReviewPlan { + id: number; + item_id: number; + status: 'pending' | 'approved' | 'skipped' | 'done' | 'error'; + is_noop: number; + confidence: 'high' | 'low'; + apple_compat: 'direct_play' | 'remux' | 'audio_transcode' | null; + job_type: 'copy' | 'transcode'; + subs_extracted: number; + notes: string | null; + reviewed_at: string | null; + created_at: string; +} +``` + +- [ ] **Step 4: Update `server/types.ts` — StreamDecision interface** + +Add `transcode_codec` to `StreamDecision` (around line 69): + +```typescript +export interface StreamDecision { + id: number; + plan_id: number; + stream_id: number; + action: 'keep' | 'remove'; + target_index: number | null; + custom_title: string | null; + transcode_codec: string | null; +} +``` + +- [ ] **Step 5: Update `server/types.ts` — PlanResult interface** + +Extend `PlanResult` (around line 115): + +```typescript +export interface PlanResult { + is_noop: boolean; + has_subs: boolean; + confidence: 'high' | 'low'; + apple_compat: 'direct_play' | 'remux' | 'audio_transcode' | null; + job_type: 'copy' | 'transcode'; + decisions: { stream_id: number; action: 'keep' | 'remove'; target_index: number | null; transcode_codec: string | null }[]; + notes: string[]; +} +``` + +- [ ] **Step 6: Mirror type changes in `src/shared/lib/types.ts`** + +Update `ReviewPlan`, `StreamDecision` to match server types. Add `confidence`, `apple_compat`, `job_type` to ReviewPlan and `transcode_codec` to StreamDecision. 
+ +- [ ] **Step 7: Verify migrations run** + +Run: `bun run dev` — check that the server starts without errors and the database has the new columns. + +- [ ] **Step 8: Commit** + +```bash +git add server/db/schema.ts server/db/index.ts server/types.ts src/shared/lib/types.ts +git commit -m "add schema migrations for unified pipeline: confidence, apple_compat, job_type, transcode_codec" +``` + +--- + +### Task 2: Apple Compatibility Service + +**Files:** +- Create: `server/services/apple-compat.ts` + +- [ ] **Step 1: Create `server/services/apple-compat.ts`** + +```typescript +// Codec sets and transcode target mapping for Apple device compatibility. +// Apple natively decodes: AAC, AC3, EAC3, ALAC, FLAC, MP3, PCM, Opus +// Everything else (DTS family, TrueHD family) needs transcoding. + +const APPLE_COMPATIBLE_AUDIO = new Set([ + 'aac', 'ac3', 'eac3', 'alac', 'flac', 'mp3', + 'pcm_s16le', 'pcm_s24le', 'pcm_s32le', 'pcm_f32le', + 'pcm_s16be', 'pcm_s24be', 'pcm_s32be', 'pcm_f64le', + 'opus', +]); + +// Lossless source codecs — get FLAC in MKV, EAC3 in MP4 +const LOSSLESS_CODECS = new Set([ + 'dts', // DTS-HD MA reports as 'dts' with profile + 'truehd', +]); + +// Codec strings Jellyfin may report for DTS variants +const DTS_CODECS = new Set([ + 'dts', 'dca', +]); + +const TRUEHD_CODECS = new Set([ + 'truehd', +]); + +export function isAppleCompatible(codec: string): boolean { + return APPLE_COMPATIBLE_AUDIO.has(codec.toLowerCase()); +} + +/** Maps (codec, profile, container) → target codec for transcoding. */ +export function transcodeTarget( + codec: string, + profile: string | null, + container: string | null, +): string | null { + const c = codec.toLowerCase(); + const isMkv = !container || container.toLowerCase() === 'mkv' || container.toLowerCase() === 'matroska'; + + if (isAppleCompatible(c)) return null; // no transcode needed + + // DTS-HD MA and DTS:X are lossless → FLAC in MKV, EAC3 in MP4 + if (DTS_CODECS.has(c)) { + const p = (profile ?? 
'').toLowerCase(); + const isLossless = p.includes('ma') || p.includes('hd ma') || p.includes('x'); + if (isLossless) return isMkv ? 'flac' : 'eac3'; + // Lossy DTS variants → EAC3 + return 'eac3'; + } + + // TrueHD (including Atmos) → FLAC in MKV, EAC3 in MP4 + if (TRUEHD_CODECS.has(c)) { + return isMkv ? 'flac' : 'eac3'; + } + + // Any other incompatible codec → EAC3 as safe fallback + return 'eac3'; +} + +/** Determine overall Apple compatibility for a set of kept audio streams. */ +export function computeAppleCompat( + keptAudioCodecs: string[], + container: string | null, +): 'direct_play' | 'remux' | 'audio_transcode' { + const hasIncompatible = keptAudioCodecs.some(c => !isAppleCompatible(c)); + if (hasIncompatible) return 'audio_transcode'; + + const isMkv = !container || container.toLowerCase() === 'mkv' || container.toLowerCase() === 'matroska'; + if (isMkv) return 'remux'; + + return 'direct_play'; +} +``` + +- [ ] **Step 2: Commit** + +```bash +git add server/services/apple-compat.ts +git commit -m "add apple compatibility service: codec checks, transcode target mapping" +``` + +--- + +### Task 3: Unified Analyzer + +**Files:** +- Modify: `server/services/analyzer.ts` + +- [ ] **Step 1: Read the current analyzer** + +Read `server/services/analyzer.ts` fully to understand the existing `analyzeItem()`, `decideAction()`, `assignTargetOrder()`, `langRank()`, and `checkAudioOrderChanged()` functions. 
+ +- [ ] **Step 2: Add apple-compat imports and update `PlanResult`** + +At the top of `server/services/analyzer.ts`, add: + +```typescript +import { isAppleCompatible, transcodeTarget, computeAppleCompat } from './apple-compat'; +``` + +- [ ] **Step 3: Update `analyzeItem()` to compute transcode decisions** + +After `assignTargetOrder()` runs (which determines which streams are kept and their order), add a new step that iterates over kept audio streams and computes `transcode_codec`: + +```typescript +// Step 3: Apple compatibility — compute transcode targets for kept audio +for (const d of decisions) { + if (d.action === 'keep') { + const stream = streams.find(s => s.id === d.stream_id); + if (stream && stream.type === 'Audio') { + d.transcode_codec = transcodeTarget( + stream.codec ?? '', + stream.title, // Jellyfin sometimes puts DTS profile info in title + item.container, + ); + } + } +} +``` + +- [ ] **Step 4: Compute confidence, apple_compat, and job_type on the PlanResult** + +Before returning from `analyzeItem()`, compute the new fields: + +```typescript +const keptAudioCodecs = decisions + .filter(d => d.action === 'keep') + .map(d => streams.find(s => s.id === d.stream_id)) + .filter(s => s && s.type === 'Audio') + .map(s => s!.codec ?? ''); + +const needsTranscode = decisions.some(d => d.transcode_codec != null); +const apple_compat = computeAppleCompat(keptAudioCodecs, item.container); +const job_type = needsTranscode ? 'transcode' as const : 'copy' as const; + +// Confidence is computed during scan (depends on source agreement), +// but we can detect the "zero audio tracks match OG" case here +const ogLang = item.original_language; +const hasOgAudio = ogLang + ? 
streams.some(s => s.type === 'Audio' && s.language === ogLang) + : false; +const noOgMatch = ogLang && !hasOgAudio; +``` + +- [ ] **Step 5: Update is_noop to include Apple compatibility** + +Update the `is_noop` computation to also check that no audio transcoding is needed: + +```typescript +const is_noop = !anyAudioRemoved && !audioOrderChanged && !has_subs && !needsTranscode; +``` + +Where `has_subs` means there are embedded subtitles to extract (already computed). Previously `is_noop` only checked audio; now it checks all three pipeline steps. + +- [ ] **Step 6: Return extended PlanResult** + +```typescript +return { + is_noop, + has_subs, + confidence: noOgMatch ? 'low' : 'low', // base confidence, scan upgrades to 'high' + apple_compat, + job_type, + decisions: decisions.map(d => ({ + stream_id: d.stream_id, + action: d.action, + target_index: d.target_index, + transcode_codec: d.transcode_codec ?? null, + })), + notes, +}; +``` + +- [ ] **Step 7: Verify existing scan still works** + +Run: `bun run dev`, trigger a scan with a few items, check that `review_plans` rows now have `confidence`, `apple_compat`, and `job_type` populated. + +- [ ] **Step 8: Commit** + +```bash +git add server/services/analyzer.ts +git commit -m "unify analyzer: 3-step pipeline with apple compat, transcode decisions, extended is_noop" +``` + +--- + +### Task 4: Update Scan to Store New Fields and Compute Confidence + +**Files:** +- Modify: `server/api/scan.ts` + +- [ ] **Step 1: Read `server/api/scan.ts`** + +Read the full file to understand the `runScan()` function and how it stores `review_plans` and `stream_decisions`. + +- [ ] **Step 2: Update review_plans INSERT to include new columns** + +Find the INSERT INTO review_plans statement and add the new columns: + +```sql +INSERT INTO review_plans (item_id, status, is_noop, confidence, apple_compat, job_type, notes) +VALUES (?, ?, ?, ?, ?, ?, ?) 
+ON CONFLICT(item_id) DO UPDATE SET + status = CASE WHEN review_plans.status IN ('done','error') THEN 'pending' ELSE review_plans.status END, + is_noop = excluded.is_noop, + confidence = excluded.confidence, + apple_compat = excluded.apple_compat, + job_type = excluded.job_type, + notes = excluded.notes +``` + +Pass `plan.confidence`, `plan.apple_compat`, `plan.job_type` as bind params. + +- [ ] **Step 3: Update stream_decisions INSERT to include transcode_codec** + +Find the INSERT INTO stream_decisions statement and add: + +```sql +INSERT INTO stream_decisions (plan_id, stream_id, action, target_index, transcode_codec) +VALUES (?, ?, ?, ?, ?) +``` + +Pass `d.transcode_codec` as the fifth bind param. + +- [ ] **Step 4: Compute confidence during scan** + +After the language resolution logic (Jellyfin vs Radarr vs Sonarr), compute confidence based on source agreement. Add this before the `analyzeItem()` call: + +```typescript +// Compute confidence from source agreement +let confidence: 'high' | 'low' = 'low'; +const jellyfinLang = jellyfinOrigLang; // from Jellyfin metadata +const arrLang = radarrLang ?? sonarrLang ?? null; // from *arr services + +if (!origLang) { + confidence = 'low'; // unknown language +} else if (needsReview) { + confidence = 'low'; // sources disagree +} else if (jellyfinLang && arrLang && jellyfinLang === arrLang) { + confidence = 'high'; // two sources agree +} else if ((jellyfinLang || arrLang) && !needsReview) { + confidence = 'high'; // single source, no conflict +} +``` + +Override the analyzer's base confidence with this computed value before storing. 
+ +- [ ] **Step 5: Verify scan populates new fields** + +Run scan, then query: +```sql +SELECT confidence, apple_compat, job_type FROM review_plans LIMIT 10; +SELECT transcode_codec FROM stream_decisions WHERE transcode_codec IS NOT NULL LIMIT 10; +``` + +- [ ] **Step 6: Commit** + +```bash +git add server/api/scan.ts +git commit -m "store confidence, apple_compat, job_type, transcode_codec during scan" +``` + +--- + +### Task 5: Unified FFmpeg Command Builder + +**Files:** +- Modify: `server/services/ffmpeg.ts` + +- [ ] **Step 1: Read `server/services/ffmpeg.ts`** + +Read the full file, especially `buildCommand()`, `buildExtractOnlyCommand()`, `buildExtractionOutputs()`, and `buildStreamFlags()`. + +- [ ] **Step 2: Create `buildPipelineCommand()` function** + +This replaces the separate `buildCommand()` and `buildExtractOnlyCommand()` with a single function that handles all three pipeline steps: + +```typescript +/** + * Build a single FFmpeg command that: + * 1. Extracts subtitles to sidecar files + * 2. Remuxes with reordered/filtered audio + * 3. Transcodes incompatible audio codecs + */ +export function buildPipelineCommand( + item: { file_path: string; container: string | null }, + streams: MediaStream[], + decisions: (StreamDecision & { stream: MediaStream })[], +): { command: string; extractedFiles: { path: string; language: string | null; codec: string | null; is_forced: number; is_hearing_impaired: number }[] } { + const inputPath = item.file_path; + const ext = item.container?.toLowerCase() === 'mp4' ? 
'mp4' : 'mkv'; + const tmpPath = inputPath.replace(/\.[^.]+$/, `.tmp.${ext}`); + + // --- Subtitle extraction outputs --- + const subStreams = streams.filter(s => s.type === 'Subtitle'); + const extraction = computeExtractionEntries(inputPath, subStreams); + const subOutputs = buildExtractionOutputs(extraction, streams); + + // --- Kept streams for remuxed output --- + const kept = decisions + .filter(d => d.action === 'keep') + .sort((a, b) => (a.target_index ?? 999) - (b.target_index ?? 999)); + + // Build -map flags using type-relative indices + const maps = buildMaps(kept.map(d => d.stream), streams); + + // Build per-stream codec flags + const codecFlags: string[] = ['-c:v copy']; + let audioIdx = 0; + for (const d of kept) { + if (d.stream.type === 'Audio') { + if (d.transcode_codec) { + codecFlags.push(`-c:a:${audioIdx} ${d.transcode_codec}`); + // For EAC3, set a reasonable bitrate based on channel count + if (d.transcode_codec === 'eac3') { + const bitrate = (d.stream.channels ?? 2) >= 6 ? '640k' : '256k'; + codecFlags.push(`-b:a:${audioIdx} ${bitrate}`); + } + } else { + codecFlags.push(`-c:a:${audioIdx} copy`); + } + audioIdx++; + } + } + + // Disposition + metadata flags for audio + const streamFlags = buildStreamFlags(kept, streams); + + // Assemble command parts + const parts: string[] = ['ffmpeg -y', `-i ${shellQuote(inputPath)}`]; + + // Subtitle extraction outputs first + for (const out of subOutputs) { + parts.push(out); + } + + // Map flags for remuxed output + parts.push(...maps); + + // Codec flags + parts.push(...codecFlags); + + // Stream flags (disposition, metadata) + parts.push(...streamFlags); + + // Output file + parts.push(shellQuote(tmpPath)); + + const command = parts.join(' \\\n ') + + ` && mv ${shellQuote(tmpPath)} ${shellQuote(inputPath)}`; + + return { + command, + extractedFiles: extraction.map(e => ({ + path: e.outputPath, + language: e.language, + codec: e.codec, + is_forced: e.is_forced ? 
1 : 0, + is_hearing_impaired: e.is_hearing_impaired ? 1 : 0, + })), + }; +} +``` + +- [ ] **Step 3: Update `buildStreamFlags()` to accept decisions with transcode info** + +The existing function builds disposition and metadata flags. Update it to also handle title metadata for transcoded streams (e.g., append " (Transcoded)" to title if desired, or keep original title). No change needed if we keep the same title logic. + +- [ ] **Step 4: Update review.ts `loadItemDetail()` to use `buildPipelineCommand()`** + +In `server/api/review.ts`, update the `loadItemDetail()` function to call `buildPipelineCommand()` instead of the separate `buildCommand()` / `buildExtractOnlyCommand()`. This ensures the detail page shows the unified command. + +- [ ] **Step 5: Update execute.ts `runJob()` post-processing** + +After a job completes successfully, the current code calls `predictExtractedFiles()`. Update to use the `extractedFiles` from the command builder (stored alongside the job, or recomputed from decisions). + +- [ ] **Step 6: Verify commands generate correctly** + +Scan a file with DTS audio and embedded subtitles. Check the review detail page shows a single FFmpeg command with subtitle extraction, audio reordering, and `-c:a:N eac3` for the DTS stream. 
+
+- [ ] **Step 7: Commit**
+
+```bash
+git add server/services/ffmpeg.ts server/api/review.ts server/api/execute.ts
+git commit -m "unified FFmpeg command builder: single command for sub extraction, audio cleanup, transcode"
+```
+
+---
+
+### Task 6: Job Scheduler Service
+
+**Files:**
+- Create: `server/services/scheduler.ts`
+- Modify: `server/api/execute.ts`
+
+- [ ] **Step 1: Create `server/services/scheduler.ts`**
+
+```typescript
+import { getConfig, setConfig } from '../db';
+
+export interface SchedulerState {
+  job_sleep_seconds: number;
+  schedule_enabled: boolean;
+  schedule_start: string; // "HH:MM"
+  schedule_end: string; // "HH:MM"
+}
+
+export function getSchedulerState(): SchedulerState {
+  return {
+    job_sleep_seconds: parseInt(getConfig('job_sleep_seconds') ?? '0', 10),
+    schedule_enabled: getConfig('schedule_enabled') === '1',
+    schedule_start: getConfig('schedule_start') ?? '01:00',
+    schedule_end: getConfig('schedule_end') ?? '07:00',
+  };
+}
+
+export function updateSchedulerState(updates: Partial<SchedulerState>): void {
+  if (updates.job_sleep_seconds != null) setConfig('job_sleep_seconds', String(updates.job_sleep_seconds));
+  if (updates.schedule_enabled != null) setConfig('schedule_enabled', updates.schedule_enabled ? '1' : '0');
+  if (updates.schedule_start != null) setConfig('schedule_start', updates.schedule_start);
+  if (updates.schedule_end != null) setConfig('schedule_end', updates.schedule_end);
+}
+
+/** Check if current time is within the schedule window. */
+export function isInScheduleWindow(): boolean {
+  const state = getSchedulerState();
+  if (!state.schedule_enabled) return true; // no schedule = always allowed
+
+  const now = new Date();
+  const minutes = now.getHours() * 60 + now.getMinutes();
+  const start = parseTime(state.schedule_start);
+  const end = parseTime(state.schedule_end);
+
+  // Handle overnight windows (e.g., 23:00 → 07:00)
+  if (start <= end) {
+    return minutes >= start && minutes < end;
+  } else {
+    return minutes >= start || minutes < end;
+  }
+}
+
+/** Returns milliseconds until the next schedule window opens. */
+export function msUntilWindow(): number {
+  const state = getSchedulerState();
+  const now = new Date();
+  const minutes = now.getHours() * 60 + now.getMinutes();
+  const start = parseTime(state.schedule_start);
+
+  if (minutes < start) {
+    return (start - minutes) * 60_000;
+  } else {
+    // Next day
+    return (24 * 60 - minutes + start) * 60_000;
+  }
+}
+
+/** Returns the schedule_start time as "HH:MM" for display. */
+export function nextWindowTime(): string {
+  return getSchedulerState().schedule_start;
+}
+
+function parseTime(hhmm: string): number {
+  const [h, m] = hhmm.split(':').map(Number);
+  return h * 60 + m;
+}
+
+/** Sleep for the configured duration between jobs. */
+export function sleepBetweenJobs(): Promise<void> {
+  const seconds = getSchedulerState().job_sleep_seconds;
+  if (seconds <= 0) return Promise.resolve();
+  return new Promise(resolve => setTimeout(resolve, seconds * 1000));
+}
+
+/** Wait until the schedule window opens. Resolves immediately if already in window. */
+export function waitForWindow(): Promise<void> {
+  if (isInScheduleWindow()) return Promise.resolve();
+  const ms = msUntilWindow();
+  return new Promise(resolve => setTimeout(resolve, ms));
+}
+```
+
+- [ ] **Step 2: Integrate scheduler into `server/api/execute.ts`**
+
+Update `runSequential()` to check the schedule window before each job and sleep between jobs:
+
+```typescript
+import { sleepBetweenJobs, waitForWindow, isInScheduleWindow, getSchedulerState, nextWindowTime } from '../services/scheduler';
+
+async function runSequential(jobs: Job[], nodeId: string | null) {
+  const key = targetKey(nodeId);
+  if (runningTargets.has(key)) return;
+  runningTargets.add(key);
+  try {
+    for (const job of jobs) {
+      // Wait for schedule window
+      if (!isInScheduleWindow()) {
+        emitJobUpdate({ type: 'queue_status', data: { status: 'paused', until: nextWindowTime() } });
+        await waitForWindow();
+      }
+
+      await runJob(job);
+
+      // Sleep between jobs
+      const state = getSchedulerState();
+      if (state.job_sleep_seconds > 0) {
+        emitJobUpdate({ type: 'queue_status', data: { status: 'sleeping', seconds: state.job_sleep_seconds } });
+        await sleepBetweenJobs();
+      }
+    }
+  } finally {
+    runningTargets.delete(key);
+  }
+}
+```
+
+- [ ] **Step 3: Add scheduler API endpoints**
+
+Add to `server/api/execute.ts`:
+
+```typescript
+// GET /scheduler — current scheduler state
+app.get('/scheduler', (c) => {
+  return c.json(getSchedulerState());
+});
+
+// PATCH /scheduler — update scheduler settings
+app.patch('/scheduler', async (c) => {
+  const body = await c.req.json();
+  updateSchedulerState(body);
+  return c.json(getSchedulerState());
+});
+```
+
+- [ ] **Step 4: Add FFmpeg progress parsing**
+
+Add a helper to parse FFmpeg's stderr for transcode progress:
+
+```typescript
+/** Parse FFmpeg stderr line for progress. Returns seconds processed or null.
*/ +export function parseFFmpegProgress(line: string): number | null { + const match = line.match(/time=(\d+):(\d+):(\d+)\.(\d+)/); + if (!match) return null; + const [, h, m, s] = match.map(Number); + return h * 3600 + m * 60 + s; +} +``` + +Use this in `runJob()` to emit progress events for transcode jobs. The total duration comes from the media item's metadata (or from FFmpeg's initial output). + +- [ ] **Step 5: Emit progress SSE events** + +Add new SSE event types for the job runner: + +```typescript +// In runJob(), when processing stderr: +const progress = parseFFmpegProgress(line); +if (progress != null) { + emitJobUpdate({ + type: 'job_progress', + data: { id: job.id, seconds: progress, total: totalDuration }, + }); +} +``` + +- [ ] **Step 6: Commit** + +```bash +git add server/services/scheduler.ts server/api/execute.ts +git commit -m "add job scheduler: sleep between jobs, schedule window, FFmpeg progress parsing" +``` + +--- + +### Task 7: Pipeline API Endpoint + +**Files:** +- Modify: `server/api/review.ts` +- Modify: `server/index.tsx` + +- [ ] **Step 1: Add "approve up to here" endpoint** + +Add to `server/api/review.ts`: + +```typescript +// POST /approve-up-to/:id — approve this plan and all plans above it +// "Above" means: higher confidence first, then by item name, then by id +app.post('/approve-up-to/:id', async (c) => { + const targetId = Number(c.req.param('id')); + const db = getDb(); + + // Get the target plan's sort position + const target = db.query(`SELECT id, item_id FROM review_plans WHERE id = ?`).get(targetId) as any; + if (!target) return c.json({ error: 'Plan not found' }, 404); + + // Get all pending plans sorted by confidence (high first), then name + const pendingPlans = db.query(` + SELECT rp.id, rp.confidence, mi.name, mi.series_name + FROM review_plans rp + JOIN media_items mi ON mi.id = rp.item_id + WHERE rp.status = 'pending' AND rp.is_noop = 0 + ORDER BY + CASE rp.confidence WHEN 'high' THEN 0 ELSE 1 END, + 
COALESCE(mi.series_name, mi.name), + mi.season_number, + mi.episode_number, + mi.name + `).all() as any[]; + + // Find the target and approve everything up to and including it + const toApprove: number[] = []; + for (const plan of pendingPlans) { + toApprove.push(plan.id); + if (plan.id === targetId) break; + } + + // Batch approve + const stmt = db.prepare(`UPDATE review_plans SET status = 'approved', reviewed_at = datetime('now') WHERE id = ?`); + for (const id of toApprove) { + stmt.run(id); + } + + // Create jobs for approved plans + for (const id of toApprove) { + const detail = loadItemDetail(id); + if (detail?.command) { + db.run( + `INSERT INTO jobs (item_id, command, job_type, status) VALUES (?, ?, ?, 'pending')`, + [detail.item.id, detail.command, detail.plan.job_type], + ); + } + } + + return c.json({ approved: toApprove.length }); +}); +``` + +- [ ] **Step 2: Add series-level language endpoint** + +Add to `server/api/review.ts`: + +```typescript +// PATCH /series/:seriesKey/language — set OG language for all episodes in a series +app.patch('/series/:seriesKey/language', async (c) => { + const seriesKey = decodeURIComponent(c.req.param('seriesKey')); + const { language } = await c.req.json(); + const db = getDb(); + + // Update all items in the series + const items = db.query( + `SELECT id FROM media_items WHERE series_jellyfin_id = ? OR series_name = ?` + ).all(seriesKey, seriesKey) as { id: number }[]; + + for (const item of items) { + db.run(`UPDATE media_items SET original_language = ?, orig_lang_source = 'manual', needs_review = 0 WHERE id = ?`, [language, item.id]); + } + + // Re-analyze all episodes + const config = { + subtitleLanguages: JSON.parse(getConfig('subtitle_languages') ?? '[]'), + audioLanguages: JSON.parse(getConfig('audio_languages') ?? 
'[]'), + }; + + for (const item of items) { + await reanalyze(item.id, config); + } + + return c.json({ updated: items.length }); +}); +``` + +- [ ] **Step 3: Add pipeline summary endpoint** + +Add a new endpoint that returns data grouped by pipeline column: + +```typescript +// GET /pipeline — returns items grouped by pipeline stage +app.get('/pipeline', (c) => { + const db = getDb(); + + const review = db.query(` + SELECT rp.*, mi.name, mi.series_name, mi.series_jellyfin_id, + mi.season_number, mi.episode_number, mi.type, mi.container, + mi.original_language, mi.orig_lang_source, mi.file_path + FROM review_plans rp + JOIN media_items mi ON mi.id = rp.item_id + WHERE rp.status = 'pending' AND rp.is_noop = 0 + ORDER BY + CASE rp.confidence WHEN 'high' THEN 0 ELSE 1 END, + COALESCE(mi.series_name, mi.name), + mi.season_number, mi.episode_number + `).all(); + + const queued = db.query(` + SELECT j.*, mi.name, mi.series_name, mi.type, + rp.job_type, rp.apple_compat + FROM jobs j + JOIN media_items mi ON mi.id = j.item_id + JOIN review_plans rp ON rp.item_id = j.item_id + WHERE j.status = 'pending' + ORDER BY j.created_at + `).all(); + + const processing = db.query(` + SELECT j.*, mi.name, mi.series_name, mi.type, + rp.job_type, rp.apple_compat + FROM jobs j + JOIN media_items mi ON mi.id = j.item_id + JOIN review_plans rp ON rp.item_id = j.item_id + WHERE j.status = 'running' + `).all(); + + const done = db.query(` + SELECT j.*, mi.name, mi.series_name, mi.type, + rp.job_type, rp.apple_compat + FROM jobs j + JOIN media_items mi ON mi.id = j.item_id + JOIN review_plans rp ON rp.item_id = j.item_id + WHERE j.status IN ('done', 'error') + ORDER BY j.completed_at DESC + LIMIT 50 + `).all(); + + const noops = db.query(`SELECT COUNT(*) as count FROM review_plans WHERE is_noop = 1`).get() as any; + const scheduler = getSchedulerState(); + + return c.json({ review, queued, processing, done, noopCount: noops.count, scheduler }); +}); +``` + +- [ ] **Step 4: Register pipeline 
routes in `server/index.tsx`** + +The pipeline endpoints live on the existing review and execute routes. No new route registration needed — the endpoints are added to the existing Hono apps. + +- [ ] **Step 5: Commit** + +```bash +git add server/api/review.ts server/api/execute.ts +git commit -m "add pipeline API: approve-up-to, series language, pipeline summary, scheduler endpoints" +``` + +--- + +### Task 8: Kanban Board — Route and Layout + +**Files:** +- Create: `src/routes/pipeline.tsx` +- Create: `src/features/pipeline/PipelinePage.tsx` +- Modify: `src/routes/__root.tsx` + +- [ ] **Step 1: Create route file `src/routes/pipeline.tsx`** + +```typescript +import { createFileRoute } from '@tanstack/react-router'; +import { PipelinePage } from '~/features/pipeline/PipelinePage'; + +export const Route = createFileRoute('/pipeline')({ + component: PipelinePage, +}); +``` + +- [ ] **Step 2: Create `src/features/pipeline/PipelinePage.tsx`** + +```typescript +import { useCallback, useEffect, useRef, useState } from 'react'; +import { api } from '~/shared/lib/api'; +import { ReviewColumn } from './ReviewColumn'; +import { QueueColumn } from './QueueColumn'; +import { ProcessingColumn } from './ProcessingColumn'; +import { DoneColumn } from './DoneColumn'; +import { ScheduleControls } from './ScheduleControls'; + +interface PipelineData { + review: any[]; + queued: any[]; + processing: any[]; + done: any[]; + noopCount: number; + scheduler: { + job_sleep_seconds: number; + schedule_enabled: boolean; + schedule_start: string; + schedule_end: string; + }; +} + +export function PipelinePage() { + const [data, setData] = useState(null); + const [loading, setLoading] = useState(true); + + const load = useCallback(async () => { + const res = await api.get('/api/review/pipeline'); + setData(res); + setLoading(false); + }, []); + + useEffect(() => { load(); }, [load]); + + // SSE for live updates + useEffect(() => { + const es = new EventSource('/api/execute/events'); + 
es.addEventListener('job_update', () => load()); + es.addEventListener('job_progress', (e) => { + const progress = JSON.parse(e.data); + setData(prev => prev ? { ...prev, _progress: progress } : prev); + }); + es.addEventListener('queue_status', (e) => { + const status = JSON.parse(e.data); + setData(prev => prev ? { ...prev, _queueStatus: status } : prev); + }); + return () => es.close(); + }, [load]); + + if (loading || !data) return
Loading pipeline...
; + + return ( +
+
+

Pipeline

+
+ {data.noopCount} files already processed + +
+
+
+ + + + +
+
+ ); +} +``` + +- [ ] **Step 3: Update nav in `src/routes/__root.tsx`** + +Replace the existing scan/review/execute nav links with a single Pipeline link. Keep the subtitle manager, nodes, and settings links: + +```typescript +Pipeline +Subtitles +Nodes +Settings +``` + +Keep the old routes functional (don't delete them) but remove them from the nav. Users can still access them via URL if needed. + +- [ ] **Step 4: Commit** + +```bash +git add src/routes/pipeline.tsx src/features/pipeline/PipelinePage.tsx src/routes/__root.tsx +git commit -m "add pipeline route and Kanban board layout with 4 columns" +``` + +--- + +### Task 9: Kanban Board — Review Column with Cards + +**Files:** +- Create: `src/features/pipeline/ReviewColumn.tsx` +- Create: `src/features/pipeline/PipelineCard.tsx` +- Create: `src/features/pipeline/SeriesCard.tsx` + +- [ ] **Step 1: Create `src/features/pipeline/PipelineCard.tsx`** + +Card component for a single media item: + +```typescript +import { Badge } from '~/shared/components/ui/badge'; +import { Select } from '~/shared/components/ui/select'; +import { LANG_NAMES, langName } from '~/shared/lib/lang'; + +interface PipelineCardProps { + item: any; + onLanguageChange?: (lang: string) => void; + showApproveUpTo?: boolean; + onApproveUpTo?: () => void; +} + +export function PipelineCard({ item, onLanguageChange, showApproveUpTo, onApproveUpTo }: PipelineCardProps) { + const title = item.type === 'Episode' + ? `S${String(item.season_number).padStart(2, '0')}E${String(item.episode_number).padStart(2, '0')} — ${item.name}` + : item.name; + + const confidenceColor = item.confidence === 'high' ? 'bg-green-50 border-green-200' : 'bg-amber-50 border-amber-200'; + + return ( +
+
+
+

{title}

+
+ {/* OG language dropdown */} + {onLanguageChange ? ( + + ) : ( + {langName(item.original_language)} + )} + + {/* Pipeline step badges */} + {item.apple_compat === 'audio_transcode' && ( + transcode + )} + {item.job_type === 'copy' && item.apple_compat !== 'audio_transcode' && ( + copy + )} +
+
+
+ + {showApproveUpTo && onApproveUpTo && ( + + )} +
+ ); +} +``` + +- [ ] **Step 2: Create `src/features/pipeline/SeriesCard.tsx`** + +Collapsible series card: + +```typescript +import { useState } from 'react'; +import { api } from '~/shared/lib/api'; +import { Select } from '~/shared/components/ui/select'; +import { LANG_NAMES, langName } from '~/shared/lib/lang'; +import { PipelineCard } from './PipelineCard'; + +interface SeriesCardProps { + seriesKey: string; + seriesName: string; + episodes: any[]; + onMutate: () => void; +} + +export function SeriesCard({ seriesKey, seriesName, episodes, onMutate }: SeriesCardProps) { + const [expanded, setExpanded] = useState(false); + + // Use the first episode's language as the series language + const seriesLang = episodes[0]?.original_language ?? ''; + + const setSeriesLanguage = async (lang: string) => { + await api.patch(`/api/review/series/${encodeURIComponent(seriesKey)}/language`, { language: lang }); + onMutate(); + }; + + const approveSeries = async () => { + await api.post(`/api/review/series/${encodeURIComponent(seriesKey)}/approve-all`); + onMutate(); + }; + + const highCount = episodes.filter((e: any) => e.confidence === 'high').length; + const lowCount = episodes.filter((e: any) => e.confidence === 'low').length; + + return ( +
+
setExpanded(!expanded)} + > +
+ {expanded ? '▼' : '▶'} +

{seriesName}

+ {episodes.length} eps + {highCount > 0 && {highCount} ready} + {lowCount > 0 && {lowCount} review} +
+
e.stopPropagation()}> + + +
+
+ {expanded && ( +
+ {episodes.map((ep: any) => ( + { + await api.patch(`/api/review/${ep.item_id}/language`, { language: lang }); + onMutate(); + }} + /> + ))} +
+ )} +
+ ); +} +``` + +- [ ] **Step 3: Create `src/features/pipeline/ReviewColumn.tsx`** + +```typescript +import { api } from '~/shared/lib/api'; +import { PipelineCard } from './PipelineCard'; +import { SeriesCard } from './SeriesCard'; + +interface ReviewColumnProps { + items: any[]; + onMutate: () => void; +} + +export function ReviewColumn({ items, onMutate }: ReviewColumnProps) { + // Group by series (movies are standalone) + const movies = items.filter(i => i.type === 'Movie'); + const seriesMap = new Map(); + + for (const item of items.filter(i => i.type === 'Episode')) { + const key = item.series_jellyfin_id ?? item.series_name; + if (!seriesMap.has(key)) { + seriesMap.set(key, { name: item.series_name, key, episodes: [] }); + } + seriesMap.get(key)!.episodes.push(item); + } + + const approveUpTo = async (planId: number) => { + await api.post(`/api/review/approve-up-to/${planId}`); + onMutate(); + }; + + // Interleave movies and series, sorted by confidence (high first) + const allItems = [ + ...movies.map(m => ({ type: 'movie' as const, item: m, sortKey: m.confidence === 'high' ? 0 : 1 })), + ...[...seriesMap.values()].map(s => ({ + type: 'series' as const, + item: s, + sortKey: s.episodes.every((e: any) => e.confidence === 'high') ? 0 : 1, + })), + ].sort((a, b) => a.sortKey - b.sortKey); + + return ( +
+
+ Review ({items.length}) +
+
+ {allItems.map((entry, idx) => { + if (entry.type === 'movie') { + return ( + { + await api.patch(`/api/review/${entry.item.item_id}/language`, { language: lang }); + onMutate(); + }} + showApproveUpTo + onApproveUpTo={() => approveUpTo(entry.item.id)} + /> + ); + } else { + return ( + + ); + } + })} + {allItems.length === 0 && ( +

No items to review

+ )} +
+
+ ); +} +``` + +- [ ] **Step 4: Commit** + +```bash +git add src/features/pipeline/PipelineCard.tsx src/features/pipeline/SeriesCard.tsx src/features/pipeline/ReviewColumn.tsx +git commit -m "add pipeline review column with cards, series grouping, approve-up-to" +``` + +--- + +### Task 10: Kanban Board — Queue, Processing, Done Columns + +**Files:** +- Create: `src/features/pipeline/QueueColumn.tsx` +- Create: `src/features/pipeline/ProcessingColumn.tsx` +- Create: `src/features/pipeline/DoneColumn.tsx` +- Create: `src/features/pipeline/ScheduleControls.tsx` + +- [ ] **Step 1: Create `src/features/pipeline/QueueColumn.tsx`** + +```typescript +import { Badge } from '~/shared/components/ui/badge'; + +interface QueueColumnProps { + items: any[]; +} + +export function QueueColumn({ items }: QueueColumnProps) { + return ( +
+
+ Queued ({items.length}) +
+
+ {items.map((item: any) => ( +
+

{item.name}

+ + {item.job_type} + +
+ ))} + {items.length === 0 && ( +

Queue empty

+ )} +
+
+ ); +} +``` + +- [ ] **Step 2: Create `src/features/pipeline/ProcessingColumn.tsx`** + +```typescript +import { Badge } from '~/shared/components/ui/badge'; + +interface ProcessingColumnProps { + items: any[]; + progress?: { id: number; seconds: number; total: number } | null; + queueStatus?: { status: string; until?: string; seconds?: number } | null; +} + +export function ProcessingColumn({ items, progress, queueStatus }: ProcessingColumnProps) { + const job = items[0]; // at most one running job + + const formatTime = (s: number) => { + const m = Math.floor(s / 60); + const sec = Math.floor(s % 60); + return `${m}:${String(sec).padStart(2, '0')}`; + }; + + return ( +
+
Processing
+
+ {/* Queue status */} + {queueStatus && queueStatus.status !== 'running' && ( +
+ {queueStatus.status === 'paused' && <>Paused until {queueStatus.until}} + {queueStatus.status === 'sleeping' && <>Sleeping {queueStatus.seconds}s between jobs} + {queueStatus.status === 'idle' && <>Idle} +
+ )} + + {job ? ( +
+

{job.name}

+
+ running + + {job.job_type} + +
+ + {/* Progress bar for transcode jobs */} + {progress && progress.total > 0 && ( +
+
+ {formatTime(progress.seconds)} + {Math.round((progress.seconds / progress.total) * 100)}% + {formatTime(progress.total)} +
+
+
+
+
+ )} +
+ ) : ( +

No active job

+ )} +
+
+ ); +} +``` + +- [ ] **Step 3: Create `src/features/pipeline/DoneColumn.tsx`** + +```typescript +import { Badge } from '~/shared/components/ui/badge'; + +interface DoneColumnProps { + items: any[]; +} + +export function DoneColumn({ items }: DoneColumnProps) { + return ( +
+
+ Done ({items.length}) +
+
+ {items.map((item: any) => ( +
+

{item.name}

+ + {item.status} + +
+ ))} + {items.length === 0 && ( +

No completed items

+ )} +
+
+ ); +} +``` + +- [ ] **Step 4: Create `src/features/pipeline/ScheduleControls.tsx`** + +```typescript +import { useState } from 'react'; +import { api } from '~/shared/lib/api'; +import { Input } from '~/shared/components/ui/input'; +import { Button } from '~/shared/components/ui/button'; + +interface ScheduleControlsProps { + scheduler: { + job_sleep_seconds: number; + schedule_enabled: boolean; + schedule_start: string; + schedule_end: string; + }; + onUpdate: () => void; +} + +export function ScheduleControls({ scheduler, onUpdate }: ScheduleControlsProps) { + const [open, setOpen] = useState(false); + const [state, setState] = useState(scheduler); + + const save = async () => { + await api.patch('/api/execute/scheduler', state); + onUpdate(); + setOpen(false); + }; + + const startAll = async () => { + await api.post('/api/execute/start'); + onUpdate(); + }; + + return ( +
+ + + + {open && ( +
+

Schedule Settings

+ + + setState({ ...state, job_sleep_seconds: parseInt(e.target.value) || 0 })} + className="mb-3" + /> + + + + {state.schedule_enabled && ( +
+ setState({ ...state, schedule_start: e.target.value })} + className="w-24" + /> + to + setState({ ...state, schedule_end: e.target.value })} + className="w-24" + /> +
+ )} + + +
+ )} +
+ ); +} +``` + +- [ ] **Step 5: Commit** + +```bash +git add src/features/pipeline/QueueColumn.tsx src/features/pipeline/ProcessingColumn.tsx src/features/pipeline/DoneColumn.tsx src/features/pipeline/ScheduleControls.tsx +git commit -m "add pipeline queue, processing, done columns, schedule controls" +``` + +--- + +### Task 11: Integration and Scan Page Update + +**Files:** +- Modify: `src/routes/__root.tsx` +- Modify: `src/features/scan/ScanPage.tsx` + +- [ ] **Step 1: Update the Scan page to link to Pipeline** + +After a scan completes, show a link to the Pipeline page instead of the review page. Update the "complete" SSE handler to show: + +```typescript +// After scan completes, show link to pipeline + + Review in Pipeline → + +``` + +- [ ] **Step 2: Keep old routes accessible but not in nav** + +Don't delete the old route files (`src/routes/review.tsx`, etc.) — they still work for direct URL access and for the subtitle manager. Just remove them from the nav bar in `__root.tsx`. + +- [ ] **Step 3: Verify end-to-end flow** + +1. Run a scan +2. Navigate to /pipeline +3. See items in Review column with confidence badges +4. Set OG language on a series +5. Click "Approve up to here" on an item +6. See items move to Queued +7. Click "Start queue" +8. See job in Processing with progress +9. See completed items in Done + +- [ ] **Step 4: Commit** + +```bash +git add src/features/scan/ScanPage.tsx src/routes/__root.tsx +git commit -m "wire pipeline into nav, link scan completion to pipeline page" +``` + +--- + +### Task 12: Cleanup and Polish + +**Files:** +- Modify: Various + +- [ ] **Step 1: Add `duration` to media_items if not present** + +For FFmpeg progress, we need total duration. Check if Jellyfin provides `RunTimeTicks` and store it during scan. If already available, use it in the progress calculation. 
+ +- [ ] **Step 2: Handle edge cases in apple-compat** + +- Files with no audio streams → skip audio analysis +- Files with only video streams → `is_noop` for audio steps +- Files where all embedded subs are already extracted → `subs_extracted = 1` + +- [ ] **Step 3: Verify subtitle manager still works** + +Navigate to /review/subtitles. Verify: +- Browse sidecar files +- Delete a sidecar file +- Language summary view works + +- [ ] **Step 4: Bump version** + +Update `package.json` version to `2026.03.27` (CalVer). + +- [ ] **Step 5: Final commit** + +```bash +git add -A +git commit -m "polish unified pipeline: edge cases, duration tracking, version bump" +``` diff --git a/docs/superpowers/specs/2026-03-27-unified-pipeline-design.md b/docs/superpowers/specs/2026-03-27-unified-pipeline-design.md new file mode 100644 index 0000000..5ecaa87 --- /dev/null +++ b/docs/superpowers/specs/2026-03-27-unified-pipeline-design.md @@ -0,0 +1,223 @@ +# Unified Media Processing Pipeline + +**Date:** 2026-03-27 +**Status:** Draft + +## Problem + +The app currently handles subtitle extraction and audio cleanup as separate workflows with separate FFmpeg commands. Apple device compatibility (DTS/TrueHD transcoding) is not addressed at all. Users must manually navigate multiple pages and approve items one by one. + +## Goal + +Unify all media processing into a single pipeline per file. One scan, one review, one FFmpeg command. Minimize user interaction by auto-approving high-confidence items and enabling batch confirmation. + +## Pipeline + +Each file goes through three checks. The analyzer evaluates all three and produces one plan with one FFmpeg command. 
+ +### Step 1: Subtitle Extraction + +- Extract all embedded subtitles to sidecar files on disk +- Remove subtitle streams from container +- Sidecar naming: `video.en.srt`, `video.de.forced.srt`, `video.es.hi.vtt` +- Populates `subtitle_files` table (feeds the existing subtitle manager) + +### Step 2: Audio Cleanup + +- Identify original language audio → set as default, first track +- Keep configured additional languages (`audio_languages` config), sorted by config order +- Remove all other audio tracks +- Preserve custom titles on kept tracks + +### Step 3: Apple Compatibility + +Check each **kept** audio stream's codec against the Apple-compatible set: + +**Compatible (no action):** AAC, AC3 (DD), EAC3 (DD+), ALAC, FLAC, MP3, PCM, Opus + +**Incompatible → transcode:** + +| Source codec | Container = MKV | Container = MP4 | +|---|---|---| +| DTS / DTS-ES / DTS-HD HRA | EAC3 | EAC3 | +| DTS-HD MA / DTS:X | FLAC | EAC3 | +| TrueHD / TrueHD Atmos | FLAC | EAC3 | + +Rationale: FLAC preserves lossless quality and is Apple-compatible (iOS 11+), but only works in MKV containers. EAC3 is the best lossy option for surround that Apple devices decode natively. + +### Combined FFmpeg Command + +A single FFmpeg invocation handles all three steps: + +```bash +ffmpeg -y -i 'input.mkv' \ + # Subtitle extraction (multiple outputs) + -map 0:s:0 'input.en.srt' \ + -map 0:s:1 'input.de.forced.srt' \ + # Remuxed output (no subs, reordered audio, transcoded where needed) + -map 0:v:0 -map 0:a:2 -map 0:a:0 \ + -c:v copy \ + -c:a:0 copy \ # OG audio (AAC) — already compatible + -c:a:1 eac3 \ # secondary audio (DTS) — transcode to EAC3 + -disposition:a:0 default -disposition:a:1 0 \ + -metadata:s:a:0 title='English' \ + -metadata:s:a:1 title='German' \ + 'input.tmp.mkv' && mv 'input.tmp.mkv' 'input.mkv' +``` + +Note: FFmpeg can output to multiple files in one invocation. Subtitle sidecar extraction and the remuxed output are produced in a single pass. 
+ +### `is_noop` Definition + +A file is a no-op (already fully processed) when ALL of: +- All subtitles already extracted (or none embedded) +- Audio tracks already in correct order with no unwanted tracks +- All kept audio codecs are Apple-compatible + +`is_noop` files are marked as done during scan without entering the pipeline. + +## Confidence Scoring + +Each file gets a confidence score based on OG language reliability: + +**High confidence (auto-approve) — ALL of:** +- OG language is known (not null/unknown) +- At least two sources agree (any combination of Jellyfin, Radarr, Sonarr), OR only one source exists and it returned a language +- No `needs_review` flag from scan + +**Low confidence (needs review) — ANY of:** +- OG language is null or unknown +- Sources disagree (e.g., Jellyfin says "eng", Radarr says "fra") +- `needs_review` flag set during scan +- Zero audio tracks match the detected OG language + +High-confidence files are pre-approved and sorted to the top of the review column. Low-confidence files require human confirmation. + +## Kanban Board UI + +Replaces the current separate scan/review/execute pages with a unified pipeline view. 
+ +### Columns + +| Scan | Review | Queued | Processing | Done | +|------|--------|--------|------------|------| +| Incoming from scan | Needs confirmation | Confirmed, waiting | FFmpeg running | Completed | + +### Card Content + +Each card represents one media file: + +- **Title:** movie name or "S01E03 — Episode Title" +- **OG language:** badge with confidence color (green/yellow/red) + inline dropdown to change +- **Pipeline badges:** icons showing which steps apply (sub extract, audio reorder, audio transcode) +- **Job type:** `copy` (fast, seconds) vs `transcode` (slow, minutes) +- **"Approve up to here" button:** confirms this card and all cards above it in the Review column + +### Series Grouping + +- Series appear as collapsible cards showing series name + episode count +- OG language is set at series level (one dropdown for the whole series) +- "Approve series" button confirms all episodes at once +- Individual episodes can be expanded and overridden if needed +- Rationale: if a series is English OG, it's unlikely a single episode differs + +### Processing Column + +- Shows currently running job with progress info +- For transcode jobs: progress bar (% complete, elapsed, ETA) parsed from FFmpeg stderr `time=` +- Queue status: idle / running / sleeping / paused until HH:MM + +### Done Column + +- Completed items with summary of what changed +- Collapsible, auto-archives + +## Execution & Scheduling + +### Job Queue + +- Jobs execute sequentially (one FFmpeg command at a time) +- Each job tagged as `copy` or `transcode` based on whether any audio streams need transcoding + +### Sleep Between Jobs + +- Configurable `job_sleep_seconds` (default: 0) +- Applied after each job completes, before the next starts +- Changeable at runtime via UI + +### Schedule Window + +- Configurable `schedule_start` and `schedule_end` (e.g., "01:00" and "07:00") +- `schedule_enabled` toggle (default: off = run anytime) +- When enabled: jobs only start within the window +- A 
running job is never interrupted — it finishes, then the queue pauses +- Changeable at runtime via UI + +### Config Keys (added to `config` table) + +``` +job_sleep_seconds: '0' +schedule_enabled: '0' +schedule_start: '01:00' +schedule_end: '07:00' +``` + +## Schema Changes + +### `review_plans` — new columns + +| Column | Type | Description | +|---|---|---| +| `confidence` | TEXT | `high` / `low` — based on OG language source agreement | +| `apple_compat` | TEXT | `direct_play` / `remux` / `audio_transcode` / `video_transcode` | +| `job_type` | TEXT | `copy` / `transcode` — determines expected duration | +| `subs_extracted` | INTEGER | 1 if subtitles already extracted (existing column, kept) | + +### `stream_decisions` — new columns + +| Column | Type | Description | +|---|---|---| +| `transcode_codec` | TEXT | Target codec if transcoding (e.g., `eac3`, `flac`), NULL if copy | + +### `jobs` — updated `job_type` values + +Current: `audio`, `extract`, `convert` +New: `copy` (stream copy only), `transcode` (includes audio re-encoding) + +### New config defaults + +```typescript +job_sleep_seconds: '0', +schedule_enabled: '0', +schedule_start: '01:00', +schedule_end: '07:00', +``` + +## Subtitle Manager (unchanged) + +The existing subtitle manager remains as a separate page/tool: + +- Browse extracted sidecar files per media item +- Delete unwanted sidecar files (the Bazarr gap) +- Language summary view +- Title harmonization + +The pipeline populates `subtitle_files` during step 1. The manager reads from that table independently. No coupling between the two beyond the shared table. + +## Out of Scope + +- **Configarr / custom format management** — handled externally on Unraid +- **Sonarr/Radarr re-search trigger** — future feature (flag incompatible files for re-download) +- **Video transcoding** (VP9 → H.264, etc.) 
— rare edge case, handle via re-download +- **Container conversion** (MKV ↔ MP4) — not needed for the pipeline, existing MKV convert command stays available + +## Guided Gates + +- `GG-1:` Scan a library with mixed codecs (DTS, AAC, TrueHD, EAC3). Verify the analyzer correctly identifies which files need transcoding vs copy-only. +- `GG-2:` Process a DTS-only MKV file. Verify the FFmpeg command transcodes DTS → FLAC (lossless) and the output plays on an iPhone without transcoding. +- `GG-3:` Process a TrueHD file in MP4 container. Verify it transcodes to EAC3 (not FLAC, since MP4 doesn't support FLAC). +- `GG-4:` Run the Kanban board with 20+ items. Use "Approve up to here" to batch-confirm 15 items. Verify all 15 move to Queued. +- `GG-5:` Set schedule window to a past time range. Verify queue pauses and shows "paused until HH:MM". +- `GG-6:` Process a file that is already fully compliant (Apple-compatible audio, subs extracted, correct order). Verify it's marked `is_noop` and shows as Done without entering the queue. +- `GG-7:` Verify the subtitle manager still works independently — delete a sidecar file, confirm it's removed from disk and `subtitle_files` table. +- `GG-8:` Collapse/expand a series in the review column. Set OG language at series level. Verify all episodes inherit it. Override one episode. Verify only that episode differs. 
diff --git a/server/api/review.ts b/server/api/review.ts index d1d3fb5..ca60e9a 100644 --- a/server/api/review.ts +++ b/server/api/review.ts @@ -1,8 +1,9 @@ import { Hono } from 'hono'; import { getDb, getConfig, getAllConfig } from '../db/index'; -import { analyzeItem } from '../services/analyzer'; +import { analyzeItem, assignTargetOrder } from '../services/analyzer'; import { buildCommand } from '../services/ffmpeg'; import { normalizeLanguage, getItem, refreshItem, mapStream } from '../services/jellyfin'; +import { parseId, isOneOf } from '../lib/validate'; import type { MediaItem, MediaStream, ReviewPlan, StreamDecision } from '../types'; const app = new Hono(); @@ -62,7 +63,16 @@ function loadItemDetail(db: ReturnType, itemId: number) { return { item, streams, plan: plan ?? null, decisions, command }; } -function reanalyze(db: ReturnType, itemId: number): void { +/** + * Match old custom_titles to new stream IDs after rescan. Keys by a + * composite of (type, language, stream_index, title) so user overrides + * survive stream-id changes when Jellyfin re-probes metadata. + */ +function titleKey(s: { type: string; language: string | null; stream_index: number; title: string | null }): string { + return `${s.type}|${s.language ?? ''}|${s.stream_index}|${s.title ?? ''}`; +} + +function reanalyze(db: ReturnType, itemId: number, preservedTitles?: Map): void { const item = db.prepare('SELECT * FROM media_items WHERE id = ?').get(itemId) as MediaItem; if (!item) return; @@ -78,17 +88,69 @@ function reanalyze(db: ReturnType, itemId: number): void { `).run(itemId, analysis.is_noop ? 1 : 0, analysis.confidence, analysis.apple_compat, analysis.job_type, analysis.notes.length > 0 ? 
analysis.notes.join('\n') : null); const plan = db.prepare('SELECT id FROM review_plans WHERE item_id = ?').get(itemId) as { id: number }; - const existingTitles = new Map( + + // Preserve existing custom_titles: prefer by stream_id (streams unchanged); + // fall back to titleKey match (streams regenerated after rescan). + const byStreamId = new Map( (db.prepare('SELECT stream_id, custom_title FROM stream_decisions WHERE plan_id = ?').all(plan.id) as { stream_id: number; custom_title: string | null }[]) .map((r) => [r.stream_id, r.custom_title]) ); + const streamById = new Map(streams.map(s => [s.id, s] as const)); + db.prepare('DELETE FROM stream_decisions WHERE plan_id = ?').run(plan.id); + const insertDecision = db.prepare('INSERT INTO stream_decisions (plan_id, stream_id, action, target_index, custom_title, transcode_codec) VALUES (?, ?, ?, ?, ?, ?)'); for (const dec of analysis.decisions) { - db.prepare('INSERT INTO stream_decisions (plan_id, stream_id, action, target_index, custom_title, transcode_codec) VALUES (?, ?, ?, ?, ?, ?)') - .run(plan.id, dec.stream_id, dec.action, dec.target_index, existingTitles.get(dec.stream_id) ?? null, dec.transcode_codec); + let customTitle = byStreamId.get(dec.stream_id) ?? null; + if (!customTitle && preservedTitles) { + const s = streamById.get(dec.stream_id); + if (s) customTitle = preservedTitles.get(titleKey(s)) ?? null; + } + insertDecision.run(plan.id, dec.stream_id, dec.action, dec.target_index, customTitle, dec.transcode_codec); } } +/** + * After the user toggles a stream action, re-run assignTargetOrder and + * recompute is_noop without wiping user-chosen actions or custom_titles. + */ +function recomputePlanAfterToggle(db: ReturnType, itemId: number): void { + const item = db.prepare('SELECT * FROM media_items WHERE id = ?').get(itemId) as MediaItem | undefined; + if (!item) return; + const streams = db.prepare('SELECT * FROM media_streams WHERE item_id = ? 
ORDER BY stream_index').all(itemId) as MediaStream[]; + const plan = db.prepare('SELECT id FROM review_plans WHERE item_id = ?').get(itemId) as { id: number } | undefined; + if (!plan) return; + const decisions = db.prepare('SELECT stream_id, action, target_index, transcode_codec FROM stream_decisions WHERE plan_id = ?').all(plan.id) as { + stream_id: number; action: 'keep' | 'remove'; target_index: number | null; transcode_codec: string | null + }[]; + + const origLang = item.original_language ? normalizeLanguage(item.original_language) : null; + const audioLanguages: string[] = JSON.parse(getConfig('audio_languages') ?? '[]'); + + // Re-assign target_index based on current actions + const decWithIdx = decisions.map(d => ({ stream_id: d.stream_id, action: d.action, target_index: null as number | null, transcode_codec: d.transcode_codec })); + assignTargetOrder(streams, decWithIdx, origLang, audioLanguages); + + const updateIdx = db.prepare('UPDATE stream_decisions SET target_index = ? WHERE plan_id = ? 
AND stream_id = ?'); + for (const d of decWithIdx) updateIdx.run(d.target_index, plan.id, d.stream_id); + + // Recompute is_noop: audio removed OR reordered OR subs exist OR transcode needed + const anyAudioRemoved = streams.some(s => s.type === 'Audio' && decWithIdx.find(d => d.stream_id === s.id)?.action === 'remove'); + const hasSubs = streams.some(s => s.type === 'Subtitle'); + const needsTranscode = decWithIdx.some(d => d.transcode_codec != null && d.action === 'keep'); + + const keptAudio = streams + .filter(s => s.type === 'Audio' && decWithIdx.find(d => d.stream_id === s.id)?.action === 'keep') + .sort((a, b) => a.stream_index - b.stream_index); + let audioOrderChanged = false; + for (let i = 0; i < keptAudio.length; i++) { + const dec = decWithIdx.find(d => d.stream_id === keptAudio[i].id); + if (dec?.target_index !== i) { audioOrderChanged = true; break; } + } + + const isNoop = !anyAudioRemoved && !audioOrderChanged && !hasSubs && !needsTranscode; + db.prepare('UPDATE review_plans SET is_noop = ? WHERE id = ?').run(isNoop ? 1 : 0, plan.id); +} + // ─── Pipeline: summary ─────────────────────────────────────────────────────── app.get('/pipeline', (c) => { @@ -141,16 +203,24 @@ app.get('/pipeline', (c) => { const noops = db.prepare('SELECT COUNT(*) as count FROM review_plans WHERE is_noop = 1').get() as { count: number }; - // Add transcode reasons per review plan - const transcodeStmt = db.prepare(` - SELECT DISTINCT ms.codec, sd.transcode_codec - FROM stream_decisions sd - JOIN media_streams ms ON ms.id = sd.stream_id - WHERE sd.plan_id = ? AND sd.transcode_codec IS NOT NULL - `); - for (const item of review as any[]) { - const rows = transcodeStmt.all(item.id) as { codec: string; transcode_codec: string }[]; - item.transcode_reasons = rows.map(r => `${(r.codec ?? 
'').toUpperCase()} → ${r.transcode_codec.toUpperCase()}`); + // Batch transcode reasons for all review plans in one query (avoids N+1) + const planIds = (review as { id: number }[]).map(r => r.id); + const reasonsByPlan = new Map(); + if (planIds.length > 0) { + const placeholders = planIds.map(() => '?').join(','); + const allReasons = db.prepare(` + SELECT DISTINCT sd.plan_id, ms.codec, sd.transcode_codec + FROM stream_decisions sd + JOIN media_streams ms ON ms.id = sd.stream_id + WHERE sd.plan_id IN (${placeholders}) AND sd.transcode_codec IS NOT NULL + `).all(...planIds) as { plan_id: number; codec: string | null; transcode_codec: string }[]; + for (const r of allReasons) { + if (!reasonsByPlan.has(r.plan_id)) reasonsByPlan.set(r.plan_id, []); + reasonsByPlan.get(r.plan_id)!.push(`${(r.codec ?? '').toUpperCase()} → ${r.transcode_codec.toUpperCase()}`); + } + } + for (const item of review as { id: number; transcode_reasons?: string[] }[]) { + item.transcode_reasons = reasonsByPlan.get(item.id) ?? []; } return c.json({ review, queued, processing, done, noopCount: noops.count, jellyfinUrl }); @@ -260,7 +330,8 @@ app.post('/series/:seriesKey/approve-all', (c) => { app.post('/season/:seriesKey/:season/approve-all', (c) => { const db = getDb(); const seriesKey = decodeURIComponent(c.req.param('seriesKey')); - const season = Number(c.req.param('season')); + const season = Number.parseInt(c.req.param('season') ?? '', 10); + if (!Number.isFinite(season)) return c.json({ error: 'invalid season' }, 400); const pending = db.prepare(` SELECT rp.*, mi.id as item_id FROM review_plans rp JOIN media_items mi ON mi.id = rp.item_id WHERE mi.type = 'Episode' AND (mi.series_jellyfin_id = ? 
OR (mi.series_jellyfin_id IS NULL AND mi.series_name = ?)) @@ -293,7 +364,8 @@ app.post('/approve-all', (c) => { app.get('/:id', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const detail = loadItemDetail(db, id); if (!detail.item) return c.notFound(); return c.json(detail); @@ -303,7 +375,8 @@ app.get('/:id', (c) => { app.patch('/:id/language', async (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const body = await c.req.json<{ language: string | null }>(); const lang = body.language || null; db.prepare("UPDATE media_items SET original_language = ?, orig_lang_source = 'manual', needs_review = 0 WHERE id = ?") @@ -318,8 +391,9 @@ app.patch('/:id/language', async (c) => { app.patch('/:id/stream/:streamId/title', async (c) => { const db = getDb(); - const itemId = Number(c.req.param('id')); - const streamId = Number(c.req.param('streamId')); + const itemId = parseId(c.req.param('id')); + const streamId = parseId(c.req.param('streamId')); + if (itemId == null || streamId == null) return c.json({ error: 'invalid id' }, 400); const body = await c.req.json<{ title: string }>(); const title = (body.title ?? 
'').trim() || null; const plan = db.prepare('SELECT id FROM review_plans WHERE item_id = ?').get(itemId) as { id: number } | undefined; @@ -334,28 +408,26 @@ app.patch('/:id/stream/:streamId/title', async (c) => { app.patch('/:id/stream/:streamId', async (c) => { const db = getDb(); - const itemId = Number(c.req.param('id')); - const streamId = Number(c.req.param('streamId')); - const body = await c.req.json<{ action: 'keep' | 'remove' }>(); - const action = body.action; + const itemId = parseId(c.req.param('id')); + const streamId = parseId(c.req.param('streamId')); + if (itemId == null || streamId == null) return c.json({ error: 'invalid id' }, 400); + + const body = await c.req.json<{ action: unknown }>().catch(() => ({ action: null })); + if (!isOneOf(body.action, ['keep', 'remove'] as const)) { + return c.json({ error: 'action must be "keep" or "remove"' }, 400); + } + const action: 'keep' | 'remove' = body.action; // Only audio streams can be toggled — subtitles are always removed (extracted to sidecar) - const stream = db.prepare('SELECT type FROM media_streams WHERE id = ?').get(streamId) as { type: string } | undefined; - if (stream?.type === 'Subtitle') return c.json({ error: 'Subtitle streams cannot be toggled' }, 400); + const stream = db.prepare('SELECT type, item_id FROM media_streams WHERE id = ?').get(streamId) as { type: string; item_id: number } | undefined; + if (!stream || stream.item_id !== itemId) return c.json({ error: 'stream not found on item' }, 404); + if (stream.type === 'Subtitle') return c.json({ error: 'Subtitle streams cannot be toggled' }, 400); const plan = db.prepare('SELECT id FROM review_plans WHERE item_id = ?').get(itemId) as { id: number } | undefined; if (!plan) return c.notFound(); db.prepare('UPDATE stream_decisions SET action = ? WHERE plan_id = ? 
AND stream_id = ?').run(action, plan.id, streamId); - // is_noop only considers audio streams (subtitle removal is implicit) - const audioNotKept = (db.prepare(` - SELECT COUNT(*) as n FROM stream_decisions sd - JOIN media_streams ms ON ms.id = sd.stream_id - WHERE sd.plan_id = ? AND ms.type = 'Audio' AND sd.action != 'keep' - `).get(plan.id) as { n: number }).n; - // Also check audio ordering - const isNoop = audioNotKept === 0; // simplified — full recheck would need analyzer - db.prepare('UPDATE review_plans SET is_noop = ? WHERE id = ?').run(isNoop ? 1 : 0, plan.id); + recomputePlanAfterToggle(db, itemId); const detail = loadItemDetail(db, itemId); if (!detail.item) return c.notFound(); @@ -366,7 +438,8 @@ app.patch('/:id/stream/:streamId', async (c) => { app.post('/:id/approve', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const plan = db.prepare('SELECT * FROM review_plans WHERE item_id = ?').get(id) as ReviewPlan | undefined; if (!plan) return c.notFound(); db.prepare("UPDATE review_plans SET status = 'approved', reviewed_at = datetime('now') WHERE id = ?").run(plan.id); @@ -381,7 +454,8 @@ app.post('/:id/approve', (c) => { app.post('/:id/unapprove', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const plan = db.prepare('SELECT * FROM review_plans WHERE item_id = ?').get(id) as ReviewPlan | undefined; if (!plan) return c.notFound(); if (plan.status !== 'approved') return c.json({ ok: false, error: 'Can only unapprove items with status approved' }, 409); @@ -398,14 +472,16 @@ app.post('/:id/unapprove', (c) => { app.post('/:id/skip', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); 
db.prepare("UPDATE review_plans SET status = 'skipped', reviewed_at = datetime('now') WHERE item_id = ?").run(id); return c.json({ ok: true }); }); app.post('/:id/unskip', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); db.prepare("UPDATE review_plans SET status = 'pending', reviewed_at = NULL WHERE item_id = ? AND status = 'skipped'").run(id); return c.json({ ok: true }); }); @@ -414,7 +490,8 @@ app.post('/:id/unskip', (c) => { app.post('/:id/rescan', async (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const item = db.prepare('SELECT * FROM media_items WHERE id = ?').get(id) as MediaItem | undefined; if (!item) return c.notFound(); @@ -425,6 +502,20 @@ app.post('/:id/rescan', async (c) => { // so the streams we fetch afterwards reflect the current file on disk. await refreshItem(jfCfg, item.jellyfin_id); + // Snapshot custom_titles keyed by stable properties, since replacing + // media_streams cascades away all stream_decisions. + const preservedTitles = new Map(); + const oldRows = db.prepare(` + SELECT ms.type, ms.language, ms.stream_index, ms.title, sd.custom_title + FROM stream_decisions sd + JOIN media_streams ms ON ms.id = sd.stream_id + JOIN review_plans rp ON rp.id = sd.plan_id + WHERE rp.item_id = ? 
AND sd.custom_title IS NOT NULL + `).all(id) as { type: string; language: string | null; stream_index: number; title: string | null; custom_title: string }[]; + for (const r of oldRows) { + preservedTitles.set(titleKey(r), r.custom_title); + } + const fresh = await getItem(jfCfg, item.jellyfin_id); if (fresh) { const insertStream = db.prepare(` @@ -440,7 +531,7 @@ app.post('/:id/rescan', async (c) => { } } - reanalyze(db, id); + reanalyze(db, id, preservedTitles); const detail = loadItemDetail(db, id); if (!detail.item) return c.notFound(); return c.json(detail); @@ -449,7 +540,8 @@ app.post('/:id/rescan', async (c) => { // ─── Pipeline: approve up to here ──────────────────────────────────────────── app.post('/approve-up-to/:id', (c) => { - const targetId = Number(c.req.param('id')); + const targetId = parseId(c.req.param('id')); + if (targetId == null) return c.json({ error: 'invalid id' }, 400); const db = getDb(); const target = db.prepare('SELECT id FROM review_plans WHERE id = ?').get(targetId) as { id: number } | undefined; diff --git a/server/api/scan.ts b/server/api/scan.ts index 6f02745..11c48fa 100644 --- a/server/api/scan.ts +++ b/server/api/scan.ts @@ -43,16 +43,18 @@ app.get('/', (c) => { // ─── Start ──────────────────────────────────────────────────────────────────── app.post('/start', async (c) => { - if (getConfig('scan_running') === '1') { + const db = getDb(); + // Atomic claim: only succeed if scan_running is not already '1'. + const claim = db.prepare("UPDATE config SET value = '1' WHERE key = 'scan_running' AND value != '1'").run(); + if (claim.changes === 0) { return c.json({ ok: false, error: 'Scan already running' }, 409); } - const body = await c.req.json<{ limit?: number }>().catch(() => ({})); + const body = await c.req.json<{ limit?: number }>().catch(() => ({ limit: undefined })); const formLimit = body.limit ?? null; const envLimit = process.env.SCAN_LIMIT ? Number(process.env.SCAN_LIMIT) : null; const limit = formLimit ?? 
envLimit ?? null; setConfig('scan_limit', limit != null ? String(limit) : ''); - setConfig('scan_running', '1'); runScan(limit).catch((err) => { logError('Scan failed:', err); diff --git a/server/api/subtitles.ts b/server/api/subtitles.ts index d3be1a8..4f26023 100644 --- a/server/api/subtitles.ts +++ b/server/api/subtitles.ts @@ -2,8 +2,11 @@ import { Hono } from 'hono'; import { getDb, getConfig, getAllConfig } from '../db/index'; import { buildExtractOnlyCommand } from '../services/ffmpeg'; import { normalizeLanguage, getItem, refreshItem, mapStream } from '../services/jellyfin'; +import { parseId } from '../lib/validate'; import type { MediaItem, MediaStream, SubtitleFile, ReviewPlan, StreamDecision } from '../types'; import { unlinkSync } from 'node:fs'; +import { dirname, resolve as resolvePath, sep } from 'node:path'; +import { error as logError } from '../lib/log'; const app = new Hono(); @@ -245,7 +248,9 @@ app.get('/summary', (c) => { app.get('/:id', (c) => { const db = getDb(); - const detail = loadDetail(db, Number(c.req.param('id'))); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); + const detail = loadDetail(db, id); if (!detail) return c.notFound(); return c.json(detail); }); @@ -254,8 +259,9 @@ app.get('/:id', (c) => { app.patch('/:id/stream/:streamId/language', async (c) => { const db = getDb(); - const itemId = Number(c.req.param('id')); - const streamId = Number(c.req.param('streamId')); + const itemId = parseId(c.req.param('id')); + const streamId = parseId(c.req.param('streamId')); + if (itemId == null || streamId == null) return c.json({ error: 'invalid id' }, 400); const body = await c.req.json<{ language: string }>(); const lang = (body.language ?? 
'').trim() || null; @@ -274,8 +280,9 @@ app.patch('/:id/stream/:streamId/language', async (c) => { app.patch('/:id/stream/:streamId/title', async (c) => { const db = getDb(); - const itemId = Number(c.req.param('id')); - const streamId = Number(c.req.param('streamId')); + const itemId = parseId(c.req.param('id')); + const streamId = parseId(c.req.param('streamId')); + if (itemId == null || streamId == null) return c.json({ error: 'invalid id' }, 400); const body = await c.req.json<{ title: string }>(); const title = (body.title ?? '').trim() || null; @@ -316,7 +323,8 @@ app.post('/extract-all', (c) => { app.post('/:id/extract', (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const item = db.prepare('SELECT * FROM media_items WHERE id = ?').get(id) as MediaItem | undefined; if (!item) return c.notFound(); @@ -334,14 +342,32 @@ app.post('/:id/extract', (c) => { // ─── Delete file ───────────────────────────────────────────────────────────── +/** + * Verify a sidecar file path lives inside the directory of its owning + * media item. Guards against path-traversal via malformed DB state. + */ +function isSidecarOfItem(filePath: string, videoPath: string): boolean { + const videoDir = resolvePath(dirname(videoPath)); + const targetDir = resolvePath(dirname(filePath)); + return targetDir === videoDir || targetDir.startsWith(videoDir + sep); +} + app.delete('/:id/files/:fileId', (c) => { const db = getDb(); - const itemId = Number(c.req.param('id')); - const fileId = Number(c.req.param('fileId')); + const itemId = parseId(c.req.param('id')); + const fileId = parseId(c.req.param('fileId')); + if (itemId == null || fileId == null) return c.json({ error: 'invalid id' }, 400); const file = db.prepare('SELECT * FROM subtitle_files WHERE id = ? 
AND item_id = ?').get(fileId, itemId) as SubtitleFile | undefined; if (!file) return c.notFound(); + const item = db.prepare('SELECT file_path FROM media_items WHERE id = ?').get(itemId) as { file_path: string } | undefined; + if (!item || !isSidecarOfItem(file.file_path, item.file_path)) { + logError(`Refusing to delete subtitle file outside media dir: ${file.file_path}`); + db.prepare('DELETE FROM subtitle_files WHERE id = ?').run(fileId); + return c.json({ ok: false, error: 'file path outside media directory; DB entry removed without touching disk' }, 400); + } + try { unlinkSync(file.file_path); } catch { /* file may not exist */ } db.prepare('DELETE FROM subtitle_files WHERE id = ?').run(fileId); @@ -353,7 +379,8 @@ app.delete('/:id/files/:fileId', (c) => { app.post('/:id/rescan', async (c) => { const db = getDb(); - const id = Number(c.req.param('id')); + const id = parseId(c.req.param('id')); + if (id == null) return c.json({ error: 'invalid id' }, 400); const item = db.prepare('SELECT * FROM media_items WHERE id = ?').get(id) as MediaItem | undefined; if (!item) return c.notFound(); @@ -407,7 +434,12 @@ app.post('/batch-delete', async (c) => { } for (const file of files) { - try { unlinkSync(file.file_path); } catch { /* file may not exist */ } + const item = db.prepare('SELECT file_path FROM media_items WHERE id = ?').get(file.item_id) as { file_path: string } | undefined; + if (item && isSidecarOfItem(file.file_path, item.file_path)) { + try { unlinkSync(file.file_path); } catch { /* file may not exist */ } + } else { + logError(`Refusing to delete subtitle file outside media dir: ${file.file_path}`); + } db.prepare('DELETE FROM subtitle_files WHERE id = ?').run(file.id); deleted++; } diff --git a/server/db/schema.ts b/server/db/schema.ts index 803b8f0..edf4d6e 100644 --- a/server/db/schema.ts +++ b/server/db/schema.ts @@ -95,6 +95,18 @@ CREATE TABLE IF NOT EXISTS jobs ( started_at TEXT, completed_at TEXT ); + +CREATE INDEX IF NOT EXISTS 
idx_review_plans_status ON review_plans(status); +CREATE INDEX IF NOT EXISTS idx_review_plans_is_noop ON review_plans(is_noop); +CREATE INDEX IF NOT EXISTS idx_stream_decisions_plan_id ON stream_decisions(plan_id); +CREATE INDEX IF NOT EXISTS idx_media_items_series_jf ON media_items(series_jellyfin_id); +CREATE INDEX IF NOT EXISTS idx_media_items_series_name ON media_items(series_name); +CREATE INDEX IF NOT EXISTS idx_media_items_type ON media_items(type); +CREATE INDEX IF NOT EXISTS idx_media_streams_item_id ON media_streams(item_id); +CREATE INDEX IF NOT EXISTS idx_media_streams_type ON media_streams(type); +CREATE INDEX IF NOT EXISTS idx_subtitle_files_item_id ON subtitle_files(item_id); +CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status); +CREATE INDEX IF NOT EXISTS idx_jobs_item_id ON jobs(item_id); `; export const DEFAULT_CONFIG: Record = { diff --git a/server/lib/validate.ts b/server/lib/validate.ts new file mode 100644 index 0000000..5e62fef --- /dev/null +++ b/server/lib/validate.ts @@ -0,0 +1,26 @@ +import type { Context } from 'hono'; + +/** Parse a route param as a positive integer id. Returns null if invalid. */ +export function parseId(raw: string | undefined): number | null { + if (!raw) return null; + const n = Number.parseInt(raw, 10); + return Number.isFinite(n) && n > 0 ? n : null; +} + +/** + * Require a positive integer id param. Returns the id, or responds 400 + * and returns null. Callers check for null and return the response. + */ +export function requireId(c: Context, name: string): number | null { + const id = parseId(c.req.param(name)); + if (id == null) { + c.status(400); + return null; + } + return id; +} + +/** True if value is one of the allowed strings. 
*/ +export function isOneOf(value: unknown, allowed: readonly T[]): value is T { + return typeof value === 'string' && (allowed as readonly string[]).includes(value); +} diff --git a/server/services/analyzer.ts b/server/services/analyzer.ts index 3a4a8e0..75a755c 100644 --- a/server/services/analyzer.ts +++ b/server/services/analyzer.ts @@ -12,7 +12,9 @@ export interface AnalyzerConfig { * and whether the file needs audio remuxing. * * Subtitles are ALWAYS removed from the container (they get extracted to - * sidecar files). is_noop only considers audio changes. + * sidecar files). is_noop considers audio removal/reorder, subtitle + * extraction, and transcode — a "noop" is a file that needs no changes + * at all. */ export function analyzeItem( item: Pick, @@ -22,65 +24,43 @@ export function analyzeItem( const origLang = item.original_language ? normalizeLanguage(item.original_language) : null; const notes: string[] = []; - // Compute action for each stream const decisions: PlanResult['decisions'] = streams.map((s) => { const action = decideAction(s, origLang, config.audioLanguages); return { stream_id: s.id, action, target_index: null, transcode_codec: null }; }); - // Audio-only noop: only consider audio removals/reordering - // (subtitles are always removed from container — that's implicit, not a "change" to review) const anyAudioRemoved = streams.some((s, i) => s.type === 'Audio' && decisions[i].action === 'remove'); - // Compute target ordering for kept streams within type groups - const keptStreams = streams.filter((_, i) => decisions[i].action === 'keep'); - assignTargetOrder(keptStreams, decisions, streams, origLang, config.audioLanguages); + assignTargetOrder(streams, decisions, origLang, config.audioLanguages); - // Check if audio ordering changes const audioOrderChanged = checkAudioOrderChanged(streams, decisions); - // Step 3: Apple compatibility — compute transcode targets for kept audio for (const d of decisions) { - if (d.action === 'keep') { - 
const stream = streams.find(s => s.id === d.stream_id); - if (stream && stream.type === 'Audio') { - d.transcode_codec = transcodeTarget( - stream.codec ?? '', - stream.title, - item.container, - ); - } + if (d.action !== 'keep') continue; + const stream = streams.find(s => s.id === d.stream_id); + if (stream && stream.type === 'Audio') { + d.transcode_codec = transcodeTarget(stream.codec ?? '', stream.title, item.container); } } const keptAudioCodecs = decisions .filter(d => d.action === 'keep') .map(d => streams.find(s => s.id === d.stream_id)) - .filter(s => s && s.type === 'Audio') - .map(s => s!.codec ?? ''); + .filter((s): s is MediaStream => !!s && s.type === 'Audio') + .map(s => s.codec ?? ''); const needsTranscode = decisions.some(d => d.transcode_codec != null); const apple_compat = computeAppleCompat(keptAudioCodecs, item.container); const job_type = needsTranscode ? 'transcode' as const : 'copy' as const; const hasSubs = streams.some((s) => s.type === 'Subtitle'); - // Extended is_noop: no audio changes AND no subs to extract AND no transcode needed const is_noop = !anyAudioRemoved && !audioOrderChanged && !hasSubs && !needsTranscode; - // Generate notes for edge cases if (!origLang && item.needs_review) { notes.push('Original language unknown — audio tracks not filtered; manual review required'); } - return { - is_noop, - has_subs: hasSubs, - confidence: 'low', - apple_compat, - job_type, - decisions, - notes, - }; + return { is_noop, has_subs: hasSubs, confidence: 'low', apple_compat, job_type, decisions, notes }; } function decideAction( @@ -95,8 +75,8 @@ function decideAction( return 'keep'; case 'Audio': { - if (!origLang) return 'keep'; // unknown lang → keep all - if (!stream.language) return 'keep'; // undetermined → keep + if (!origLang) return 'keep'; + if (!stream.language) return 'keep'; const normalized = normalizeLanguage(stream.language); if (normalized === origLang) return 'keep'; if (audioLanguages.includes(normalized)) return 'keep'; 
@@ -104,7 +84,6 @@ function decideAction( } case 'Subtitle': - // All subtitles are removed from the container and extracted to sidecar files return 'remove'; default: @@ -112,69 +91,70 @@ function decideAction( } } -function assignTargetOrder( - keptStreams: MediaStream[], - decisions: PlanResult['decisions'], +/** + * Assign target_index to each kept stream. target_index is the 0-based + * position within its type group in the output file, after sorting audio + * streams by language rank (OG first, then additional languages in + * configured order, then by original stream_index for stability). + */ +export function assignTargetOrder( allStreams: MediaStream[], + decisions: PlanResult['decisions'], origLang: string | null, audioLanguages: string[], ): void { - // Group kept streams by type - const byType: Record = {}; - for (const s of keptStreams) { - const t = s.type; - byType[t] = byType[t] ?? []; - byType[t].push(s); + const keptByType = new Map(); + for (const s of allStreams) { + const dec = decisions.find(d => d.stream_id === s.id); + if (dec?.action !== 'keep') continue; + if (!keptByType.has(s.type)) keptByType.set(s.type, []); + keptByType.get(s.type)!.push(s); } - // Sort audio: OG first, then additional languages in configured order, then by stream_index within each group - if (byType['Audio']) { - byType['Audio'].sort((a, b) => { - const aLang = a.language ? normalizeLanguage(a.language) : null; - const bLang = b.language ? 
normalizeLanguage(b.language) : null; - const aRank = langRank(aLang, origLang, audioLanguages); - const bRank = langRank(bLang, origLang, audioLanguages); + const audio = keptByType.get('Audio'); + if (audio) { + audio.sort((a, b) => { + const aRank = langRank(a.language, origLang, audioLanguages); + const bRank = langRank(b.language, origLang, audioLanguages); if (aRank !== bRank) return aRank - bRank; return a.stream_index - b.stream_index; }); } - // Assign target_index per type group - for (const [, typeStreams] of Object.entries(byType)) { - typeStreams.forEach((s, idx) => { - const dec = decisions.find((d) => d.stream_id === s.id); + for (const [, streams] of keptByType) { + streams.forEach((s, idx) => { + const dec = decisions.find(d => d.stream_id === s.id); if (dec) dec.target_index = idx; }); } } -/** Compute sort rank: OG = 0, additional languages = 1..N by config order, unknown/null = N+1. */ function langRank(lang: string | null, origLang: string | null, audioLanguages: string[]): number { - if (origLang && lang === origLang) return 0; - if (lang) { - const idx = audioLanguages.indexOf(lang); + const normalized = lang ? normalizeLanguage(lang) : null; + if (origLang && normalized === origLang) return 0; + if (normalized) { + const idx = audioLanguages.indexOf(normalized); if (idx >= 0) return idx + 1; } return audioLanguages.length + 1; } -/** Check if audio stream ordering changes (ignores subtitles which are always removed). */ +/** + * True when the output order of kept audio streams differs from their + * original order in the input. Compares original stream_index order + * against target_index order. 
+ */ function checkAudioOrderChanged( streams: MediaStream[], decisions: PlanResult['decisions'] ): boolean { - const keptAudio = streams.filter((s) => { - if (s.type !== 'Audio') return false; - const dec = decisions.find((d) => d.stream_id === s.id); - return dec?.action === 'keep'; - }); + const keptAudio = streams + .filter(s => s.type === 'Audio' && decisions.find(d => d.stream_id === s.id)?.action === 'keep') + .sort((a, b) => a.stream_index - b.stream_index); - const sorted = [...keptAudio].sort((a, b) => a.stream_index - b.stream_index); for (let i = 0; i < keptAudio.length; i++) { - const dec = decisions.find((d) => d.stream_id === keptAudio[i].id); - if (!dec) continue; - const currentPos = sorted.findIndex((s) => s.id === keptAudio[i].id); - if (dec.target_index !== null && dec.target_index !== currentPos) return true; + const dec = decisions.find(d => d.stream_id === keptAudio[i].id); + if (dec?.target_index !== i) return true; } return false; } diff --git a/server/services/ffmpeg.ts b/server/services/ffmpeg.ts index 31ef516..dd5dcfb 100644 --- a/server/services/ffmpeg.ts +++ b/server/services/ffmpeg.ts @@ -226,6 +226,32 @@ function buildStreamFlags( return args; } +/** Canonical output order of stream types. Used by every command builder. */ +const TYPE_ORDER: Record = { Video: 0, Audio: 1, Subtitle: 2, Data: 3, EmbeddedImage: 4 }; + +/** + * Return kept streams paired with their decisions, sorted in canonical + * output order: type priority first, then target_index within each type. + * This is the single source of truth for output stream ordering. 
+ */ +export function sortKeptStreams( + streams: MediaStream[], + decisions: StreamDecision[] +): { stream: MediaStream; dec: StreamDecision }[] { + const kept: { stream: MediaStream; dec: StreamDecision }[] = []; + for (const s of streams) { + const dec = decisions.find(d => d.stream_id === s.id); + if (dec?.action === 'keep') kept.push({ stream: s, dec }); + } + kept.sort((a, b) => { + const ta = TYPE_ORDER[a.stream.type] ?? 9; + const tb = TYPE_ORDER[b.stream.type] ?? 9; + if (ta !== tb) return ta - tb; + return (a.dec.target_index ?? 0) - (b.dec.target_index ?? 0); + }); + return kept; +} + /** * Build the full shell command to remux a media file, keeping only the * streams specified by the decisions and in the target order. @@ -237,22 +263,7 @@ export function buildCommand( streams: MediaStream[], decisions: StreamDecision[] ): string { - // Sort kept streams by type priority then target_index - const kept = streams - .map((s) => { - const dec = decisions.find((d) => d.stream_id === s.id); - return dec?.action === 'keep' ? { stream: s, dec } : null; - }) - .filter(Boolean) as { stream: MediaStream; dec: StreamDecision }[]; - - // Sort: Video first, Audio second, Subtitle third, Data last - const typeOrder: Record = { Video: 0, Audio: 1, Subtitle: 2, Data: 3, EmbeddedImage: 4 }; - kept.sort((a, b) => { - const ta = typeOrder[a.stream.type] ?? 9; - const tb = typeOrder[b.stream.type] ?? 9; - if (ta !== tb) return ta - tb; - return (a.dec.target_index ?? 0) - (b.dec.target_index ?? 0); - }); + const kept = sortKeptStreams(streams, decisions); const inputPath = item.file_path; const ext = inputPath.match(/\.([^.]+)$/)?.[1] ?? 'mkv'; @@ -289,20 +300,7 @@ export function buildMkvConvertCommand( const outputPath = inputPath.replace(/\.[^.]+$/, '.mkv'); const tmpPath = inputPath.replace(/\.[^.]+$/, '.tmp.mkv'); - const kept = streams - .map((s) => { - const dec = decisions.find((d) => d.stream_id === s.id); - return dec?.action === 'keep' ? 
{ stream: s, dec } : null; - }) - .filter(Boolean) as { stream: MediaStream; dec: StreamDecision }[]; - - const typeOrder: Record = { Video: 0, Audio: 1, Subtitle: 2, Data: 3 }; - kept.sort((a, b) => { - const ta = typeOrder[a.stream.type] ?? 9; - const tb = typeOrder[b.stream.type] ?? 9; - if (ta !== tb) return ta - tb; - return (a.dec.target_index ?? 0) - (b.dec.target_index ?? 0); - }); + const kept = sortKeptStreams(streams, decisions); const maps = buildMaps(streams, kept); const streamFlags = buildStreamFlags(kept); @@ -385,23 +383,11 @@ export function buildPipelineCommand( } // --- Kept streams for remuxed output --- - // Enrich decisions with stream data - const enriched = decisions.map(d => { - const stream = d.stream ?? streams.find(s => s.id === d.stream_id); - return { ...d, stream: stream! }; - }).filter(d => d.action === 'keep' && d.stream); - - // Sort by type priority then target_index - const typeOrder: Record = { Video: 0, Audio: 1, Data: 2, EmbeddedImage: 3 }; - enriched.sort((a, b) => { - const ta = typeOrder[a.stream.type] ?? 9; - const tb = typeOrder[b.stream.type] ?? 9; - if (ta !== tb) return ta - tb; - return (a.target_index ?? 0) - (b.target_index ?? 0); - }); + const kept = sortKeptStreams(streams, decisions as StreamDecision[]); + const enriched = kept.map(k => ({ ...k.dec, stream: k.stream })); // Build -map flags - const maps = buildMaps(streams, enriched.map(d => ({ stream: d.stream, dec: d }))); + const maps = buildMaps(streams, kept); // Build per-stream codec flags const codecFlags: string[] = ['-c:v copy']; @@ -427,7 +413,7 @@ export function buildPipelineCommand( const finalCodecFlags = hasTranscode ? 
codecFlags : ['-c copy']; // Disposition + metadata flags for audio - const streamFlags = buildStreamFlags(enriched.map(d => ({ stream: d.stream, dec: d }))); + const streamFlags = buildStreamFlags(kept); // Assemble command const parts: string[] = [ diff --git a/server/services/scheduler.ts b/server/services/scheduler.ts index 91e8135..badcc0d 100644 --- a/server/services/scheduler.ts +++ b/server/services/scheduler.ts @@ -33,11 +33,12 @@ export function isInScheduleWindow(): boolean { const start = parseTime(state.schedule_start); const end = parseTime(state.schedule_end); - // Handle overnight windows (e.g., 23:00 → 07:00) + // Handle overnight windows (e.g., 23:00 → 07:00). End is inclusive so + // "07:00 → 07:00" spans a full day and the closing minute is covered. if (start <= end) { - return minutes >= start && minutes < end; + return minutes >= start && minutes <= end; } else { - return minutes >= start || minutes < end; + return minutes >= start || minutes <= end; } }