Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from bundle
Build or revise a reusable FFmpeg timeline for short-form video editing. Use when the user wants an agent-editable API for trimming clips, cropping, fitting to a target frame size, adding captions, and rendering a final output video.
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
scripts/video_timeline_editor/application/timeline_service.py
1from __future__ import annotations23from pathlib import Path45from video_timeline_editor.domain.model import Clip, ClipAnalysis, Project6from video_timeline_editor.domain.services import (7DEFAULT_CAPTION_STYLES,8DEFAULT_OVERLAY_STYLES,9build_caption_ass,10compute_clip_timing,11merge_styles,12retime_caption_words,13validate_crop,14)15from video_timeline_editor.infrastructure.media import concat_clips, maybe_open, probe_media, render_clip_file16from video_timeline_editor.infrastructure.transcripts import load_transcript_words, resolve_path171819class TimelineService:20def __init__(self, project: Project, timeline_dir: Path):21self.project = project22self.timeline_dir = timeline_dir23self.overlay_styles = merge_styles(DEFAULT_OVERLAY_STYLES, project.overlay_styles)24self.caption_styles = merge_styles(DEFAULT_CAPTION_STYLES, project.caption_styles)2526def analyze_clip(self, clip: Clip) -> ClipAnalysis:27source_path = resolve_path(clip.source, self.timeline_dir)28if not source_path.exists():29raise FileNotFoundError(f"{clip.id}: missing source {source_path}")30if clip.crop:31validate_crop(clip.crop)32media = probe_media(source_path)33transcript_words = ()34if clip.captions:35transcript_words = load_transcript_words(clip.captions, self.timeline_dir)36timing = compute_clip_timing(clip, media, transcript_words)37caption_words = retime_caption_words(transcript_words, timing) if clip.captions else ()38return ClipAnalysis(39clip=clip,40source_path=source_path,41media=media,42timing=timing,43caption_words=caption_words,44)4546def analyze_timeline(self, clips: list[Clip]) -> dict:47total = 0.048manifest_clips = []49for clip in clips:50analysis = self.analyze_clip(clip)51manifest_clips.append(52{53"id": clip.id,54"label": clip.label,55"source": str(analysis.source_path),56"nominal_src_in": round(analysis.timing.nominal_in, 3),57"nominal_src_out": round(analysis.timing.nominal_out, 3),58"actual_src_in": round(analysis.timing.actual_in, 3),59"actual_src_out": 
round(analysis.timing.actual_out, 3),60"duration": round(analysis.timing.output_duration, 3),61"speed": round(clip.speed, 3),62"word_aligned_trim": analysis.timing.word_aligned,63"caption_words": len(analysis.caption_words),64"timeline_in": round(total, 3),65"timeline_out": round(total + analysis.timing.output_duration, 3),66}67)68total += analysis.timing.output_duration69return {70"project": {71"width": self.project.width,72"height": self.project.height,73"fps": self.project.fps,74"output": str(resolve_path(self.project.output, self.timeline_dir)),75"work_dir": str(resolve_path(self.project.work_dir, self.timeline_dir)),76},77"clips": manifest_clips,78"total_duration": round(total, 3),79}8081def render_timeline(self, clips: list[Clip], *, open_after: bool = False) -> dict:82work_dir = resolve_path(self.project.work_dir, self.timeline_dir)83output_path = resolve_path(self.project.output, self.timeline_dir)84work_dir.mkdir(parents=True, exist_ok=True)85output_path.parent.mkdir(parents=True, exist_ok=True)86caption_dir = work_dir / "_captions"87caption_dir.mkdir(parents=True, exist_ok=True)8889rendered_paths = []90manifest_clips = []91for clip in clips:92analysis = self.analyze_clip(clip)93caption_ass_path = None94if clip.captions and analysis.caption_words:95style = dict(self.caption_styles.get(clip.captions.style, self.caption_styles["karaoke"]))96ass_text = build_caption_ass(97analysis.caption_words,98clip.captions,99style,100self.project.width,101self.project.height,102)103caption_ass_path = caption_dir / f"{clip.id}.ass"104caption_ass_path.write_text(ass_text)105rendered_path = render_clip_file(106analysis,107width=self.project.width,108height=self.project.height,109fps=self.project.fps,110background=self.project.background,111overlay_styles=self.overlay_styles,112work_dir=work_dir,113caption_ass_path=caption_ass_path,114)115rendered_paths.append(rendered_path)116final_duration = probe_media(rendered_path).duration117manifest_clips.append(118{119"id": 
clip.id,120"label": clip.label,121"source": str(analysis.source_path),122"actual_src_in": round(analysis.timing.actual_in, 3),123"actual_src_out": round(analysis.timing.actual_out, 3),124"duration": round(final_duration, 3),125"word_aligned_trim": analysis.timing.word_aligned,126"caption_words": len(analysis.caption_words),127"caption_ass": str(caption_ass_path) if caption_ass_path else None,128}129)130concat_clips(rendered_paths, output_path, work_dir)131final_manifest = {132"project": {133"width": self.project.width,134"height": self.project.height,135"fps": self.project.fps,136"output": str(output_path),137"work_dir": str(work_dir),138},139"clips": manifest_clips,140"total_duration": round(probe_media(output_path).duration, 3),141"output": str(output_path),142}143if open_after or self.project.open_after:144maybe_open(output_path)145return final_manifest146