Loading source
Pulling the file list, source metadata, and syntax-aware rendering for this listing.
Source from bundle
Build or revise a reusable FFmpeg timeline for short-form video editing. Use when the user wants an agent-editable API for trimming clips, cropping, fitting to frame, applying transforms, and adding text overlays or captions.
Files
Skill
Size
Entrypoint
Format
Open file
Syntax-highlighted preview of this file as included in the skill package.
scripts/video_timeline_editor/__init__.py
"""Convenience constructors for the video timeline domain model.

Each factory here is a thin, keyword-friendly wrapper around one of the
domain dataclasses, so timeline definitions can be written declaratively
without constructing the model types by hand.
"""

from .domain.model import CaptionTrack, Clip, CropBox, FitSpec, OverlaySpec, Project, TransformSpec


def project(
    *,
    width: int = 1080,
    height: int = 1920,
    fps: int = 30,
    background: str = "black",
    output: str = "final/output.mp4",
    work_dir: str = "final/timeline-render",
    open_after: bool = False,
    overlay_styles: dict | None = None,
    caption_styles: dict | None = None,
) -> Project:
    """Build a Project with vertical short-form defaults (1080x1920 @ 30 fps).

    Falsy ``overlay_styles`` / ``caption_styles`` are normalized to fresh
    empty dicts so the model never receives ``None``.
    """
    return Project(
        width=width,
        height=height,
        fps=fps,
        background=background,
        output=output,
        work_dir=work_dir,
        open_after=open_after,
        overlay_styles=overlay_styles if overlay_styles else {},
        caption_styles=caption_styles if caption_styles else {},
    )


def clip(
    *,
    id: str,
    source: str,
    label: str | None = None,
    take: tuple[float | None, float | None] | None = None,
    src_in: float | None = None,
    src_out: float | None = None,
    trim: tuple[float, float] | None = None,
    trim_start: float = 0.0,
    trim_end: float = 0.0,
    speed: float = 1.0,
    fit: FitSpec | None = None,
    crop: CropBox | None = None,
    transform: TransformSpec | None = None,
    overlays: list[OverlaySpec] | None = None,
    captions: CaptionTrack | None = None,
    muted: bool = False,
    volume: float = 1.0,
) -> Clip:
    """Build a Clip.

    ``take`` is shorthand for ``(src_in, src_out)`` and ``trim`` for
    ``(trim_start, trim_end)``; when supplied, each shorthand tuple
    overrides the corresponding individual arguments.  A missing label
    falls back to the clip id; a missing fit defaults to ``contain()``
    and a missing transform to an identity ``TransformSpec``.
    """
    # Shorthand tuples win over the individual keyword arguments.
    if take is not None:
        src_in, src_out = take
    if trim is not None:
        trim_start, trim_end = trim
    return Clip(
        id=id,
        source=source,
        label=label if label else id,
        src_in=src_in,
        src_out=src_out,
        trim_start=trim_start,
        trim_end=trim_end,
        speed=speed,
        fit=fit if fit else contain(),
        crop=crop,
        transform=transform if transform else TransformSpec(),
        overlays=tuple(overlays) if overlays else (),
        captions=captions,
        muted=muted,
        volume=volume,
    )


def contain(*, background: str = "black") -> FitSpec:
    """Build a FitSpec with ``mode="contain"`` and the given background."""
    return FitSpec(mode="contain", background=background)


def cover(*, anchor: str = "center", background: str | None = None) -> FitSpec:
    """Build a FitSpec with ``mode="cover"``, anchored at *anchor*."""
    return FitSpec(mode="cover", anchor=anchor, background=background)


def crop_box(x: float, y: float, width: float, height: float) -> CropBox:
    """Build a CropBox from position and size."""
    return CropBox(x=x, y=y, width=width, height=height)


def transform(
    *,
    hflip: bool = False,
    vflip: bool = False,
    zoom: float = 1.0,
    video_pts: float = 1.0,
    audio_tempo: float = 1.0,
    eq: dict | None = None,
) -> TransformSpec:
    """Build a TransformSpec; a falsy ``eq`` becomes a fresh empty dict."""
    return TransformSpec(
        hflip=hflip,
        vflip=vflip,
        zoom=zoom,
        video_pts=video_pts,
        audio_tempo=audio_tempo,
        eq=eq if eq else {},
    )


def text_overlay(
    text: str,
    *,
    style: str = "default",
    start: float | None = None,
    end: float | None = None,
    options: dict | None = None,
    **overrides,
) -> OverlaySpec:
    """Build an OverlaySpec.

    Extra keyword arguments are merged over ``options``, so an explicit
    override always beats a value carried in the options dict.
    """
    opts = {**(options or {}), **overrides}
    return OverlaySpec(
        text=text,
        style=style,
        start=start,
        end=end,
        options=opts,
    )


def captions(
    transcript: str,
    *,
    style: str = "karaoke",
    mode: str = "karaoke",
    trim: str = "word",
    max_words_per_cue: int = 4,
    source_offset: float = 0.0,
    replacements: dict[str, str] | None = None,
    skip_tokens: list[str] | tuple[str, ...] | None = None,
) -> CaptionTrack:
    """Build a CaptionTrack from a transcript string.

    ``replacements`` defaults to an empty dict and ``skip_tokens`` is
    normalized to a tuple so the model holds an immutable sequence.
    """
    return CaptionTrack(
        transcript=transcript,
        style=style,
        mode=mode,
        trim=trim,
        max_words_per_cue=max_words_per_cue,
        source_offset=source_offset,
        replacements=replacements if replacements else {},
        skip_tokens=tuple(skip_tokens) if skip_tokens else (),
    )


__all__ = [
    "project",
    "clip",
    "contain",
    "cover",
    "crop_box",
    "transform",
    "text_overlay",
    "captions",
    "CaptionTrack",
    "Clip",
    "CropBox",
    "FitSpec",
    "OverlaySpec",
    "Project",
    "TransformSpec",
]