"""Pydantic models for SEEDREEL configuration, indexes, and plans."""

from datetime import datetime
from pathlib import Path
from typing import Literal

from pydantic import BaseModel, Field


class VideoMetadata(BaseModel):
    path: Path
    filename: str
    duration: float
    fps: float
    width: int
    height: int
    codec: str
    has_audio: bool
    file_size_bytes: int


class VideoIndex(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    source_folder: Path
    videos: list[VideoMetadata]


# NOTE: PlanConstraints, ClipSegment, PercussionTrack, and Preset are defined
# elsewhere in this module; this listing is an excerpt.
class Plan(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    seed: int
    constraints: PlanConstraints
    segments: list[ClipSegment]
    percussion_track: PercussionTrack = Field(default_factory=PercussionTrack)


class RenderSettings(BaseModel):
    codec: str = "libx264"
    preset: str = "medium"
    crf: int = 18
    audio_codec: str = "aac"
    audio_bitrate: str = "192k"
    pixel_format: str = "yuv420p"
    output_fps: int = 30
    audio_crossfade_duration: float = 0.05


PRESETS: dict[str, Preset] = {
    "calm": Preset(name="calm", description="Slow, meditative pace with longer clips"),
    "chaotic": Preset(name="chaotic", description="Fast, energetic cuts with short clips"),
    "rapid": Preset(name="rapid", description="Very fast cuts, almost strobing"),
    "staccato": Preset(name="staccato", description="Quick punchy cuts with slight variation"),
    "pulsed": Preset(name="pulsed", description="Wave-like pacing that builds and releases"),
    "balanced": Preset(name="balanced", description="Default balanced settings"),
}
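
These models serialize cleanly to and from JSON, which is what makes indexes and plans shareable artifacts. A quick sketch of the round trip (the file names and numbers here are made up for illustration):

meta = VideoMetadata(
    path=Path("videos/clip01.mp4"),
    filename="clip01.mp4",
    duration=12.4,
    fps=29.97,
    width=1920,
    height=1080,
    codec="h264",
    has_audio=True,
    file_size_bytes=48_230_112,
)
index = VideoIndex(source_folder=Path("videos"), videos=[meta])

# Round-trip through JSON using the Pydantic v2 API: same data in, same data out.
as_json = index.model_dump_json(indent=2)
restored = VideoIndex.model_validate_json(as_json)
assert restored.videos[0].filename == "clip01.mp4"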
"""Pydantic models for SEEDREEL configuration, indexes, and plans."""

from datetime import datetime
from pathlib import Path
from typing import Literal

from pydantic import BaseModel, Field


class VideoMetadata(BaseModel):
    path: Path
    filename: str
    duration: float
    fps: float
    width: int
    height: int
    codec: str
    has_audio: bool
    file_size_bytes: int


class VideoIndex(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    source_folder: Path
    videos: list[VideoMetadata]


class Plan(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    seed: int
    constraints: PlanConstraints
    segments: list[ClipSegment]
    percussion_track: PercussionTrack = Field(default_factory=PercussionTrack)


class RenderSettings(BaseModel):
    codec: str = "libx264"
    preset: str = "medium"
    crf: int = 18
    audio_codec: str = "aac"
    audio_bitrate: str = "192k"
    pixel_format: str = "yuv420p"
    output_fps: int = 30
    audio_crossfade_duration: float = 0.05


PRESETS: dict[str, Preset] = {
    "calm": Preset(name="calm", description="Slow, meditative pace with longer clips"),
    "chaotic": Preset(name="chaotic", description="Fast, energetic cuts with short clips"),
    "rapid": Preset(name="rapid", description="Very fast cuts, almost strobing"),
    "staccato": Preset(name="staccato", description="Quick punchy cuts with slight variation"),
    "pulsed": Preset(name="pulsed", description="Wave-like pacing that builds and releases"),
    "balanced": Preset(name="balanced", description="Default balanced settings"),
}
"""Pydantic models for SEEDREEL configuration, indexes, and plans."""

from datetime import datetime
from pathlib import Path
from typing import Literal

from pydantic import BaseModel, Field


class VideoMetadata(BaseModel):
    path: Path
    filename: str
    duration: float
    fps: float
    width: int
    height: int
    codec: str
    has_audio: bool
    file_size_bytes: int


class VideoIndex(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    source_folder: Path
    videos: list[VideoMetadata]


class Plan(BaseModel):
    created_at: datetime = Field(default_factory=datetime.now)
    seed: int
    constraints: PlanConstraints
    segments: list[ClipSegment]
    percussion_track: PercussionTrack = Field(default_factory=PercussionTrack)


class RenderSettings(BaseModel):
    codec: str = "libx264"
    preset: str = "medium"
    crf: int = 18
    audio_codec: str = "aac"
    audio_bitrate: str = "192k"
    pixel_format: str = "yuv420p"
    output_fps: int = 30
    audio_crossfade_duration: float = 0.05


PRESETS: dict[str, Preset] = {
    "calm": Preset(name="calm", description="Slow, meditative pace with longer clips"),
    "chaotic": Preset(name="chaotic", description="Fast, energetic cuts with short clips"),
    "rapid": Preset(name="rapid", description="Very fast cuts, almost strobing"),
    "staccato": Preset(name="staccato", description="Quick punchy cuts with slight variation"),
    "pulsed": Preset(name="pulsed", description="Wave-like pacing that builds and releases"),
    "balanced": Preset(name="balanced", description="Default balanced settings"),
}

Vibe Coding — A 3-Part Series / December 2025

VIBE-CODING

a video-cutting CLI tool

This Isn't a Technical Post (On Purpose)

This project wasn't about engineering clever algorithms or inventing new tech. The point was recognizing that the pieces already existed and letting vibe-coding handle the glue.

What I brought

What I brought to the table wasn't deep implementation knowledge. It was direction.

FFmpeg can cut and re-encode video from the command line.

SuperCollider can generate audio procedurally from code.

Pedalboard makes real VST/AU audio effects accessible from Python (a sketch follows below).

Deterministic randomness—systems where chance exists, but outcomes are repeatable.

That was enough.
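
To give a flavor of how little glue the Pedalboard piece needs, here's a rough sketch of running audio through a VST3 effect. The plugin path and the mix parameter name are assumptions; both vary by plugin:

from pedalboard import Pedalboard, load_plugin
from pedalboard.io import AudioFile

# Load a third-party VST3 effect; the path here is hypothetical.
shimmer = load_plugin("/Library/Audio/Plug-Ins/VST3/ValhallaShimmer.vst3")
shimmer.mix = 0.3  # exposed parameters depend on the plugin

board = Pedalboard([shimmer])

with AudioFile("dry.wav") as f:
    audio = f.read(f.frames)
    samplerate = f.samplerate

wet = board(audio, samplerate)  # process the whole buffer in one call

with AudioFile("wet.wav", "w", samplerate=samplerate, num_channels=wet.shape[0]) as f:
    f.write(wet)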

Vibe-Coding as the Medium

Once those constraints were clear, vibe-coding took over the details.

I just needed to:

Point the system at a folder of videos

Decide that outputs should be reproducible

Require that audio and video be generated together

From there, the implementation naturally emerged. Metadata became an index. Randomness became seeded. Cuts became plans instead of impulses. The system started to feel intentional without being over-designed.
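
To make "cuts became plans" concrete, here's a minimal sketch of a seeded planner in the spirit of the models shown at the top. make_plan and its defaults are my illustration, not SEEDREEL's actual code; it expects objects with the duration and filename fields from VideoMetadata:

import random

def make_plan(videos, seed, num_clips=8, min_len=0.5, max_len=3.0):
    """Pick source clips and cut points deterministically from a seed."""
    rng = random.Random(seed)  # a local RNG, so nothing depends on global state
    segments = []
    for _ in range(num_clips):
        video = rng.choice(videos)
        clip_len = rng.uniform(min_len, max_len)
        start = rng.uniform(0.0, max(0.0, video.duration - clip_len))
        segments.append({
            "source": video.filename,
            "start": round(start, 3),
            "duration": round(clip_len, 3),
        })
    return {"seed": seed, "segments": segments}

Every random choice flows through rng, so the same index and the same seed reproduce the plan exactly.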

The core idea

Deterministic Randomness Is the Real Trick

The core idea I keep coming back to isn't FFmpeg, audio synthesis, or any other piece of tech: it's deterministic randomness.

Same seed in.
Same result out.

That single constraint turns a generative tool into something you can:

Re-render

Tweak

Share

Build on

It's the difference between chaos and composition. The computer gets to explore, but you get to keep the results.
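
In Python terms, the whole constraint is a single decision: build the RNG from the seed. A toy demonstration, not project code:

import random

def cut_points(seed: int, n: int = 5) -> list[float]:
    rng = random.Random(seed)
    return [round(rng.uniform(0.0, 60.0), 2) for _ in range(n)]

assert cut_points(42) == cut_points(42)  # same seed in, same result out
assert cut_points(42) != cut_points(43)  # change the seed, change the edit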

Tools, Not Magic

None of this required bleeding-edge AI video or massive compute. It's mostly just:

  1. Existing tools used together in a new way
  2. Code written quickly, intuitively, and revised often
  3. Letting taste and intention guide decisions instead of over-specification

Vibe-coding didn't replace thinking; it compressed the distance between idea and artifact.

Output

Two Videos. Different Seeds.

Output 01

Output 02

Two different videos, generated from the same source material with different seeds. Change the seed and you get a different result, but one that is always reproducible.

Closing thought

This project exists because I recognized what was possible, not because I obsessed over how every part worked.

That's the real appeal of vibe-coding for me:

you bring the instincts, the references, and the constraints...
the code fills in the gaps.

Meta

SEEDREEL is a vibe-coded pipeline that turns a folder of clips into something intentional, repeatable, and shareable. It uses FFprobe to build an index, a seeded planner to generate an edit decision list, and FFmpeg to handle frame-accurate cuts and rendering. Audio can come from pre-baked tones or be generated on the fly via SuperCollider, with optional VST/AU effects through Spotify's Pedalboard (usually Valhalla Shimmer). Everything is reproducible: same seed, same output. The system creates a fast, playful loop for experimentation without losing control.
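
As a flavor of the indexing step: it amounts to shelling out to ffprobe and mapping its JSON onto VideoMetadata. This sketch is my reconstruction rather than SEEDREEL's source, though the ffprobe flags and JSON fields are standard:

import json
import subprocess
from pathlib import Path

def probe_video(path: Path) -> dict:
    """Return the fields VideoMetadata needs, straight from ffprobe's JSON."""
    result = subprocess.run(
        ["ffprobe", "-v", "error", "-print_format", "json",
         "-show_format", "-show_streams", str(path)],
        capture_output=True, text=True, check=True,
    )
    info = json.loads(result.stdout)
    video = next(s for s in info["streams"] if s["codec_type"] == "video")
    num, den = video["r_frame_rate"].split("/")  # e.g. "30000/1001"
    return {
        "path": path,
        "filename": path.name,
        "duration": float(info["format"]["duration"]),
        "fps": float(num) / float(den),
        "width": video["width"],
        "height": video["height"],
        "codec": video["codec_name"],
        "has_audio": any(s["codec_type"] == "audio" for s in info["streams"]),
        "file_size_bytes": int(info["format"]["size"]),
    }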

CLI arguments

SEEDREEL exposes a set of composable commands that mirror the pipeline: index → plan → render, plus a one-shot runner.

Core commands

  • seedreel index-videos <dir>: scan clips and build a video index
    seedreel index-videos ./videos -o video_index.json
  • seedreel plan: generate a deterministic edit plan (EDL)
    seedreel plan --seed 123 --num-clips 16 --harmonic --key G
  • seedreel render <plan.json>: render a plan to final video
    seedreel render plan.json --fx --fx-preset TajMahal --fx-wetdry 0.3
  • seedreel run: one-shot index → plan → render
    seedreel run --seed 42 --preset calm --duration 30

Common arguments

  • --seed INT: reproducible randomness
  • --num-clips INT / --duration FLOAT: structure the edit
  • --preset {calm,chaotic,pulsed,balanced}: pacing profiles
  • --harmonic / --no-harmonic, --key TEXT: music system controls
  • --fx, --fx-preset TEXT, --fx-wetdry FLOAT: audio effects

Examples

# Fast preview 
seedreel run --seed 7 --preset chaotic --duration 20
# Harmonic edit with effects 
seedreel run --seed 123 --harmonic --key G --fx --fx-preset DeepBlueDay

Vibe Coding — A 3-Part Series

This post is part of a short series documenting a year of vibe coding: small, exploratory projects built quickly and with a focus on fun, treating tools like LLMs, CLIs, and game data as creative materials rather than products.

  1. RogueLLMania: Running an LLM in Your Game Loop
    Procedural narration in a roguelike using a local model.
    Read it here
  2. Vibe-Coding a Video-Cutting CLI Tool
    FFmpeg, SuperCollider, and ambient automation.
    You are here
  3. A Warhammer 40K MCP Server
    Structured tabletop game knowledge for AI agents.
    Read it here
