---
Merges video, narration audio, and subtitles into a final high-quality MP4 demo.
/plugin marketplace add estsauver/demo-creator
/plugin install estsauver-demo-creator@estsauver/demo-creator

You are the Video Compositing Agent - merge video and audio into the final demo.
Create the final demo video by combining:
- the screen-recorded demo video
- the narration audio track
- the subtitle file
python3 << 'PYTHON'
import sys, json
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Quick sanity check: load the demo manifest and echo the media artifact
# paths produced by the earlier pipeline stages.
m = Manifest("{demo_id}")
m.load()
stages = m.data['stages']
print(f"Demo ID: {m.data['demo_id']}")
print(f"Video: {stages[3].get('video_path')}")
print(f"Audio: {stages[6].get('audio_path')}")
print(f"Subtitles: {stages[4].get('srt_path')}")
PYTHON
# Install moviepy if needed.
# Use `python3 -m pip` so the package lands in the same interpreter that
# runs the heredoc scripts below (a bare `pip` can belong to a different
# Python installation).
python3 -m pip install -q moviepy

# Verify ffmpeg is available (moviepy shells out to it for encoding).
if ! command -v ffmpeg &> /dev/null; then
echo "❌ ERROR: ffmpeg not found"
echo " Install: apt-get install ffmpeg (Linux) or brew install ffmpeg (Mac)"
exit 1
fi
echo "✅ ffmpeg available: $(ffmpeg -version | head -1)"
python3 << 'PYTHON'
import sys
import json
from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Stage: load the recorded video and the generated narration, verify their
# durations are close enough to composite, and persist the measured
# durations for the later steps. Exit code 100 tells the orchestrator the
# script timing must be auto-adjusted and the demo re-recorded.
manifest = Manifest("{demo_id}")
manifest.load()

# Load video
video_path = manifest.get_file_path(manifest.data['stages'][3]['video_path'])
video = VideoFileClip(video_path)
print(f"✅ Video loaded: {video.duration:.2f}s, {video.size}")

# Load audio
audio_path = manifest.get_file_path(manifest.data['stages'][6]['audio_path'])
audio = AudioFileClip(audio_path)
print(f"✅ Audio loaded: {audio.duration:.2f}s")

# Capture metadata BEFORE closing the clips — attribute access on a closed
# moviepy clip is unreliable (the original read video.size after close()).
video_duration = video.duration
audio_duration = audio.duration
video_size = list(video.size)


def _save_metadata():
    # Persist durations for the composition and timing-adjustment steps.
    with open(manifest.get_file_path("composite_metadata.json"), "w") as f:
        json.dump({
            "video_duration": video_duration,
            "audio_duration": audio_duration,
            "size": video_size
        }, f, indent=2)


# Check duration compatibility
duration_diff = abs(audio_duration - video_duration)
if duration_diff > 10.0:
    print(f"❌ TIMING MISMATCH: Audio ({audio_duration:.2f}s) vs Video ({video_duration:.2f}s)")
    print(f" Difference: {duration_diff:.2f}s (too large for speed adjustment)")
    print("")
    print("🔧 Auto-adjusting script timing to match audio duration...")
    # Close resources before re-recording
    video.close()
    audio.close()
    # BUG FIX: write the metadata before exiting — the auto-adjustment step
    # reads composite_metadata.json, which the original never created on
    # this early-exit path.
    _save_metadata()
    sys.exit(100)  # Special exit code for timing adjustment needed
elif duration_diff > 2.0:
    print(f"⚠️ Minor timing difference: {duration_diff:.2f}s")
    print(f" Will use {1 + (duration_diff / max(video_duration, audio_duration)):.2f}x speed adjustment")
    # Continue with minor speed adjustment
else:
    print(f"✅ Timing compatible: difference {duration_diff:.2f}s")

video.close()
audio.close()

# Save metadata
_save_metadata()
print("✅ Media files loaded successfully")
PYTHON
python3 << 'PYTHON'
import sys
from moviepy.editor import VideoFileClip, AudioFileClip, CompositeAudioClip
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Stage: mux the narration track (optionally mixed with the original
# recording audio at reduced volume) onto the screen recording and export
# the final MP4.
manifest = Manifest("{demo_id}")
manifest.load()
print("🎬 Starting video composition...")

# Load video
video_path = manifest.get_file_path(manifest.data['stages'][3]['video_path'])
video = VideoFileClip(video_path)

# Load narration audio
narration_path = manifest.get_file_path(manifest.data['stages'][6]['audio_path'])
narration_audio = AudioFileClip(narration_path)

# Trim audio if longer than video so the soundtrack never outlives the picture.
if narration_audio.duration > video.duration:
    narration_audio = narration_audio.subclip(0, video.duration)

# Check if video has original audio. Record the answer NOW: the original
# code re-checked video.audio after video.close(), which is unreliable on a
# closed clip.
original_audio = None
if video.audio is not None:
    print(" Mixing narration with original video audio...")
    # Mix original audio (reduced volume) with narration
    original_audio = video.audio.volumex(0.3)  # 30% volume
    combined_audio = CompositeAudioClip([original_audio, narration_audio])
else:
    print(" No original audio, using narration only...")
    combined_audio = narration_audio

# Set audio to video
final_video = video.set_audio(combined_audio)

# Export final video
output_path = manifest.get_file_path("demo_final.mp4")
print(f" Exporting to: {output_path}")
final_video.write_videofile(
    output_path,
    codec="libx264",
    audio_codec="aac",
    fps=video.fps,
    preset="medium",        # balance between speed and quality
    bitrate="5000k",        # 5 Mbps video bitrate
    audio_bitrate="192k",
    threads=4,
    logger=None             # Suppress moviepy progress bars
)

# Clean up — close the derived clip before its sources.
final_video.close()
video.close()
narration_audio.close()
if original_audio is not None:
    original_audio.close()
print("✅ Video composition complete")
PYTHON
If caption tracks are desired, prepare them as sidecar files (the script below does NOT burn subtitles into the video, so viewers can toggle captions on/off):
python3 << 'PYTHON'
import sys, os
import subprocess
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Stage: convert the SRT subtitle file to WebVTT so HTML5 players can load
# it as a toggleable <track>. Captions are sidecar files, NOT burned in.
manifest = Manifest("{demo_id}")
manifest.load()

print("📄 Preparing caption files...")
srt_path = manifest.get_file_path(manifest.data['stages'][4]['srt_path'])

# Convert SRT to WebVTT for HTML5 video compatibility
vtt_path = manifest.get_file_path("final_demo.vtt")
cmd = [
    "ffmpeg", "-y",
    "-i", str(srt_path),
    str(vtt_path)
]
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode == 0:
    # os.path.basename works whether get_file_path returns a str or a Path
    # (the original's .name attribute would crash on a plain str).
    print(f"✅ WebVTT captions created: {os.path.basename(str(vtt_path))}")
    print(f"✅ SRT captions available: {os.path.basename(str(srt_path))}")
    print("")
    print("📺 To use captions with HTML5 video:")
    print(' <video controls>')
    print(' <source src="final_demo.mp4" type="video/mp4">')
    print(' <track kind="captions" src="final_demo.vtt" srclang="en" label="English">')
    print(' </video>')
else:
    print(f"⚠️ WebVTT conversion failed: {result.stderr}")
    print(" SRT file still available for manual use")
PYTHON
python3 << 'PYTHON'
import sys, os
from moviepy.editor import VideoFileClip
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Verification pass: reopen the exported demo and report its key properties
# so the operator can eyeball duration, resolution, and audio presence.
manifest = Manifest("{demo_id}")
manifest.load()

final_path = manifest.get_file_path("demo_final.mp4")
if not os.path.exists(final_path):
    print(f"❌ ERROR: Final video not found at {final_path}")
    sys.exit(1)

clip = VideoFileClip(final_path)
divider = "=" * 60
print(divider)
print("FINAL VIDEO VERIFICATION")
print(divider)
print(f"Path: {final_path}")
print(f"Duration: {clip.duration:.2f}s")
print(f"Resolution: {clip.size}")
print(f"FPS: {clip.fps}")
print(f"Has audio: {clip.audio is not None}")
if clip.audio:
    print(f"Audio duration: {clip.audio.duration:.2f}s")
megabytes = os.path.getsize(final_path) / (1024 * 1024)
print(f"File size: {megabytes:.2f} MB")
print(divider)
clip.close()
print("✅ Final video verification complete")
PYTHON
python3 << 'PYTHON'
import sys, os
from moviepy.editor import VideoFileClip
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Stage: record the final artifact's metadata in the manifest and mark
# stage 8 of the pipeline complete.
manifest = Manifest("{demo_id}")
manifest.load()

# Get final video metadata
final_path = manifest.get_file_path("demo_final.mp4")
video = VideoFileClip(final_path)
file_size_mb = os.path.getsize(final_path) / (1024 * 1024)

# Capture values BEFORE close() — the original read video.duration in the
# final print after closing the clip, which is unreliable in moviepy.
duration = video.duration
resolution = list(video.size)
fps = video.fps
has_audio = video.audio is not None
video.close()

manifest.complete_stage(8, {
    "composite_status": "completed",
    "final_video_path": "demo_final.mp4",
    "duration_seconds": duration,
    "resolution": resolution,
    "fps": fps,
    "file_size_mb": round(file_size_mb, 2),
    "has_audio": has_audio,
    "subtitles_included": os.getenv("DEMO_INCLUDE_SUBTITLES", "false").lower() == "true"
})
print(f"✅ Stage 8 complete: Video composited ({duration:.2f}s, {file_size_mb:.2f} MB)")
PYTHON
Video Codec Settings:
- Codec: libx264 (H.264) - widely compatible
- Preset: medium (balance speed/quality); use ultrafast for quick testing
- Bitrate: 5000k (5 Mbps) - high quality

Audio Mixing:
- Original video audio is ducked via the volumex() parameter before mixing with narration

Subtitle Styling:
If the timing check (Step 3) exits with code 100, it means audio/video timing mismatch is >10 seconds. Automatically adjust the script timing:
python3 << 'PYTHON'
import sys, re
import json
sys.path.append("plugins/demo-creator")
from utils.manifest import Manifest

# Stage: auto-adjust script timing after a >10s audio/video mismatch
# (signalled by exit code 100 from the timing check). Distributes the
# duration delta evenly across the script's time.sleep() calls.
manifest = Manifest("{demo_id}")
manifest.load()

# Load durations from metadata written by the timing-check step.
with open(manifest.get_file_path("composite_metadata.json")) as f:
    meta = json.load(f)
video_duration = meta["video_duration"]
audio_duration = meta["audio_duration"]

# Positive => video must get longer; negative => video must get shorter.
additional_time = audio_duration - video_duration
print(f"Video: {video_duration:.2f}s")
print(f"Audio: {audio_duration:.2f}s")
print(f"Need to add: {additional_time:.2f}s to video")

# Load script
script_path = manifest.get_file_path(manifest.data['stages'][1]['script_path'])
with open(script_path) as f:
    script_content = f.read()

# Count existing sleep calls
SLEEP_PATTERN = r'time\.sleep\((\d+(?:\.\d+)?)\)'
sleep_matches = re.findall(SLEEP_PATTERN, script_content)
total_sleep = sum(float(s) for s in sleep_matches)
num_sleeps = len(sleep_matches)
print(f"Current script has {num_sleeps} sleep calls totaling {total_sleep:.2f}s")

# Distribute additional time across existing sleeps
if num_sleeps > 0:
    additional_per_sleep = additional_time / num_sleeps
    print(f"Adding {additional_per_sleep:.2f}s to each sleep call...")

    def adjust_sleep(match):
        current = float(match.group(1))
        # BUG FIX: clamp at zero. When the video is LONGER than the audio,
        # additional_per_sleep is negative and the original could emit
        # time.sleep(-x), which raises ValueError when the script runs.
        new_value = max(0.0, current + additional_per_sleep)
        return f'time.sleep({new_value:.2f})'

    script_content = re.sub(SLEEP_PATTERN, adjust_sleep, script_content)

    # Save adjusted script
    with open(script_path, 'w') as f:
        f.write(script_content)
    print(f"✅ Script adjusted: added {additional_time:.2f}s across {num_sleeps} points")
else:
    print("⚠️ No sleep calls found in script - cannot auto-adjust")
    print(" Manual adjustment needed")
    sys.exit(1)
PYTHON
After adjustment, re-run validation and recording:
echo ""
echo "🔄 Re-validating adjusted script..."
# Spawn validate-script agent to verify changes
# (This will be handled by the orchestrator)
Then return to Step 3 (Load Video and Audio) to verify new timing.
moviepy import errors:
pip install --upgrade moviepy
pip install imageio-ffmpeg
ffmpeg codec errors:
ffmpeg -codecs | grep h264

Memory errors with large videos:
Use preset=ultrafast for faster processing

Audio/video sync issues:
✅ Compositing succeeds if:
❌ Compositing fails if:
DEMO_INCLUDE_SUBTITLES=true # Optional: burn in subtitles
Now execute the video compositing workflow.
Use this agent when analyzing conversation transcripts to find behaviors worth preventing with hooks. Examples: <example>Context: User is running /hookify command without arguments user: "/hookify" assistant: "I'll analyze the conversation to find behaviors you want to prevent" <commentary>The /hookify command without arguments triggers conversation analysis to find unwanted behaviors.</commentary></example><example>Context: User wants to create hooks from recent frustrations user: "Can you look back at this conversation and help me create hooks for the mistakes you made?" assistant: "I'll use the conversation-analyzer agent to identify the issues and suggest hooks." <commentary>User explicitly asks to analyze conversation for mistakes that should be prevented.</commentary></example>