Recording

Record your E2E tests.
Watch them back.

Capture browser tests as MP4 video with live panel overlays. Minimal dependencies, easy install. SDKs for Python, Ruby, Go, TypeScript, Node, and Java. Generate interactive HTML reports where clicking a step seeks the video to that exact moment.

from thea import RecorderClient

# Connect to a running recorder server and bring up the virtual display.
client = RecorderClient("http://localhost:9123")
client.start_display()

# Everything inside this context is captured as one MP4 scenario.
with client.recording("login_test"):
    # run your browser test
    pass

client.download_recording("login_test", "login_test.mp4")
# Connect to a running recorder server and bring up the virtual display.
client = Recorder::Client.new("http://localhost:9123")
client.start_display

# Everything inside this block is captured as one MP4 scenario.
client.recording("login_test") do
  # run your browser test
end

client.download_recording("login_test", "login_test.mp4")
// Connect to a running recorder server and bring up the virtual display.
client := recorder.NewClient("http://localhost:9123")
client.StartDisplay(ctx)

// Recording runs until stop() is called; defer scopes it to this function.
stop, _ := client.Recording(ctx, "login_test")
defer stop()

// run your browser test

client.DownloadRecordingToFile(ctx, "login_test", "login_test.mp4")
import { RecorderClient } from "thea-recorder"; const client: RecorderClient = new RecorderClient({ url: "http://localhost:9123", }); await client.startDisplay(); await client.recording("login_test", async () => { // run your Playwright test }); await client.downloadRecordingToFile("login_test", "login_test.mp4");
const { RecorderClient } = require("thea-recorder");

// Connect to a running recorder server and bring up the virtual display.
const client = new RecorderClient({ url: "http://localhost:9123" });
await client.startDisplay();

// The async callback is captured as one MP4 scenario.
await client.recording("login_test", async () => {
  // run your browser test
});

await client.downloadRecordingToFile("login_test", "login_test.mp4");
# start display and record
recorder start-display
recorder start-recording --name login_test

# run your test, then stop and download
recorder stop-recording
recorder download --name login_test -o login_test.mp4
# start display and record
curl -X POST http://localhost:9123/display/start
curl -X POST http://localhost:9123/recording/start \
  -H "Content-Type: application/json" \
  -d '{"name": "login_test"}'

# run your test, then stop and download
curl -X POST http://localhost:9123/recording/stop
curl -o login_test.mp4 http://localhost:9123/recordings/login_test
pip install thea-recorder

E2E failures are a black box

Your browser was right there doing the thing. You just weren't watching.

Without recorder

The debugging loop from hell

A test fails in CI. You read the stack trace. You try to reproduce locally. You can't. You add more logging, push, wait 20 minutes for CI, read logs again. You do this three more times before you find the actual problem: a modal was covering the button.

1. Read the failure log
2. Try to reproduce locally
3. Add more logging
4. Push, wait for CI, read logs again
5. goto 3
With recorder

You stop guessing. You start watching.

Every scenario is recorded as MP4 video with a live panel overlay showing test status, current step, and any context you want. The HTML report lets you click a step and the video seeks to that exact moment. You see the modal covering the button in five seconds.

1. Open report.html
2. Click the failed step
3. Watch what happened
4. Fix it

Everything you need.
Nothing you don't.

Minimal dependencies, easy install.

MP4 video capture

Records the Xvfb virtual display via ffmpeg with H.264 encoding. One video per scenario. Mobile-compatible MP4 output.

Panel overlay system

Named columns below the browser viewport with live-updating text. Fixed or auto-width layout. Smart scrolling keeps the active line visible.

📖

Interactive reports

Single-page HTML with embedded video players. Click any step to seek the video. Playback highlights the current step automatically.

🔌

Framework agnostic

Works with Behave, pytest-bdd, Cucumber, or any runner. The report takes a simple list of dicts — no framework coupling.

📦

Zero dependencies

Pure Python stdlib. No transitive pip dependencies. No version conflicts. Just add it to your Docker image and go.

🔨

Docker ready

Example Dockerfile included. Works with any CI system that supports Docker. Just mount a volume for the recordings.

HTTP server + native SDKs

One server process. Any language. The recorder runs as a service that your test suite talks to via HTTP.

Your code (Go · Py · Ruby · TS · Node): your test suite drives a browser on display :99 against your app, and manages the Recorder Server (thea serve :9123) over HTTP. The server runs Xvfb + ffmpeg on virtual display :99, which the browser renders on, and produces MP4 recordings plus an HTML report (report.html) — downloadable via the HTTP API.

Record tests in any language.
Under 10 lines of code.

SDKs for Python, Ruby, Go, TypeScript, Node, and Java. Minimal dependencies, easy install. Same API everywhere.

from thea import RecorderClient

# Connect, block until the server is up, then start the virtual display.
client = RecorderClient("http://localhost:9123")
client.wait_until_ready()
client.start_display()

# Everything inside this context is captured as one MP4 scenario.
with client.recording("login_test"):
    # run your browser test
    pass

client.download_recording("login_test", "login_test.mp4")
require "recorder"

# Connect, block until the server is up, then start the virtual display.
client = Recorder::Client.new("http://localhost:9123")
client.wait_until_ready
client.start_display

# Everything inside this block is captured as one MP4 scenario.
client.recording("login_test") do
  # run your browser test
end

client.download_recording("login_test", "login_test.mp4")
// Connect, block until the server is up, then start the virtual display.
client := recorder.NewClient("http://localhost:9123")
client.WaitUntilReady(ctx, 30*time.Second)
client.StartDisplay(ctx)

// Recording runs until stop() is called; defer scopes it to this function.
stop, _ := client.Recording(ctx, "login_test")
defer stop()

// run your browser test

client.DownloadRecordingToFile(ctx, "login_test", "login_test.mp4")
import { RecorderClient } from "thea-recorder"; const client: RecorderClient = new RecorderClient({ url: "http://localhost:9123", }); await client.waitUntilReady(); await client.startDisplay(); await client.recording("login_test", async () => { // run your Playwright test }); await client.downloadRecordingToFile("login_test", "login_test.mp4");
const { RecorderClient } = require("thea-recorder");

// Connect, wait until the server is up, then start the virtual display.
const client = new RecorderClient({ url: "http://localhost:9123" });
await client.waitUntilReady();
await client.startDisplay();

// The async callback is captured as one MP4 scenario.
await client.recording("login_test", async () => {
  // run your browser test
});

await client.downloadRecordingToFile("login_test", "login_test.mp4");
# start display and record
recorder start-display
recorder start-recording --name login_test

# run your test, then stop and download
recorder stop-recording
recorder download --name login_test -o login_test.mp4
# start display and record
curl -X POST http://localhost:9123/display/start
curl -X POST http://localhost:9123/recording/start \
  -H "Content-Type: application/json" \
  -d '{"name": "login_test"}'

# run your test, then stop and download
curl -X POST http://localhost:9123/recording/stop
curl -o login_test.mp4 http://localhost:9123/recordings/login_test

Drop-in. Five minutes.

Start the server, connect from any language, or use the CLI directly from bash.

# features/environment.py — drop-in Behave integration
from recorder import Recorder, generate_report


def before_all(context):
    """Start one shared display/recorder for the whole run."""
    recorder = Recorder(output_dir="/app/recordings", display=99)
    recorder.add_panel("status", title="Status", width=120)
    recorder.add_panel("scenario", title="Scenario")
    recorder.start_display()
    context.recorder = recorder
    context.recorded_videos = []


def before_scenario(context, scenario):
    """Begin a fresh MP4 for each scenario."""
    context.recorder.start_recording(scenario.name)
    context.recorder.update_panel("status", "Running")


def after_scenario(context, scenario):
    """Stop the capture and remember its metadata for the report."""
    video = context.recorder.stop_recording()
    context.recorded_videos.append({
        "feature": scenario.feature.name,
        "scenario": scenario.name,
        "status": scenario.status.name,
        "video": video,
    })


def after_all(context):
    """Tear down the display and emit the clickable HTML report."""
    context.recorder.cleanup()
    generate_report(context.recorded_videos, title="E2E Test Report")
# Dockerfile
FROM python:3.12-slim

# Browser driver, virtual display, and video encoder for the recorder.
RUN apt-get update && apt-get install -qyy --no-install-recommends \
        chromium-driver xvfb ffmpeg x11-xserver-utils \
        fonts-dejavu-core \
    && rm -rf /var/lib/apt/lists/*

RUN pip install thea-recorder behave selenium

COPY features/ /app/features/
WORKDIR /app

ENTRYPOINT ["behave", "--no-capture"]
# Build and run
$ docker build -t my-e2e-tests .
$ docker run --shm-size=2g \
    -v $(pwd)/recordings:/app/recordings \
    my-e2e-tests

# Open the report
$ open recordings/report.html

# That's it. Every scenario is an MP4.
# The report has clickable step timelines.
# You never stare at a stack trace again.

Reports that tell the whole story

Click a step. Watch what happened. Dark-themed, responsive, single-file HTML.

recordings/report.html
E2E Test Report
Automated test recordings
8
Scenarios
7
Passed
1
Failed
Invoice Management PASS
REC
app.ledgerco.dev/invoices
Invoices
+ New Invoice
INV #     Client      Amount    Status
INV-041   Acme Corp   $12,400   PAID
INV-042   Globex Inc  $8,750    DUE
INV-043   Initech     $3,200    PAID
Status
Running
Steps
  Given 3 invoices exist
  When I open the invoice list
* Then I see all invoices
0:00 Given 3 invoices exist
0:02 When I open the invoice list
0:04 Then I see all invoices
0:06 And totals are correct
Tax Submission FAIL
app.ledgerco.dev/tax/submit
Submit BAS Return
Period
Q4 2025
GST collected
$14,280.00
GST credits
$6,140.00
Submit to ATO
Error: ABN validation failed — gateway timeout
Status
FAILED
Steps
  Given Q4 tax data
  When I submit the return
* Then I see a receipt
0:00 Given Q4 tax data
0:04 When I submit the return
0:09 Then I see a receipt

Passing tests are documentation too

Recordings of green tests are living proof that your features actually work, updated every time your suite runs.

Share with QA

QA can communicate issues effectively by sharing documented video evidence of both successes and failures. No need to reproduce bugs or explain what happened — the recording shows exactly what the browser did.

Onboard new developers

New team members can watch the test suite to understand what the app does. It's a demo reel that's always current, generated automatically from the test run.

Prove it to stakeholders

Product managers want to see features working, not test output. Recordings are proof that the sprint deliverables actually function in a real browser.

Audit and compliance

Some industries require evidence that testing was performed. Video recordings with timestamped steps are a far stronger artefact than a JUnit XML report.

Scripted product demos

You don't need a test framework. Write a Python script that drives a browser, narrates each scene in the overlay panel, and produces a polished MP4. Ship a fresh demo video with every release — no presenter required.

Parallel user simulation

One server manages multiple independent sessions. Pretend to be Alice, Bob, and Carol at the same time: each gets their own virtual display, their own recording, and their own panel overlay — driven by threads or separate processes from a single script.

One server. Multiple sessions.

A single thea serve process manages any number of concurrent recording sessions. Each session gets its own Xvfb display, its own ffmpeg process, and its own panel overlay — completely isolated from every other session.

import threading

from thea import RecorderClient


def user_session(user_id):
    """Record one simulated user in its own isolated session."""
    client = RecorderClient("http://localhost:9123")
    # Each session gets its own Xvfb display, auto-allocated
    client.create_session(f"user_{user_id}")
    client.use_session(f"user_{user_id}")
    client.start_display()
    client.add_panel("status", title="Status")
    with client.recording(f"user_{user_id}_checkout") as result:
        # drive this user's browser independently…
        client.update_panel("status", "Checking out")
    print(result.path, result.elapsed)
    client.delete_session(f"user_{user_id}")


# Alice, Bob, and Carol run concurrently against the same server.
threads = [threading.Thread(target=user_session, args=(i,)) for i in [1, 2, 3]]
for t in threads:
    t.start()
for t in threads:
    t.join()
from thea import RecorderClient

# Connect, wait for the server, and prepare two overlay panels.
client = RecorderClient("http://localhost:9123")
client.wait_until_ready(timeout=30)
client.start_display()
client.add_panel("scene", title="Scene", width=260)
client.add_panel("action", title="Action")


def narrate(scene, action):
    """Update both overlay panels so the video narrates itself."""
    client.update_panel("scene", scene)
    client.update_panel("action", action)


with client.recording("product_demo_v2") as result:
    narrate("Login", "Navigating to the login page")
    # driver.get("https://app.example.com/login")
    narrate("Login", "Entering credentials")
    narrate("Dashboard", "Key metrics at a glance")
    narrate("Reports", "Exporting monthly PDF…")

print(f"Demo: {result.path} ({result.elapsed:.1f}s)")
client.cleanup()
# Create two independent sessions
curl -X POST http://localhost:9123/sessions -d '{"name":"alice"}'
# → {"name":"alice","display":100,"url_prefix":"/sessions/alice"}
curl -X POST http://localhost:9123/sessions -d '{"name":"bob"}'
# → {"name":"bob","display":101,"url_prefix":"/sessions/bob"}

# Each session has its own Xvfb, panels, and recording
curl -X POST http://localhost:9123/sessions/alice/display/start
curl -X POST http://localhost:9123/sessions/bob/display/start
curl -X POST http://localhost:9123/sessions/alice/recording/start \
  -d '{"name":"alice_checkout"}'
curl -X POST http://localhost:9123/sessions/bob/recording/start \
  -d '{"name":"bob_checkout"}'

# … both browsers record independently …
curl -X POST http://localhost:9123/sessions/alice/recording/stop
curl -X POST http://localhost:9123/sessions/bob/recording/stop

curl -X DELETE http://localhost:9123/sessions/alice
curl -X DELETE http://localhost:9123/sessions/bob