1 change: 1 addition & 0 deletions bun.lock

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions packages/producer/package.json
@@ -50,6 +50,8 @@
"docker:test": "docker run --rm --security-opt seccomp=unconfined --shm-size=2g -v ./tests:/app/packages/producer/tests hyperframes-producer:test",
"docker:test:update": "docker run --rm --security-opt seccomp=unconfined --shm-size=2g -v ./tests:/app/packages/producer/tests hyperframes-producer:test --update",
"docker:test:distributed": "docker run --rm --security-opt seccomp=unconfined --shm-size=2g -v ./tests:/app/packages/producer/tests hyperframes-producer:test --mode=distributed-simulated",
"test:lambda-local": "tsx src/regression-harness.ts --exclude-tags transparency --mode=lambda-local",
"docker:test:lambda-local": "docker run --rm --security-opt seccomp=unconfined --shm-size=2g -v ./tests:/app/packages/producer/tests hyperframes-producer:test --mode=lambda-local",
"prepublishOnly": "echo skip"
},
"dependencies": {
@@ -81,6 +83,7 @@
"@fontsource/poppins": "^5.2.7",
"@fontsource/roboto": "^5.2.10",
"@fontsource/source-code-pro": "^5.2.7",
"@hyperframes/aws-lambda": "workspace:*",
"@types/node": "^25.0.10",
"@webgpu/types": "^0.1.69",
"esbuild": "^0.25.12",
16 changes: 16 additions & 0 deletions packages/producer/src/regression-harness-distributed.test.ts
@@ -18,6 +18,10 @@ describe("parseHarnessModeFlag()", () => {
expect(parseHarnessModeFlag("--mode=distributed-simulated")).toBe("distributed-simulated");
});

it("parses --mode=lambda-local", () => {
expect(parseHarnessModeFlag("--mode=lambda-local")).toBe("lambda-local");
});

it("returns null for tokens that aren't --mode", () => {
expect(parseHarnessModeFlag("--update")).toBeNull();
expect(parseHarnessModeFlag("font-variant-numeric")).toBeNull();
@@ -28,6 +32,11 @@ describe("parseHarnessModeFlag()", () => {
expect(() => parseHarnessModeFlag("--mode=foo")).toThrow(/--mode must be/);
expect(() => parseHarnessModeFlag("--mode=")).toThrow(/--mode must be/);
});

it("error message lists all three accepted modes", () => {
expect(() => parseHarnessModeFlag("--mode=foo")).toThrow(/lambda-local/);
expect(() => parseHarnessModeFlag("--mode=foo")).toThrow(/distributed-simulated/);
});
});

describe("checkDistributedSupport()", () => {
@@ -113,6 +122,13 @@ describe("resolveMinPsnrForMode()", () => {
);
});

it("lambda-local mirrors distributed-simulated's pathology floor", () => {
// Both non-in-process modes run through the same producer primitives,
// so they share the same pathology threshold.
expect(resolveMinPsnrForMode("lambda-local", 30)).toBe(30);
expect(resolveMinPsnrForMode("lambda-local", 0)).toBe(DISTRIBUTED_SIMULATED_MIN_PSNR_DB);
});

it("every committed fixture authors a minPsnr above the absolute floor", async () => {
// The pathology floor only fires for a fixture whose authored minPsnr
// is below it — by design that should be no committed fixture. If
21 changes: 18 additions & 3 deletions packages/producer/src/regression-harness-distributed.ts
@@ -36,8 +36,20 @@ import { join } from "node:path";
import type { Fps } from "@hyperframes/core";
import { assemble, plan, renderChunk } from "./distributed.js";

/** Two-mode contract that backs `--mode=<value>` on the regression harness CLI. */
export type HarnessMode = "in-process" | "distributed-simulated";
/**
* Three-mode contract that backs `--mode=<value>` on the regression
* harness CLI:
*
* - `in-process` — `executeRenderJob`, the same path the CLI takes.
* - `distributed-simulated` — `plan` → `renderChunk` × N → `assemble`
* in-process. No adapter (no Temporal, no Lambda).
* - `lambda-local` — drives the OSS `@hyperframes/aws-lambda` handler
* dispatch through a filesystem-backed fake S3, so every event
* shape SFN sends in production also lands here. Catches regressions
* in event JSON / S3 path conventions without paying for a real AWS
* round-trip.
*/
export type HarnessMode = "in-process" | "distributed-simulated" | "lambda-local";

/**
* Absolute pathology floor for `--mode=distributed-simulated` — catches
@@ -207,6 +219,8 @@ export async function runDistributedSimulatedRender(
*/
export function resolveMinPsnrForMode(mode: HarnessMode, fixtureMinPsnr: number): number {
if (mode === "in-process") return fixtureMinPsnr;
// `lambda-local` shares the distributed-simulated pathology floor —
// both modes go through the same plan/renderChunk/assemble primitives.
return Math.max(fixtureMinPsnr, DISTRIBUTED_SIMULATED_MIN_PSNR_DB);
}

@@ -220,10 +234,11 @@ export function resolveMinPsnrForMode(mode: HarnessMode, fixtureMinPsnr: number)
export function parseHarnessModeFlag(token: string): HarnessMode | null {
if (token === "--mode=in-process") return "in-process";
if (token === "--mode=distributed-simulated") return "distributed-simulated";
if (token === "--mode=lambda-local") return "lambda-local";
if (token.startsWith("--mode=")) {
const value = token.slice("--mode=".length);
throw new Error(
`regression-harness: --mode must be 'in-process' or 'distributed-simulated' (got ${JSON.stringify(value)})`,
`regression-harness: --mode must be 'in-process', 'distributed-simulated', or 'lambda-local' (got ${JSON.stringify(value)})`,
);
}
return null;
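For reference, a minimal sketch of how a CLI entry point might consume parseHarnessModeFlag. The scan loop and the in-process default are assumptions; the real wiring lives in regression-harness.ts, which this diff doesn't touch:

import { parseHarnessModeFlag, type HarnessMode } from "./regression-harness-distributed.js";

function resolveModeFromArgv(argv: string[]): HarnessMode {
  // Assumed default: the harness runs in-process unless a --mode flag says otherwise.
  let mode: HarnessMode = "in-process";
  for (const token of argv) {
    // Throws on --mode=<unknown>; returns null for unrelated tokens like --update.
    const parsed = parseHarnessModeFlag(token);
    if (parsed !== null) mode = parsed;
  }
  return mode;
}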
227 changes: 227 additions & 0 deletions packages/producer/src/regression-harness-lambda-local.ts
@@ -0,0 +1,227 @@
/**
* Lambda-local render path for the regression harness.
*
* Drives the OSS `@hyperframes/aws-lambda` handler through the exact
* sequence Step Functions invokes in production:
*
* handler({ Action: "plan" }) → planDir tarball on S3
* handler({ Action: "renderChunk" }) × N → chunk artifacts on S3
* handler({ Action: "assemble" }) → final mp4 / mov / png-seq
*
* The S3 client is a filesystem-backed fake: every `s3://harness-lambda-local/<key>`
* URI maps to `<tempRoot>/s3/<key>`. This means the harness exercises
* the handler's event-parsing + tar / S3 layout + dispatch logic in
* addition to the underlying producer primitives, catching regressions
* (event JSON drift, S3 key conventions, plan-hash boundary checks)
* that `distributed-simulated` mode wouldn't.
*
* `lambda-local` is **deliberately** not a Docker / RIE invocation —
* that would gate the producer test suite on Docker-in-Docker support
* which most CI runners lack. Real-ZIP-via-RIE tests live in
* `packages/aws-lambda/scripts/` (`probe:beginframe`) and the
* maintainer-run `smoke.sh`.
*/

import {
createWriteStream,
existsSync,
mkdirSync,
readFileSync,
statSync,
writeFileSync,
} from "node:fs";
import { dirname, join } from "node:path";
import { pipeline } from "node:stream/promises";
import { Readable } from "node:stream";
import { downloadS3ObjectToFile, tarDirectory, untarDirectory } from "@hyperframes/aws-lambda";
import { handler } from "@hyperframes/aws-lambda/handler";
import type {
AssembleEvent,
AssembleLambdaResult,
HandlerDeps,
PlanEvent,
PlanLambdaResult,
RenderChunkEvent,
RenderChunkLambdaResult,
SerializableDistributedRenderConfig,
} from "@hyperframes/aws-lambda";

/** Inputs for {@link runLambdaLocalRender}. Same contract as `runDistributedSimulatedRender`. */
export interface RunLambdaLocalInput {
projectDir: string;
tempRoot: string;
renderedOutputPath: string;
fps: 24 | 30 | 60;
/**
* Width/height from the fixture's renderConfig. Forwarded directly to
* the Lambda event so this mode catches drift if the handler ever
* starts honouring `Config.width/height` for canvas sizing rather
* than reading the composition's `data-width`/`data-height`. The
* `distributed-simulated` mode hardcodes 1920×1080 because it
* bypasses the event-serialization boundary; lambda-local goes
* through it, which is the whole point.
*/
width: number;
height: number;
format: "mp4" | "mov" | "png-sequence";
codec?: "h264" | "h265";
chunkSize?: number;
maxParallelChunks?: number;
variables?: Record<string, unknown>;
}

const FAKE_BUCKET = "harness-lambda-local";

/** S3 URI helper — keeps the URI shape identical to what SFN uses in production. */
function uri(key: string): string {
return `s3://${FAKE_BUCKET}/${key}`;
}

/**
* Run plan → renderChunk × N → assemble through the OSS handler with a
* filesystem-backed fake S3. Output lands at `input.renderedOutputPath`.
*/
export async function runLambdaLocalRender(input: RunLambdaLocalInput): Promise<void> {
const s3Root = join(input.tempRoot, "s3");
mkdirSync(s3Root, { recursive: true });

// STEP 0: stage the project as a tar.gz at the fake-S3 path the Plan
// event will reference, mirroring what `deploySite` does in prod.
const projectKey = `sites/harness/${Date.now()}/project.tar.gz`;
const projectS3Path = join(s3Root, projectKey);
mkdirSync(dirname(projectS3Path), { recursive: true });
await tarDirectory(input.projectDir, projectS3Path);

const fakeS3 = new FilesystemBackedFakeS3(s3Root);
const deps: HandlerDeps = {
s3: fakeS3 as unknown as HandlerDeps["s3"],
// The handler resolves a Chrome path via `@sparticuz/chromium` by
// default; that's the Lambda-specific binary. In Dockerfile.test
// we want the producer's already-configured Chrome instead. The
// skip flag tells the handler not to override PRODUCER_HEADLESS_SHELL_PATH.
skipChromeResolution: true,
tmpRoot: join(input.tempRoot, "lambda-tmp"),
};
mkdirSync(deps.tmpRoot as string, { recursive: true });

const config: SerializableDistributedRenderConfig = {
fps: input.fps,
width: input.width,
height: input.height,
format: input.format,
...(input.format === "mp4" && input.codec !== undefined ? { codec: input.codec } : {}),
chunkSize: input.chunkSize,
maxParallelChunks: input.maxParallelChunks,
hdrMode: "force-sdr",
};

// STEP A: plan
const planPrefix = `renders/harness/${Date.now()}/`;
const planEvent: PlanEvent = {
Action: "plan",
ProjectS3Uri: uri(projectKey),
PlanOutputS3Prefix: uri(planPrefix),
Config: config,
};
const planResult = (await handler(planEvent, deps)) as PlanLambdaResult;

// STEP B: render every chunk through the handler.
const chunkUris: string[] = [];
for (let i = 0; i < planResult.ChunkCount; i++) {
const chunkEvent: RenderChunkEvent = {
Action: "renderChunk",
PlanS3Uri: planResult.PlanS3Uri,
PlanHash: planResult.PlanHash,
ChunkIndex: i,
ChunkOutputS3Prefix: uri(planPrefix),
Format: input.format,
};
const chunkResult = (await handler(chunkEvent, deps)) as RenderChunkLambdaResult;
chunkUris.push(chunkResult.ChunkS3Uri);
}

// STEP C: assemble
const finalUri = uri(
`${planPrefix}output${input.format === "png-sequence" ? ".tar.gz" : `.${input.format}`}`,
);
const assembleEvent: AssembleEvent = {
Action: "assemble",
PlanS3Uri: planResult.PlanS3Uri,
ChunkS3Uris: chunkUris,
AudioS3Uri: planResult.AudioS3Uri,
OutputS3Uri: finalUri,
Format: input.format,
};
(await handler(assembleEvent, deps)) as AssembleLambdaResult;

// Copy the final output from fake-S3 land back out to the path the
// harness expects. For png-sequence, untar into the dir.
const finalKey = finalUri.slice(`s3://${FAKE_BUCKET}/`.length);
if (input.format === "png-sequence") {
const tarPath = join(s3Root, finalKey);
mkdirSync(input.renderedOutputPath, { recursive: true });
await untarDirectory(tarPath, input.renderedOutputPath);
} else {
await downloadS3ObjectToFile(
fakeS3 as unknown as Parameters<typeof downloadS3ObjectToFile>[0],
finalUri,
input.renderedOutputPath,
);
}
}
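A minimal sketch of a call site for runLambdaLocalRender, with every path and literal a hypothetical fixture value — the real call site is the harness dispatch, which is not shown in this diff:

// Hypothetical fixture values; only the shape matches RunLambdaLocalInput.
await runLambdaLocalRender({
  projectDir: "/tmp/fixture/project", // bundled site dir; STEP 0 tars it into fake S3
  tempRoot: "/tmp/fixture/work", // fake-S3 root and the handler's tmpRoot live under here
  renderedOutputPath: "/tmp/fixture/out.mp4",
  fps: 30,
  width: 1920,
  height: 1080,
  format: "mp4",
});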

/**
* Minimal AWS-SDK-shaped fake S3 that the handler's `send(GetObject)`,
* `send(PutObject)`, and `send(HeadObject)` calls land in. Stores blobs on the local filesystem
* under `root/<key>` so the harness can pre-stage inputs (tarball'd
* project) and post-inspect outputs (per-chunk artifacts, final video)
* without going through a real S3 endpoint.
*/
class FilesystemBackedFakeS3 {
constructor(private readonly root: string) {}

async send(command: unknown): Promise<unknown> {
const cmdName = (command as { constructor: { name: string } }).constructor.name;
const input = (command as { input: { Bucket: string; Key: string; Body?: unknown } }).input;
const fsPath = join(this.root, input.Key);

if (cmdName === "GetObjectCommand") {
if (!existsSync(fsPath)) {
const err = new Error(
`FakeS3: GetObject for missing key ${input.Bucket}/${input.Key}`,
) as Error & {
$metadata: { httpStatusCode: number };
};
err.$metadata = { httpStatusCode: 404 };
throw err;
}
const bytes = readFileSync(fsPath);
return { Body: Readable.from([bytes]) };
}
if (cmdName === "PutObjectCommand") {
mkdirSync(dirname(fsPath), { recursive: true });
const body = input.Body;
if (body instanceof Buffer || typeof body === "string") {
writeFileSync(fsPath, body);
} else if (body && typeof (body as NodeJS.ReadableStream).pipe === "function") {
await pipeline(body as NodeJS.ReadableStream, createWriteStream(fsPath));
} else {
throw new Error(`FakeS3: PutObject body shape not supported (${typeof body})`);
}
return { ETag: `"fake-${statSync(fsPath).size}"` };
}
if (cmdName === "HeadObjectCommand") {
if (!existsSync(fsPath)) {
const err = new Error(
`FakeS3: HeadObject for missing key ${input.Bucket}/${input.Key}`,
) as Error & {
$metadata: { httpStatusCode: number };
};
err.$metadata = { httpStatusCode: 404 };
throw err;
}
return { ContentLength: statSync(fsPath).size, LastModified: new Date() };
}
throw new Error(`FakeS3: unexpected command ${cmdName}`);
}
}
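To see the fake's contract in isolation, a round-trip sketch using the real AWS SDK command classes — the fake dispatches on constructor.name, so the SDK's GetObjectCommand/PutObjectCommand shapes are all it needs. FilesystemBackedFakeS3 is module-private above, so this sketch assumes it runs in the same module:

import { GetObjectCommand, PutObjectCommand } from "@aws-sdk/client-s3";
import { mkdtempSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";

const fake = new FilesystemBackedFakeS3(mkdtempSync(join(tmpdir(), "fake-s3-")));

// PutObject writes root/<Key>; the Bucket only ever shows up in error messages.
await fake.send(new PutObjectCommand({ Bucket: "any", Key: "a/b.txt", Body: "hello" }));

// GetObject streams the same bytes back, mirroring the SDK's { Body } shape.
const { Body } = (await fake.send(new GetObjectCommand({ Bucket: "any", Key: "a/b.txt" }))) as {
  Body: NodeJS.ReadableStream;
};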