Document the background transformer a bit better

This commit is contained in:
Robin
2025-04-28 15:59:32 +02:00
parent e88e0b1901
commit 02ce1f701a

View File

@@ -21,8 +21,16 @@ interface WasmFileset {
wasmBinaryPath: string;
}
// N.B. this only includes the SIMD versions of the WASM files, since WASM SIMD
// has good browser support: https://caniuse.com/?search=simd
// The MediaPipe package, by default, ships some alternative versions of the
// WASM files which avoid SIMD for compatibility with older browsers. But SIMD
// in WASM is actually fine by our support policy, so we include just the SIMD
// versions.
// It's really not ideal that we have to reference these internal files from
// MediaPipe and depend on node_modules having this specific structure. It's
// easy to see this breaking if our dependencies changed and MediaPipe were
// no longer hoisted, or if we switched to another dependency loader such as
// Yarn PnP.
// https://github.com/google-ai-edge/mediapipe/issues/5961
const wasmFileset: WasmFileset = {
wasmLoaderPath: new URL(
"../../node_modules/@mediapipe/tasks-vision/wasm/vision_wasm_internal.js",
@@ -34,12 +42,20 @@ const wasmFileset: WasmFileset = {
).href,
};
/**
* Track processor that applies effects such as blurring to a user's background.
*
* This is just like LiveKit's prebuilt BackgroundTransformer except that it
* loads the segmentation models from our own bundle rather than as an external
* resource fetched from the public internet.
*/
export class BlurBackgroundTransformer extends BackgroundTransformer {
public async init({
outputCanvas,
inputElement: inputVideo,
}: VideoTransformerInitOptions): Promise<void> {
// call super.super.init()
// Call super.super.init() since we're totally replacing the init method of
// BackgroundTransformer here, rather than extending it
await VideoTransformer.prototype.init.call(this, {
outputCanvas,
inputElement: inputVideo,