Merge pull request #2812 from element-hq/toger5/track-processor-blur

Add background blur feature for supported devices
This commit is contained in:
Robin
2025-04-29 22:25:02 +02:00
committed by GitHub
15 changed files with 1328 additions and 85 deletions

View File

@@ -164,6 +164,9 @@
"effect_volume_description": "Adjust the volume at which reactions and hand raised effects play.",
"effect_volume_label": "Sound effect volume"
},
"background_blur_header": "Background",
"background_blur_label": "Blur the background of the video",
"blur_not_supported_by_browser": "(Background blur is not supported by this device.)",
"developer_tab_title": "Developer",
"devices": {
"camera": "Camera",

View File

@@ -40,6 +40,8 @@
"@livekit/components-core": "^0.12.0",
"@livekit/components-react": "^2.0.0",
"@livekit/protocol": "^1.33.0",
"@livekit/track-processors": "^0.5.5",
"@mediapipe/tasks-vision": "^0.10.18",
"@opentelemetry/api": "^1.4.0",
"@opentelemetry/core": "^1.25.1",
"@opentelemetry/exporter-trace-otlp-http": "^0.57.0",
@@ -70,8 +72,8 @@
"@types/react-dom": "^18.3.0",
"@types/sdp-transform": "^2.4.5",
"@types/uuid": "10",
"@typescript-eslint/eslint-plugin": "^8.0.0",
"@typescript-eslint/parser": "^8.0.0",
"@typescript-eslint/eslint-plugin": "^8.31.0",
"@typescript-eslint/parser": "^8.31.0",
"@use-gesture/react": "^10.2.11",
"@vector-im/compound-design-tokens": "^3.0.0",
"@vector-im/compound-web": "^7.2.0",
@@ -118,7 +120,7 @@
"react-use-measure": "^2.1.1",
"rxjs": "^7.8.1",
"sass": "^1.42.1",
"typescript": "^5.1.6",
"typescript": "^5.8.3",
"typescript-eslint-language-service": "^5.0.5",
"unique-names-generator": "^4.6.0",
"vaul": "^1.0.0",
@@ -131,6 +133,7 @@
},
"resolutions": {
"@livekit/components-core/rxjs": "^7.8.1",
"@livekit/track-processors/@mediapipe/tasks-vision": "^0.10.18",
"matrix-widget-api": "1.11.0"
},
"packageManager": "yarn@4.7.0"

View File

@@ -0,0 +1,146 @@
/* eslint-disable */
// The contents of this file below the line are copied from
// @types/dom-mediacapture-transform, which is inlined here into Element Call so
// that we can apply the patch to @types/dom-webcodecs found in
// ./dom-webcodecs.d.ts, which it depends on.
// (https://github.com/DefinitelyTyped/DefinitelyTyped/pull/72625)
// Once that PR is merged and released, we can remove this file and return to
// depending on @types/dom-mediacapture-transform.
// -----------------------------------------------------------------------------
// This project is licensed under the MIT license.
// Copyrights are respective of each contributor listed at the beginning of each definition file.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// In general, these types are only available behind a command line flag or an origin trial in
// Chrome 90+.
// This API depends on WebCodecs.
// Versioning:
// Until the above-mentioned spec is finalized, the major version number is 0. Although not
// necessary for version 0, consider incrementing the minor version number for breaking changes.
// The following modify existing DOM types to allow defining type-safe APIs on audio and video tracks.
/** Specialize MediaStreamTrack so that we can refer specifically to an audio track. */
interface MediaStreamAudioTrack extends MediaStreamTrack {
readonly kind: "audio";
clone(): MediaStreamAudioTrack;
}
/** Specialize MediaStreamTrack so that we can refer specifically to a video track. */
interface MediaStreamVideoTrack extends MediaStreamTrack {
readonly kind: "video";
clone(): MediaStreamVideoTrack;
}
/** Assert that getAudioTracks and getVideoTracks return the tracks with the appropriate kind. */
interface MediaStream {
getAudioTracks(): MediaStreamAudioTrack[];
getVideoTracks(): MediaStreamVideoTrack[];
}
// The following were originally generated from the spec using
// https://github.com/microsoft/TypeScript-DOM-lib-generator, then heavily modified.
/**
 * A track sink that is capable of exposing the unencoded frames from the track to a
 * ReadableStream, and exposes a control channel for signals going in the opposite direction.
 */
interface MediaStreamTrackProcessor<T extends AudioData | VideoFrame> {
  /**
   * Allows reading the frames flowing through the MediaStreamTrack provided to the constructor.
   */
  readonly readable: ReadableStream<T>;
  /** Allows sending control signals to the MediaStreamTrack provided to the constructor. */
  readonly writableControl: WritableStream<MediaStreamTrackSignal>;
}
declare var MediaStreamTrackProcessor: {
prototype: MediaStreamTrackProcessor<any>;
/** Constructor overrides based on the type of track. */
new (
init: MediaStreamTrackProcessorInit & { track: MediaStreamAudioTrack },
): MediaStreamTrackProcessor<AudioData>;
new (
init: MediaStreamTrackProcessorInit & { track: MediaStreamVideoTrack },
): MediaStreamTrackProcessor<VideoFrame>;
};
interface MediaStreamTrackProcessorInit {
track: MediaStreamTrack;
/**
* If media frames are not read from MediaStreamTrackProcessor.readable quickly enough, the
* MediaStreamTrackProcessor will internally buffer up to maxBufferSize of the frames produced
* by the track. If the internal buffer is full, each time the track produces a new frame, the
* oldest frame in the buffer will be dropped and the new frame will be added to the buffer.
*/
maxBufferSize?: number | undefined;
}
/**
 * A track source that takes media frames as input (via `writable`) and is itself a
 * MediaStreamTrack; control signals sent by connected sinks can be read from
 * `readableControl`.
 */
interface MediaStreamTrackGenerator<T extends AudioData | VideoFrame>
  extends MediaStreamTrack {
  /**
   * Allows writing media frames to the MediaStreamTrackGenerator, which is itself a
   * MediaStreamTrack. When a frame is written to writable, the frame's close() method is
   * automatically invoked, so that its internal resources are no longer accessible from
   * JavaScript.
   */
  readonly writable: WritableStream<T>;
  /**
   * Allows reading control signals sent from any sinks connected to the
   * MediaStreamTrackGenerator.
   */
  readonly readableControl: ReadableStream<MediaStreamTrackSignal>;
}
type MediaStreamAudioTrackGenerator = MediaStreamTrackGenerator<AudioData> &
MediaStreamAudioTrack;
type MediaStreamVideoTrackGenerator = MediaStreamTrackGenerator<VideoFrame> &
MediaStreamVideoTrack;
declare var MediaStreamTrackGenerator: {
prototype: MediaStreamTrackGenerator<any>;
/** Constructor overrides based on the type of track. */
new (
init: MediaStreamTrackGeneratorInit & {
kind: "audio";
signalTarget?: MediaStreamAudioTrack | undefined;
},
): MediaStreamAudioTrackGenerator;
new (
init: MediaStreamTrackGeneratorInit & {
kind: "video";
signalTarget?: MediaStreamVideoTrack | undefined;
},
): MediaStreamVideoTrackGenerator;
};
interface MediaStreamTrackGeneratorInit {
  kind: MediaStreamTrackGeneratorKind;
  /**
   * (Optional) track to which the MediaStreamTrackGenerator will automatically forward control
   * signals. If signalTarget is provided and signalTarget.kind and kind do not match, the
   * MediaStreamTrackGenerator's constructor will raise an exception.
   */
  signalTarget?: MediaStreamTrack | undefined;
}
type MediaStreamTrackGeneratorKind = "audio" | "video";
type MediaStreamTrackSignalType = "request-frame";
interface MediaStreamTrackSignal {
signalType: MediaStreamTrackSignalType;
}

745
src/@types/dom-webcodecs.d.ts vendored Normal file
View File

@@ -0,0 +1,745 @@
/* eslint-disable */
// The contents of this file below the line are copied from
// @types/dom-webcodecs, which is inlined here into Element Call so that we can
// apply the patch https://github.com/DefinitelyTyped/DefinitelyTyped/pull/72625
// which is needed for TypeScript 5.8 compatibility. Once that PR is merged and
// released, we can remove this file and return to depending on
// @types/dom-webcodecs.
// -----------------------------------------------------------------------------
// This project is licensed under the MIT license.
// Copyrights are respective of each contributor listed at the beginning of each definition file.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// Versioning:
// Until the WebCodecs spec is finalized, the major version number is 0. I have chosen to use minor
// version 1 to denote the API as defined by the IDL files from the Chromium repo at
// https://chromium.googlesource.com/chromium/src/+/main/third_party/blink/renderer/modules/webcodecs.
// Please use a version number above 0.1 if using the spec at https://w3c.github.io/webcodecs/ as
// the source.
// The declarations in webcodecs.generated.d.ts have been generated using the code in
// https://github.com/yume-chan/webcodecs-lib-generator. See
// https://github.com/yume-chan/webcodecs-lib-generator/blob/main/README.md for more detail.
// The following declarations are copied from
// https://github.com/microsoft/TypeScript-DOM-lib-generator/blob/a75338e1ea8a958bf08a5745141d2ab8f14ba2ca/baselines/dom.generated.d.ts
// and modified to expand the types to include VideoFrame.
/** Shim for OffscreenCanvas, which was removed in TS 4.4 */
interface OffscreenCanvas extends EventTarget {}
/**
* Replaces CanvasImageSource; only applies if WebCodecs is available.
*/
type CanvasImageSourceWebCodecs =
| HTMLOrSVGImageElement
| HTMLVideoElement
| HTMLCanvasElement
| ImageBitmap
| OffscreenCanvas
| VideoFrame;
interface CanvasRenderingContext2D {
drawImage(image: CanvasImageSourceWebCodecs, dx: number, dy: number): void;
drawImage(
image: CanvasImageSourceWebCodecs,
dx: number,
dy: number,
dw: number,
dh: number,
): void;
drawImage(
image: CanvasImageSourceWebCodecs,
sx: number,
sy: number,
sw: number,
sh: number,
dx: number,
dy: number,
dw: number,
dh: number,
): void;
createPattern(
image: CanvasImageSourceWebCodecs,
repetition: string | null,
): CanvasPattern | null;
}
interface OffscreenCanvasRenderingContext2D {
drawImage(image: CanvasImageSourceWebCodecs, dx: number, dy: number): void;
drawImage(
image: CanvasImageSourceWebCodecs,
dx: number,
dy: number,
dw: number,
dh: number,
): void;
drawImage(
image: CanvasImageSourceWebCodecs,
sx: number,
sy: number,
sw: number,
sh: number,
dx: number,
dy: number,
dw: number,
dh: number,
): void;
createPattern(
image: CanvasImageSourceWebCodecs,
repetition: string | null,
): CanvasPattern | null;
}
/**
* Replaces ImageBitmapSource; only applies if WebCodecs is available.
*/
type ImageBitmapSourceWebCodecs = CanvasImageSourceWebCodecs | Blob | ImageData;
declare function createImageBitmap(
image: ImageBitmapSourceWebCodecs,
options?: ImageBitmapOptions,
): Promise<ImageBitmap>;
declare function createImageBitmap(
image: ImageBitmapSourceWebCodecs,
sx: number,
sy: number,
sw: number,
sh: number,
options?: ImageBitmapOptions,
): Promise<ImageBitmap>;
/**
* Replaces TexImageSource; only applies if WebCodecs is available.
*/
type TexImageSourceWebCodecs =
| ImageBitmap
| ImageData
| HTMLImageElement
| HTMLCanvasElement
| HTMLVideoElement
| OffscreenCanvas
| VideoFrame;
interface WebGLRenderingContextOverloads {
texImage2D(
target: GLenum,
level: GLint,
internalformat: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
texSubImage2D(
target: GLenum,
level: GLint,
xoffset: GLint,
yoffset: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
}
interface WebGL2RenderingContextBase {
texImage3D(
target: GLenum,
level: GLint,
internalformat: GLint,
width: GLsizei,
height: GLsizei,
depth: GLsizei,
border: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
texSubImage3D(
target: GLenum,
level: GLint,
xoffset: GLint,
yoffset: GLint,
zoffset: GLint,
width: GLsizei,
height: GLsizei,
depth: GLsizei,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
}
interface WebGL2RenderingContextOverloads {
texImage2D(
target: GLenum,
level: GLint,
internalformat: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
texImage2D(
target: GLenum,
level: GLint,
internalformat: GLint,
width: GLsizei,
height: GLsizei,
border: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
texSubImage2D(
target: GLenum,
level: GLint,
xoffset: GLint,
yoffset: GLint,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
texSubImage2D(
target: GLenum,
level: GLint,
xoffset: GLint,
yoffset: GLint,
width: GLsizei,
height: GLsizei,
format: GLenum,
type: GLenum,
source: TexImageSourceWebCodecs,
): void;
}
/////////////////////////////
/// webcodecs APIs
/////////////////////////////
interface AudioDataCopyToOptions {
format?: AudioSampleFormat | undefined;
frameCount?: number | undefined;
frameOffset?: number | undefined;
planeIndex: number;
}
interface AudioDataInit {
data: AllowSharedBufferSource;
format: AudioSampleFormat;
numberOfChannels: number;
numberOfFrames: number;
sampleRate: number;
timestamp: number;
}
interface AudioDecoderConfig {
codec: string;
description?: AllowSharedBufferSource | undefined;
numberOfChannels: number;
sampleRate: number;
}
interface AudioDecoderInit {
error: WebCodecsErrorCallback;
output: AudioDataOutputCallback;
}
interface AudioDecoderSupport {
config?: AudioDecoderConfig;
supported?: boolean;
}
interface AudioEncoderConfig {
bitrate?: number | undefined;
codec: string;
numberOfChannels: number;
sampleRate: number;
}
interface AudioEncoderInit {
error: WebCodecsErrorCallback;
output: EncodedAudioChunkOutputCallback;
}
interface AudioEncoderSupport {
config?: AudioEncoderConfig;
supported?: boolean;
}
interface AvcEncoderConfig {
format?: AvcBitstreamFormat | undefined;
}
interface EncodedAudioChunkInit {
data: AllowSharedBufferSource;
duration?: number | undefined;
timestamp: number;
type: EncodedAudioChunkType;
}
interface EncodedAudioChunkMetadata {
decoderConfig?: AudioDecoderConfig | undefined;
}
interface EncodedVideoChunkInit {
data: AllowSharedBufferSource;
duration?: number | undefined;
timestamp: number;
type: EncodedVideoChunkType;
}
interface EncodedVideoChunkMetadata {
decoderConfig?: VideoDecoderConfig | undefined;
temporalLayerId?: number | undefined;
}
interface ImageDecodeOptions {
completeFramesOnly?: boolean | undefined;
frameIndex?: number | undefined;
}
interface ImageDecodeResult {
complete: boolean;
image: VideoFrame;
}
interface ImageDecoderInit {
colorSpaceConversion?: ColorSpaceConversion | undefined;
data: ImageBufferSource;
desiredHeight?: number | undefined;
desiredWidth?: number | undefined;
preferAnimation?: boolean | undefined;
premultiplyAlpha?: PremultiplyAlpha | undefined;
type: string;
}
interface PlaneLayout {
offset: number;
stride: number;
}
interface VideoColorSpaceInit {
fullRange?: boolean | null | undefined;
matrix?: VideoMatrixCoefficients | null | undefined;
primaries?: VideoColorPrimaries | null | undefined;
transfer?: VideoTransferCharacteristics | null | undefined;
}
interface VideoDecoderConfig {
codec: string;
codedHeight?: number | undefined;
codedWidth?: number | undefined;
colorSpace?: VideoColorSpaceInit | undefined;
description?: AllowSharedBufferSource | undefined;
displayAspectHeight?: number | undefined;
displayAspectWidth?: number | undefined;
hardwareAcceleration?: HardwarePreference | undefined;
optimizeForLatency?: boolean | undefined;
}
interface VideoDecoderInit {
error: WebCodecsErrorCallback;
output: VideoFrameOutputCallback;
}
interface VideoDecoderSupport {
config?: VideoDecoderConfig;
supported?: boolean;
}
interface VideoEncoderConfig {
alpha?: AlphaOption | undefined;
avc?: AvcEncoderConfig | undefined;
bitrate?: number | undefined;
bitrateMode?: VideoEncoderBitrateMode | undefined;
codec: string;
displayHeight?: number | undefined;
displayWidth?: number | undefined;
framerate?: number | undefined;
hardwareAcceleration?: HardwarePreference | undefined;
height: number;
latencyMode?: LatencyMode | undefined;
scalabilityMode?: string | undefined;
width: number;
}
interface VideoEncoderEncodeOptions {
keyFrame?: boolean;
}
interface VideoEncoderInit {
error: WebCodecsErrorCallback;
output: EncodedVideoChunkOutputCallback;
}
interface VideoEncoderSupport {
config?: VideoEncoderConfig;
supported?: boolean;
}
interface VideoFrameBufferInit {
codedHeight: number;
codedWidth: number;
colorSpace?: VideoColorSpaceInit | undefined;
displayHeight?: number | undefined;
displayWidth?: number | undefined;
duration?: number | undefined;
format: VideoPixelFormat;
layout?: PlaneLayout[] | undefined;
timestamp: number;
visibleRect?: DOMRectInit | undefined;
}
interface VideoFrameCopyToOptions {
layout?: PlaneLayout[] | undefined;
rect?: DOMRectInit | undefined;
}
interface VideoFrameInit {
alpha?: AlphaOption | undefined;
displayHeight?: number | undefined;
displayWidth?: number | undefined;
duration?: number | undefined;
timestamp?: number | undefined;
visibleRect?: DOMRectInit | undefined;
}
interface AudioData {
readonly duration: number;
readonly format: AudioSampleFormat | null;
readonly numberOfChannels: number;
readonly numberOfFrames: number;
readonly sampleRate: number;
readonly timestamp: number;
allocationSize(options: AudioDataCopyToOptions): number;
clone(): AudioData;
close(): void;
copyTo(
destination: AllowSharedBufferSource,
options: AudioDataCopyToOptions,
): void;
}
declare var AudioData: {
prototype: AudioData;
new (init: AudioDataInit): AudioData;
};
interface AudioDecoderEventMap {
dequeue: Event;
}
/** Available only in secure contexts. */
interface AudioDecoder {
readonly decodeQueueSize: number;
readonly state: CodecState;
ondequeue: ((this: AudioDecoder, ev: Event) => any) | null;
close(): void;
configure(config: AudioDecoderConfig): void;
decode(chunk: EncodedAudioChunk): void;
flush(): Promise<void>;
reset(): void;
addEventListener<K extends keyof AudioDecoderEventMap>(
type: K,
listener: (this: AudioDecoder, ev: AudioDecoderEventMap[K]) => any,
options?: boolean | AddEventListenerOptions,
): void;
addEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | AddEventListenerOptions,
): void;
removeEventListener<K extends keyof AudioDecoderEventMap>(
type: K,
listener: (this: AudioDecoder, ev: AudioDecoderEventMap[K]) => any,
options?: boolean | EventListenerOptions,
): void;
removeEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | EventListenerOptions,
): void;
}
declare var AudioDecoder: {
prototype: AudioDecoder;
new (init: AudioDecoderInit): AudioDecoder;
isConfigSupported(config: AudioDecoderConfig): Promise<AudioDecoderSupport>;
};
interface AudioEncoderEventMap {
dequeue: Event;
}
/** Available only in secure contexts. */
interface AudioEncoder {
readonly encodeQueueSize: number;
readonly state: CodecState;
ondequeue: ((this: AudioEncoder, ev: Event) => any) | null;
close(): void;
configure(config: AudioEncoderConfig): void;
encode(data: AudioData): void;
flush(): Promise<void>;
reset(): void;
addEventListener<K extends keyof AudioEncoderEventMap>(
type: K,
listener: (this: AudioEncoder, ev: AudioEncoderEventMap[K]) => any,
options?: boolean | AddEventListenerOptions,
): void;
addEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | AddEventListenerOptions,
): void;
removeEventListener<K extends keyof AudioEncoderEventMap>(
type: K,
listener: (this: AudioEncoder, ev: AudioEncoderEventMap[K]) => any,
options?: boolean | EventListenerOptions,
): void;
removeEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | EventListenerOptions,
): void;
}
declare var AudioEncoder: {
prototype: AudioEncoder;
new (init: AudioEncoderInit): AudioEncoder;
isConfigSupported(config: AudioEncoderConfig): Promise<AudioEncoderSupport>;
};
interface EncodedAudioChunk {
readonly byteLength: number;
readonly duration: number | null;
readonly timestamp: number;
readonly type: EncodedAudioChunkType;
copyTo(destination: AllowSharedBufferSource): void;
}
declare var EncodedAudioChunk: {
prototype: EncodedAudioChunk;
new (init: EncodedAudioChunkInit): EncodedAudioChunk;
};
interface EncodedVideoChunk {
readonly byteLength: number;
readonly duration: number | null;
readonly timestamp: number;
readonly type: EncodedVideoChunkType;
copyTo(destination: AllowSharedBufferSource): void;
}
declare var EncodedVideoChunk: {
prototype: EncodedVideoChunk;
new (init: EncodedVideoChunkInit): EncodedVideoChunk;
};
/** Available only in secure contexts. */
interface ImageDecoder {
readonly complete: boolean;
readonly completed: Promise<void>;
readonly tracks: ImageTrackList;
readonly type: string;
close(): void;
decode(options?: ImageDecodeOptions): Promise<ImageDecodeResult>;
reset(): void;
}
// declare var ImageDecoder: {
// prototype: ImageDecoder;
// new(init: ImageDecoderInit): ImageDecoder;
// isTypeSupported(type: string): Promise<boolean>;
// };
// interface ImageTrack {
// readonly animated: boolean;
// readonly frameCount: number;
// readonly repetitionCount: number;
// selected: boolean;
// }
// declare var ImageTrack: {
// prototype: ImageTrack;
// new(): ImageTrack;
// };
// interface ImageTrackList {
// readonly length: number;
// readonly ready: Promise<void>;
// readonly selectedIndex: number;
// readonly selectedTrack: ImageTrack | null;
// [index: number]: ImageTrack;
// }
// declare var ImageTrackList: {
// prototype: ImageTrackList;
// new(): ImageTrackList;
// };
interface VideoColorSpace {
readonly fullRange: boolean | null;
readonly matrix: VideoMatrixCoefficients | null;
readonly primaries: VideoColorPrimaries | null;
readonly transfer: VideoTransferCharacteristics | null;
toJSON(): VideoColorSpaceInit;
}
declare var VideoColorSpace: {
prototype: VideoColorSpace;
new (init?: VideoColorSpaceInit): VideoColorSpace;
};
interface VideoDecoderEventMap {
dequeue: Event;
}
/** Available only in secure contexts. */
interface VideoDecoder {
readonly decodeQueueSize: number;
readonly state: CodecState;
ondequeue: ((this: VideoDecoder, ev: Event) => any) | null;
close(): void;
configure(config: VideoDecoderConfig): void;
decode(chunk: EncodedVideoChunk): void;
flush(): Promise<void>;
reset(): void;
addEventListener<K extends keyof VideoDecoderEventMap>(
type: K,
listener: (this: VideoDecoder, ev: VideoDecoderEventMap[K]) => any,
options?: boolean | AddEventListenerOptions,
): void;
addEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | AddEventListenerOptions,
): void;
removeEventListener<K extends keyof VideoDecoderEventMap>(
type: K,
listener: (this: VideoDecoder, ev: VideoDecoderEventMap[K]) => any,
options?: boolean | EventListenerOptions,
): void;
removeEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | EventListenerOptions,
): void;
}
declare var VideoDecoder: {
prototype: VideoDecoder;
new (init: VideoDecoderInit): VideoDecoder;
isConfigSupported(config: VideoDecoderConfig): Promise<VideoDecoderSupport>;
};
interface VideoEncoderEventMap {
dequeue: Event;
}
/** Available only in secure contexts. */
interface VideoEncoder {
readonly encodeQueueSize: number;
readonly state: CodecState;
close(): void;
ondequeue: ((this: VideoEncoder, ev: Event) => any) | null;
configure(config: VideoEncoderConfig): void;
encode(frame: VideoFrame, options?: VideoEncoderEncodeOptions): void;
flush(): Promise<void>;
reset(): void;
addEventListener<K extends keyof VideoEncoderEventMap>(
type: K,
listener: (this: VideoEncoder, ev: VideoEncoderEventMap[K]) => any,
options?: boolean | AddEventListenerOptions,
): void;
addEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | AddEventListenerOptions,
): void;
removeEventListener<K extends keyof VideoEncoderEventMap>(
type: K,
listener: (this: VideoEncoder, ev: VideoEncoderEventMap[K]) => any,
options?: boolean | EventListenerOptions,
): void;
removeEventListener(
type: string,
listener: EventListenerOrEventListenerObject,
options?: boolean | EventListenerOptions,
): void;
}
declare var VideoEncoder: {
prototype: VideoEncoder;
new (init: VideoEncoderInit): VideoEncoder;
isConfigSupported(config: VideoEncoderConfig): Promise<VideoEncoderSupport>;
};
interface VideoFrame {
readonly codedHeight: number;
readonly codedRect: DOMRectReadOnly | null;
readonly codedWidth: number;
readonly colorSpace: VideoColorSpace;
readonly displayHeight: number;
readonly displayWidth: number;
readonly duration: number | null;
readonly format: VideoPixelFormat | null;
readonly timestamp: number;
readonly visibleRect: DOMRectReadOnly | null;
allocationSize(options?: VideoFrameCopyToOptions): number;
clone(): VideoFrame;
close(): void;
copyTo(
destination: AllowSharedBufferSource,
options?: VideoFrameCopyToOptions,
): Promise<PlaneLayout[]>;
}
declare var VideoFrame: {
prototype: VideoFrame;
new (source: CanvasImageSource, init?: VideoFrameInit): VideoFrame;
new (data: AllowSharedBufferSource, init: VideoFrameBufferInit): VideoFrame;
};
interface AudioDataOutputCallback {
(output: AudioData): void;
}
interface EncodedAudioChunkOutputCallback {
(output: EncodedAudioChunk, metadata: EncodedAudioChunkMetadata): void;
}
interface EncodedVideoChunkOutputCallback {
(chunk: EncodedVideoChunk, metadata: EncodedVideoChunkMetadata): void;
}
interface VideoFrameOutputCallback {
(output: VideoFrame): void;
}
interface WebCodecsErrorCallback {
(error: DOMException): void;
}
// type AllowSharedBufferSource = ArrayBuffer | ArrayBufferView;
// type BitrateMode = "constant" | "variable";
// type ImageBufferSource = ArrayBuffer | ArrayBufferView | ReadableStream;
// type AlphaOption = "discard" | "keep";
// type AudioSampleFormat = "f32" | "f32-planar" | "s16" | "s16-planar" | "s32" | "s32-planar" | "u8" | "u8-planar";
// type AvcBitstreamFormat = "annexb" | "avc";
// type CodecState = "closed" | "configured" | "unconfigured";
// type EncodedAudioChunkType = "delta" | "key";
// type EncodedVideoChunkType = "delta" | "key";
type HardwarePreference =
| "no-preference"
| "prefer-hardware"
| "prefer-software";
// type LatencyMode = "quality" | "realtime";
// type VideoColorPrimaries = "bt470bg" | "bt709" | "smpte170m";
// type VideoMatrixCoefficients = "bt470bg" | "bt709" | "rgb" | "smpte170m";
// type VideoPixelFormat = "BGRA" | "BGRX" | "I420" | "I420A" | "I422" | "I444" | "NV12" | "RGBA" | "RGBX";
// type VideoTransferCharacteristics = "bt709" | "iec61966-2-1" | "smpte170m";

View File

@@ -22,6 +22,7 @@ import { Initializer } from "./initializer";
import { MediaDevicesProvider } from "./livekit/MediaDevicesContext";
import { widget } from "./widget";
import { useTheme } from "./useTheme";
import { ProcessorProvider } from "./livekit/TrackProcessorContext";
const SentryRoute = Sentry.withSentryReactRouterV7Routing(Route);
@@ -72,22 +73,24 @@ export const App: FC = () => {
<Suspense fallback={null}>
<ClientProvider>
<MediaDevicesProvider>
<Sentry.ErrorBoundary
fallback={(error) => (
<ErrorPage error={error} widget={widget} />
)}
>
<DisconnectedBanner />
<Routes>
<SentryRoute path="/" element={<HomePage />} />
<SentryRoute path="/login" element={<LoginPage />} />
<SentryRoute
path="/register"
element={<RegisterPage />}
/>
<SentryRoute path="*" element={<RoomPage />} />
</Routes>
</Sentry.ErrorBoundary>
<ProcessorProvider>
<Sentry.ErrorBoundary
fallback={(error) => (
<ErrorPage error={error} widget={widget} />
)}
>
<DisconnectedBanner />
<Routes>
<SentryRoute path="/" element={<HomePage />} />
<SentryRoute path="/login" element={<LoginPage />} />
<SentryRoute
path="/register"
element={<RegisterPage />}
/>
<SentryRoute path="*" element={<RoomPage />} />
</Routes>
</Sentry.ErrorBoundary>
</ProcessorProvider>
</MediaDevicesProvider>
</ClientProvider>
</Suspense>

View File

@@ -0,0 +1,80 @@
/*
Copyright 2024-2025 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only
Please see LICENSE in the repository root for full details.
*/
import {
BackgroundTransformer,
VideoTransformer,
type VideoTransformerInitOptions,
} from "@livekit/track-processors";
import { ImageSegmenter } from "@mediapipe/tasks-vision";
import modelAssetPath from "../mediapipe/imageSegmenter/selfie_segmenter.tflite?url";
interface WasmFileset {
/** The path to the Wasm loader script. */
wasmLoaderPath: string;
/** The path to the Wasm binary. */
wasmBinaryPath: string;
}
// The MediaPipe package, by default, ships some alternative versions of the
// WASM files which avoid SIMD for compatibility with older browsers. But SIMD
// in WASM is actually fine by our support policy, so we include just the SIMD
// versions.
// It's really not ideal that we have to reference these internal files from
// MediaPipe and depend on node_modules having this specific structure. It's
// easy to see this breaking if our dependencies changed and MediaPipe were
// no longer hoisted, or if we switched to another dependency loader such as
// Yarn PnP.
// https://github.com/google-ai-edge/mediapipe/issues/5961
const wasmFileset: WasmFileset = {
wasmLoaderPath: new URL(
"../../node_modules/@mediapipe/tasks-vision/wasm/vision_wasm_internal.js",
import.meta.url,
).href,
wasmBinaryPath: new URL(
"../../node_modules/@mediapipe/tasks-vision/wasm/vision_wasm_internal.wasm",
import.meta.url,
).href,
};
/**
* Track processor that applies effects such as blurring to a user's background.
*
* This is just like LiveKit's prebuilt BackgroundTransformer except that it
* loads the segmentation models from our own bundle rather than as an external
* resource fetched from the public internet.
*/
export class BlurBackgroundTransformer extends BackgroundTransformer {
  /**
   * Initializes the transformer, creating the MediaPipe image segmenter from
   * the locally bundled WASM fileset and model asset (see wasmFileset and
   * modelAssetPath above) instead of fetching them from the public internet.
   *
   * @param options - The output canvas and input video element supplied by
   *   the LiveKit processor pipeline.
   */
  public async init({
    outputCanvas,
    inputElement: inputVideo,
  }: VideoTransformerInitOptions): Promise<void> {
    // Call super.super.init() since we're totally replacing the init method of
    // BackgroundTransformer here, rather than extending it
    await VideoTransformer.prototype.init.call(this, {
      outputCanvas,
      inputElement: inputVideo,
    });
    // Spreading segmenterOptions last lets caller-provided options override
    // modelAssetPath and the GPU delegate.
    // NOTE(review): assumes the imageSegmenter/canvas/gl members and option
    // shape match BackgroundTransformer's own init — confirm against the
    // pinned @livekit/track-processors version when upgrading it.
    this.imageSegmenter = await ImageSegmenter.createFromOptions(wasmFileset, {
      baseOptions: {
        modelAssetPath,
        delegate: "GPU",
        ...this.options.segmenterOptions,
      },
      canvas: this.canvas,
      runningMode: "VIDEO",
      outputCategoryMask: true,
      outputConfidenceMasks: false,
    });
    // Truthiness check: a blurRadius of 0 (or undefined) applies no blur.
    if (this.options.blurRadius) {
      this.gl?.setBlurRadius(this.options.blurRadius);
    }
  }
}

View File

@@ -0,0 +1,84 @@
/*
Copyright 2024 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only
Please see LICENSE in the repository root for full details.
*/
import {
ProcessorWrapper,
supportsBackgroundProcessors,
type BackgroundOptions,
} from "@livekit/track-processors";
import { createContext, type FC, useContext, useEffect, useMemo } from "react";
import { type LocalVideoTrack } from "livekit-client";
import {
backgroundBlur as backgroundBlurSettings,
useSetting,
} from "../settings/settings";
import { BlurBackgroundTransformer } from "./BlurBackgroundTransformer";
// Shape of the context value shared by the processor provider/hooks below.
type ProcessorState = {
  // Whether background processors work on this device/browser; may be
  // undefined if support has not been determined.
  supported: boolean | undefined;
  // The processor to apply to local video, or undefined when none is active.
  processor: undefined | ProcessorWrapper<BackgroundOptions>;
};

// Context is undefined outside of a ProcessorProvider; hooks guard on this.
const ProcessorContext = createContext<ProcessorState | undefined>(undefined);
/**
 * Read the current track-processor state from context.
 *
 * @throws If called outside of a {@link ProcessorProvider}.
 */
export function useTrackProcessor(): ProcessorState {
  const ctx = useContext(ProcessorContext);
  if (ctx === undefined) {
    throw new Error(
      "useTrackProcessor must be used within a ProcessorProvider",
    );
  }
  return ctx;
}
/**
 * Keep the given local video track's processor in sync with the one selected
 * via the processor context: attach it when requested but absent, detach it
 * when no longer requested.
 */
export const useTrackProcessorSync = (
  videoTrack: LocalVideoTrack | null,
): void => {
  const { processor } = useTrackProcessor();
  useEffect(() => {
    if (!videoTrack) return;
    const attached = videoTrack.getProcessor();
    if (processor && !attached) {
      // Fire-and-forget: LiveKit applies the processor asynchronously.
      void videoTrack.setProcessor(processor);
    } else if (!processor && attached) {
      void videoTrack.stopProcessor();
    }
  }, [processor, videoTrack]);
};
interface Props {
  // Subtree that should have access to the processor context.
  children: JSX.Element;
}
/**
 * Provides the track-processor state (support flag plus the active blur
 * processor, if enabled) to the component tree.
 */
export const ProcessorProvider: FC<Props> = ({ children }) => {
  // The setting the user wants to have
  const [blurActivated] = useSetting(backgroundBlurSettings);

  // Capability check, evaluated once per mount.
  const supported = useMemo(() => supportsBackgroundProcessors(), []);

  // A single blur processor instance, reused for the provider's lifetime.
  const blur = useMemo(
    () =>
      new ProcessorWrapper(
        new BlurBackgroundTransformer({ blurRadius: 15 }),
        "background-blur",
      ),
    [],
  );

  // This is the actual state exposed through the context
  const processorState = useMemo<ProcessorState>(() => {
    const processor = supported && blurActivated ? blur : undefined;
    return { supported, processor };
  }, [supported, blurActivated, blur]);

  return (
    <ProcessorContext.Provider value={processorState}>
      {children}
    </ProcessorContext.Provider>
  );
};

View File

@@ -9,6 +9,7 @@ import {
ConnectionState,
type E2EEManagerOptions,
ExternalE2EEKeyProvider,
LocalVideoTrack,
Room,
type RoomOptions,
Track,
@@ -17,6 +18,8 @@ import { useEffect, useMemo, useRef } from "react";
import E2EEWorker from "livekit-client/e2ee-worker?worker";
import { logger } from "matrix-js-sdk/lib/logger";
import { type MatrixRTCSession } from "matrix-js-sdk/lib/matrixrtc";
import { useObservable, useObservableEagerState } from "observable-hooks";
import { map } from "rxjs";
import { defaultLiveKitOptions } from "./options";
import { type SFUConfig } from "./openIDSFU";
@@ -33,6 +36,12 @@ import {
import { MatrixKeyProvider } from "../e2ee/matrixKeyProvider";
import { E2eeType } from "../e2ee/e2eeType";
import { type EncryptionSystem } from "../e2ee/sharedKeyManagement";
import {
useTrackProcessor,
useTrackProcessorSync,
} from "./TrackProcessorContext";
import { useInitial } from "../useInitial";
import { observeTrackReference$ } from "../state/MediaViewModel";
interface UseLivekitResult {
livekitRoom?: Room;
@@ -82,12 +91,15 @@ export function useLiveKit(
const devices = useMediaDevices();
const initialDevices = useRef<MediaDevices>(devices);
const { processor } = useTrackProcessor();
const initialProcessor = useInitial(() => processor);
const roomOptions = useMemo(
(): RoomOptions => ({
...defaultLiveKitOptions,
videoCaptureDefaults: {
...defaultLiveKitOptions.videoCaptureDefaults,
deviceId: initialDevices.current.videoInput.selectedId,
processor: initialProcessor,
},
audioCaptureDefaults: {
...defaultLiveKitOptions.audioCaptureDefaults,
@@ -98,7 +110,7 @@ export function useLiveKit(
},
e2ee: e2eeOptions,
}),
[e2eeOptions],
[e2eeOptions, initialProcessor],
);
// Store if audio/video are currently updating. If to prohibit unnecessary calls
@@ -123,6 +135,25 @@ export function useLiveKit(
return r;
}, [roomOptions, e2eeSystem]);
// Sync the requested track processors with LiveKit
useTrackProcessorSync(
useObservableEagerState(
useObservable(
(room$) =>
observeTrackReference$(
room$.pipe(map(([room]) => room.localParticipant)),
Track.Source.Camera,
).pipe(
map((trackRef) => {
const track = trackRef?.publication?.track;
return track instanceof LocalVideoTrack ? track : null;
}),
),
[room],
),
),
);
const connectionState = useECConnectionState(
{
deviceId: initialDevices.current.audioInput.selectedId,
@@ -198,6 +229,7 @@ export function useLiveKit(
audioMuteUpdating.current = true;
trackPublication = await participant.setMicrophoneEnabled(
buttonEnabled.current.audio,
room.options.audioCaptureDefaults,
);
audioMuteUpdating.current = false;
break;
@@ -205,6 +237,7 @@ export function useLiveKit(
videoMuteUpdating.current = true;
trackPublication = await participant.setCameraEnabled(
buttonEnabled.current.video,
room.options.videoCaptureDefaults,
);
videoMuteUpdating.current = false;
break;

View File

@@ -0,0 +1,5 @@
# Google AI Edge MediaPipe Selfie Segmentation
- See: https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter
- Latest: https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_segmenter/float16/latest/selfie_segmenter.tflite
- License: Apache 2.0 as per https://storage.googleapis.com/mediapipe-assets/Model%20Card%20MediaPipe%20Selfie%20Segmentation.pdf

Binary file not shown.

View File

@@ -38,6 +38,7 @@ import { GroupCallView } from "./GroupCallView";
import { type WidgetHelpers } from "../widget";
import { LazyEventEmitter } from "../LazyEventEmitter";
import { MatrixRTCFocusMissingError } from "../utils/errors";
import { ProcessorProvider } from "../livekit/TrackProcessorContext";
vi.mock("../soundUtils");
vi.mock("../useAudioContext");
@@ -46,6 +47,13 @@ vi.mock("react-use-measure", () => ({
default: (): [() => void, object] => [(): void => {}, {}],
}));
vi.hoisted(
() =>
(global.ImageData = class MockImageData {
public data: number[] = [];
} as unknown as typeof ImageData),
);
const enterRTCSession = vi.hoisted(() => vi.fn(async () => Promise.resolve()));
const leaveRTCSession = vi.hoisted(() =>
vi.fn(
@@ -137,18 +145,20 @@ function createGroupCallView(
const { getByText } = render(
<BrowserRouter>
<TooltipProvider>
<GroupCallView
client={client}
isPasswordlessUser={false}
confineToRoom={false}
preload={false}
skipLobby={false}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
isJoined={joined}
muteStates={muteState}
widget={widget}
/>
<ProcessorProvider>
<GroupCallView
client={client}
isPasswordlessUser={false}
confineToRoom={false}
preload={false}
skipLobby={false}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
isJoined={joined}
muteStates={muteState}
widget={widget}
/>
</ProcessorProvider>
</TooltipProvider>
</BrowserRouter>,
);

View File

@@ -12,7 +12,11 @@ import { Button } from "@vector-im/compound-web";
import classNames from "classnames";
import { logger } from "matrix-js-sdk/lib/logger";
import { usePreviewTracks } from "@livekit/components-react";
import { type LocalVideoTrack, Track } from "livekit-client";
import {
type CreateLocalTracksOptions,
type LocalVideoTrack,
Track,
} from "livekit-client";
import { useObservable } from "observable-hooks";
import { map } from "rxjs";
import { useNavigate } from "react-router-dom";
@@ -36,7 +40,11 @@ import { E2eeType } from "../e2ee/e2eeType";
import { Link } from "../button/Link";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useInitial } from "../useInitial";
import { useSwitchCamera } from "./useSwitchCamera";
import { useSwitchCamera as useShowSwitchCamera } from "./useSwitchCamera";
import {
useTrackProcessor,
useTrackProcessorSync,
} from "../livekit/TrackProcessorContext";
import { usePageTitle } from "../usePageTitle";
interface Props {
@@ -112,7 +120,10 @@ export const LobbyView: FC<Props> = ({
muteStates.audio.enabled && { deviceId: devices.audioInput.selectedId },
);
const localTrackOptions = useMemo(
const { processor } = useTrackProcessor();
const initialProcessor = useInitial(() => processor);
const localTrackOptions = useMemo<CreateLocalTracksOptions>(
() => ({
// The only reason we request audio here is to get the audio permission
// request over with at the same time. But changing the audio settings
@@ -123,12 +134,14 @@ export const LobbyView: FC<Props> = ({
audio: Object.assign({}, initialAudioOptions),
video: muteStates.video.enabled && {
deviceId: devices.videoInput.selectedId,
processor: initialProcessor,
},
}),
[
initialAudioOptions,
devices.videoInput.selectedId,
muteStates.video.enabled,
devices.videoInput.selectedId,
initialProcessor,
],
);
@@ -149,8 +162,8 @@ export const LobbyView: FC<Props> = ({
null) as LocalVideoTrack | null,
[tracks],
);
const switchCamera = useSwitchCamera(
useTrackProcessorSync(videoTrack);
const showSwitchCamera = useShowSwitchCamera(
useObservable(
(inputs$) => inputs$.pipe(map(([video]) => video)),
[videoTrack],
@@ -212,7 +225,9 @@ export const LobbyView: FC<Props> = ({
onClick={onVideoPress}
disabled={muteStates.video.setEnabled === null}
/>
{switchCamera && <SwitchCameraButton onClick={switchCamera} />}
{showSwitchCamera && (
<SwitchCameraButton onClick={showSwitchCamera} />
)}
<SettingsButton onClick={openSettings} />
{!confineToRoom && <EndCallButton onClick={onLeaveClick} />}
</div>

View File

@@ -5,10 +5,10 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type FC, useState } from "react";
import { type FC, type ReactNode, useState } from "react";
import { useTranslation } from "react-i18next";
import { type MatrixClient } from "matrix-js-sdk";
import { Root as Form } from "@vector-im/compound-web";
import { Root as Form, Separator } from "@vector-im/compound-web";
import { type Room as LivekitRoom } from "livekit-client";
import { Modal } from "../Modal";
@@ -24,12 +24,15 @@ import { widget } from "../widget";
import {
useSetting,
soundEffectVolumeSetting,
backgroundBlur as backgroundBlurSetting,
developerMode,
} from "./settings";
import { PreferencesSettingsTab } from "./PreferencesSettingsTab";
import { Slider } from "../Slider";
import { DeviceSelection } from "./DeviceSelection";
import { useTrackProcessor } from "../livekit/TrackProcessorContext";
import { DeveloperSettingsTab } from "./DeveloperSettingsTab";
import { FieldRow, InputField } from "../input/Input";
import { useSubmitRageshake } from "./submit-rageshake";
type SettingsTab =
@@ -64,6 +67,33 @@ export const SettingsModal: FC<Props> = ({
}) => {
const { t } = useTranslation();
// Generate a `Checkbox` input to turn blur on or off.
// NOTE(review): this component is defined inside the enclosing settings
// component, so it is recreated on every render — React treats each render's
// function as a new component type, remounting the input each time. Consider
// hoisting it to module scope (passing `t`/state via props) or inlining the
// JSX; confirm against the full SettingsModal before changing.
const BlurCheckbox: React.FC = (): ReactNode => {
const { supported } = useTrackProcessor();
const [blurActive, setBlurActive] = useSetting(backgroundBlurSetting);
return (
<>
<h4>{t("settings.background_blur_header")}</h4>
<FieldRow>
<InputField
id="activateBackgroundBlur"
label={t("settings.background_blur_label")}
description={
supported ? "" : t("settings.blur_not_supported_by_browser")
}
type="checkbox"
checked={!!blurActive}
onChange={(b): void => setBlurActive(b.target.checked)}
disabled={!supported}
/>
</FieldRow>
</>
);
};
const devices = useMediaDevices();
useMediaDeviceNames(devices, open);
const [soundVolume, setSoundVolume] = useSetting(soundEffectVolumeSetting);
@@ -113,13 +143,17 @@ export const SettingsModal: FC<Props> = ({
key: "video",
name: t("common.video"),
content: (
<Form>
<DeviceSelection
devices={devices.videoInput}
title={t("settings.devices.camera")}
numberedLabel={(n) => t("settings.devices.camera_numbered", { n })}
/>
</Form>
<>
<Form>
<DeviceSelection
devices={devices.videoInput}
title={t("settings.devices.camera")}
numberedLabel={(n) => t("settings.devices.camera_numbered", { n })}
/>
</Form>
<Separator />
<BlurCheckbox />
</>
),
};

View File

@@ -96,6 +96,8 @@ export const videoInput = new Setting<string | undefined>(
undefined,
);
export const backgroundBlur = new Setting<boolean>("background-blur", false);
export const showHandRaisedTimer = new Setting<boolean>(
"hand-raised-show-timer",
false,

154
yarn.lock
View File

@@ -2536,6 +2536,17 @@ __metadata:
languageName: node
linkType: hard
"@livekit/track-processors@npm:^0.5.5":
version: 0.5.5
resolution: "@livekit/track-processors@npm:0.5.5"
dependencies:
"@mediapipe/tasks-vision": "npm:0.10.14"
peerDependencies:
livekit-client: ^1.12.0 || ^2.1.0
checksum: 10c0/b86737c4ce6b93c714dfc814c997d07f1d40b0869b5e269b28e00034a3c06da811d9117c4914d8a2706ac760be31f2e9d0705108c07b29f4bbb4626dcd41ed64
languageName: node
linkType: hard
"@matrix-org/matrix-sdk-crypto-wasm@npm:^14.0.1":
version: 14.0.1
resolution: "@matrix-org/matrix-sdk-crypto-wasm@npm:14.0.1"
@@ -2550,6 +2561,13 @@ __metadata:
languageName: node
linkType: hard
"@mediapipe/tasks-vision@npm:^0.10.18":
version: 0.10.21
resolution: "@mediapipe/tasks-vision@npm:0.10.21"
checksum: 10c0/11b2bdf98b8cb6e044f2a954e7c8393169e62c86ff49b3d0b61c3b327d18e1ccd47a187999b023bad48380c9da41bfa66eb165301c80da07746390482cb18a19
languageName: node
linkType: hard
"@nodelib/fs.scandir@npm:2.1.5":
version: 2.1.5
resolution: "@nodelib/fs.scandir@npm:2.1.5"
@@ -4797,15 +4815,15 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/eslint-plugin@npm:^8.0.0":
version: 8.25.0
resolution: "@typescript-eslint/eslint-plugin@npm:8.25.0"
"@typescript-eslint/eslint-plugin@npm:^8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/eslint-plugin@npm:8.31.0"
dependencies:
"@eslint-community/regexpp": "npm:^4.10.0"
"@typescript-eslint/scope-manager": "npm:8.25.0"
"@typescript-eslint/type-utils": "npm:8.25.0"
"@typescript-eslint/utils": "npm:8.25.0"
"@typescript-eslint/visitor-keys": "npm:8.25.0"
"@typescript-eslint/scope-manager": "npm:8.31.0"
"@typescript-eslint/type-utils": "npm:8.31.0"
"@typescript-eslint/utils": "npm:8.31.0"
"@typescript-eslint/visitor-keys": "npm:8.31.0"
graphemer: "npm:^1.4.0"
ignore: "npm:^5.3.1"
natural-compare: "npm:^1.4.0"
@@ -4813,8 +4831,8 @@ __metadata:
peerDependencies:
"@typescript-eslint/parser": ^8.0.0 || ^8.0.0-alpha.0
eslint: ^8.57.0 || ^9.0.0
typescript: ">=4.8.4 <5.8.0"
checksum: 10c0/11d63850f5f03b29cd31166f8da111788dc74e46877c2e16a5c488d6c4aa4b6c68c0857b9a396ad920aa7f0f3e7166f4faecbb194c19cd2bb9d3f687c5d2b292
typescript: ">=4.8.4 <5.9.0"
checksum: 10c0/7d78e0cdcc967742752d49d2d38986ee38d0b7ca64af247e5fe0816cea9ae5f1bfa5c126154acc0846af515c4fb1c52c96926ee25c73b4c3f7e6fd73cb6d2b0e
languageName: node
linkType: hard
@@ -4829,19 +4847,19 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/parser@npm:^8.0.0":
version: 8.25.0
resolution: "@typescript-eslint/parser@npm:8.25.0"
"@typescript-eslint/parser@npm:^8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/parser@npm:8.31.0"
dependencies:
"@typescript-eslint/scope-manager": "npm:8.25.0"
"@typescript-eslint/types": "npm:8.25.0"
"@typescript-eslint/typescript-estree": "npm:8.25.0"
"@typescript-eslint/visitor-keys": "npm:8.25.0"
"@typescript-eslint/scope-manager": "npm:8.31.0"
"@typescript-eslint/types": "npm:8.31.0"
"@typescript-eslint/typescript-estree": "npm:8.31.0"
"@typescript-eslint/visitor-keys": "npm:8.31.0"
debug: "npm:^4.3.4"
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
typescript: ">=4.8.4 <5.8.0"
checksum: 10c0/9a54539ba297791f23093ff42a885cc57d36b26205d7a390e114d1f01cc584ce91ac6ead01819daa46b48f873cac6c829fcf399a436610bdbfa98e5cd78148a2
typescript: ">=4.8.4 <5.9.0"
checksum: 10c0/9bd903b3ea4e24bfeb444d7a5c2ed82e591ef5cffc0874c609de854c05d34935cd85543e66678ecdb8e0e3eae2cda2df5c1ba66eb72010632cb9f8779031d56d
languageName: node
linkType: hard
@@ -4865,18 +4883,28 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/type-utils@npm:8.25.0":
version: 8.25.0
resolution: "@typescript-eslint/type-utils@npm:8.25.0"
"@typescript-eslint/scope-manager@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/scope-manager@npm:8.31.0"
dependencies:
"@typescript-eslint/typescript-estree": "npm:8.25.0"
"@typescript-eslint/utils": "npm:8.25.0"
"@typescript-eslint/types": "npm:8.31.0"
"@typescript-eslint/visitor-keys": "npm:8.31.0"
checksum: 10c0/eae758a24cc578fa351b8bf0c30c50de384292c0b05a58762f9b632d65a009bd5d902d806eccb6b678cc0b09686289fb4f1fd67da7f12d59ad43ff033b35cc4f
languageName: node
linkType: hard
"@typescript-eslint/type-utils@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/type-utils@npm:8.31.0"
dependencies:
"@typescript-eslint/typescript-estree": "npm:8.31.0"
"@typescript-eslint/utils": "npm:8.31.0"
debug: "npm:^4.3.4"
ts-api-utils: "npm:^2.0.1"
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
typescript: ">=4.8.4 <5.8.0"
checksum: 10c0/b7477a2d239cfd337f7d28641666763cf680a43a8d377a09dc42415f715670d35fbb4e772e103dfe8cd620c377e66bce740106bb3983ee65a739c28fab7325d1
typescript: ">=4.8.4 <5.9.0"
checksum: 10c0/f6938413a583430468b259f6823bb2ab1b5cd77cd6d4e21e1803df70e329046b9579aed5bdc9bdcf4046c8091615a911ac3990859db78d00210bb867915ba37f
languageName: node
linkType: hard
@@ -4894,6 +4922,13 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/types@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/types@npm:8.31.0"
checksum: 10c0/04130a30aac477d36d6a155399b27773457aeb9b485ef8fb56fee05725b6e36768c9fac7e4d1f073fd16988de0eb7dffc743c3f834ae907cf918cabb075e5cd8
languageName: node
linkType: hard
"@typescript-eslint/typescript-estree@npm:5.62.0":
version: 5.62.0
resolution: "@typescript-eslint/typescript-estree@npm:5.62.0"
@@ -4930,6 +4965,24 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/typescript-estree@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/typescript-estree@npm:8.31.0"
dependencies:
"@typescript-eslint/types": "npm:8.31.0"
"@typescript-eslint/visitor-keys": "npm:8.31.0"
debug: "npm:^4.3.4"
fast-glob: "npm:^3.3.2"
is-glob: "npm:^4.0.3"
minimatch: "npm:^9.0.4"
semver: "npm:^7.6.0"
ts-api-utils: "npm:^2.0.1"
peerDependencies:
typescript: ">=4.8.4 <5.9.0"
checksum: 10c0/0ec074b2b9c49f80fafea716aa0cc4b05085e65730a3ef7c7d2d39db1657a40b38abe83f22bbe15ac4f6fdf576692f47d2d057347242e6cef5be81d070f55064
languageName: node
linkType: hard
"@typescript-eslint/utils@npm:5.62.0":
version: 5.62.0
resolution: "@typescript-eslint/utils@npm:5.62.0"
@@ -4948,7 +5001,22 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/utils@npm:8.25.0, @typescript-eslint/utils@npm:^8.13.0":
"@typescript-eslint/utils@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/utils@npm:8.31.0"
dependencies:
"@eslint-community/eslint-utils": "npm:^4.4.0"
"@typescript-eslint/scope-manager": "npm:8.31.0"
"@typescript-eslint/types": "npm:8.31.0"
"@typescript-eslint/typescript-estree": "npm:8.31.0"
peerDependencies:
eslint: ^8.57.0 || ^9.0.0
typescript: ">=4.8.4 <5.9.0"
checksum: 10c0/1fd4f62e16a44a5be2de501f70ba4b2d64479e014370bde7bbc6de6897cf1699766a8b7be4deb9b0328e74c2b4171839336ede4e3c60fec6ac8378b623a75275
languageName: node
linkType: hard
"@typescript-eslint/utils@npm:^8.13.0":
version: 8.25.0
resolution: "@typescript-eslint/utils@npm:8.25.0"
dependencies:
@@ -4983,6 +5051,16 @@ __metadata:
languageName: node
linkType: hard
"@typescript-eslint/visitor-keys@npm:8.31.0":
version: 8.31.0
resolution: "@typescript-eslint/visitor-keys@npm:8.31.0"
dependencies:
"@typescript-eslint/types": "npm:8.31.0"
eslint-visitor-keys: "npm:^4.2.0"
checksum: 10c0/e41e2a9e287d11232cda6126377d1df4de69c6e9dc2a14058819cff15280ec654a3877886a6806728196f299766cfbb0b299eb021c2ce168eb15dff5eb07b51b
languageName: node
linkType: hard
"@ungap/structured-clone@npm:^1.2.0":
version: 1.2.0
resolution: "@ungap/structured-clone@npm:1.2.0"
@@ -6853,6 +6931,8 @@ __metadata:
"@livekit/components-core": "npm:^0.12.0"
"@livekit/components-react": "npm:^2.0.0"
"@livekit/protocol": "npm:^1.33.0"
"@livekit/track-processors": "npm:^0.5.5"
"@mediapipe/tasks-vision": "npm:^0.10.18"
"@opentelemetry/api": "npm:^1.4.0"
"@opentelemetry/core": "npm:^1.25.1"
"@opentelemetry/exporter-trace-otlp-http": "npm:^0.57.0"
@@ -6883,8 +6963,8 @@ __metadata:
"@types/react-dom": "npm:^18.3.0"
"@types/sdp-transform": "npm:^2.4.5"
"@types/uuid": "npm:10"
"@typescript-eslint/eslint-plugin": "npm:^8.0.0"
"@typescript-eslint/parser": "npm:^8.0.0"
"@typescript-eslint/eslint-plugin": "npm:^8.31.0"
"@typescript-eslint/parser": "npm:^8.31.0"
"@use-gesture/react": "npm:^10.2.11"
"@vector-im/compound-design-tokens": "npm:^3.0.0"
"@vector-im/compound-web": "npm:^7.2.0"
@@ -6931,7 +7011,7 @@ __metadata:
react-use-measure: "npm:^2.1.1"
rxjs: "npm:^7.8.1"
sass: "npm:^1.42.1"
typescript: "npm:^5.1.6"
typescript: "npm:^5.8.3"
typescript-eslint-language-service: "npm:^5.0.5"
unique-names-generator: "npm:^4.6.0"
vaul: "npm:^1.0.0"
@@ -12672,13 +12752,13 @@ __metadata:
languageName: node
linkType: hard
"typescript@npm:^5.1.6":
version: 5.7.3
resolution: "typescript@npm:5.7.3"
"typescript@npm:^5.8.3":
version: 5.8.3
resolution: "typescript@npm:5.8.3"
bin:
tsc: bin/tsc
tsserver: bin/tsserver
checksum: 10c0/b7580d716cf1824736cc6e628ab4cd8b51877408ba2be0869d2866da35ef8366dd6ae9eb9d0851470a39be17cbd61df1126f9e211d8799d764ea7431d5435afa
checksum: 10c0/5f8bb01196e542e64d44db3d16ee0e4063ce4f3e3966df6005f2588e86d91c03e1fb131c2581baf0fb65ee79669eea6e161cd448178986587e9f6844446dbb48
languageName: node
linkType: hard
@@ -12692,13 +12772,13 @@ __metadata:
languageName: node
linkType: hard
"typescript@patch:typescript@npm%3A^5.1.6#optional!builtin<compat/typescript>":
version: 5.7.3
resolution: "typescript@patch:typescript@npm%3A5.7.3#optional!builtin<compat/typescript>::version=5.7.3&hash=5786d5"
"typescript@patch:typescript@npm%3A^5.8.3#optional!builtin<compat/typescript>":
version: 5.8.3
resolution: "typescript@patch:typescript@npm%3A5.8.3#optional!builtin<compat/typescript>::version=5.8.3&hash=5786d5"
bin:
tsc: bin/tsc
tsserver: bin/tsserver
checksum: 10c0/6fd7e0ed3bf23a81246878c613423730c40e8bdbfec4c6e4d7bf1b847cbb39076e56ad5f50aa9d7ebd89877999abaee216002d3f2818885e41c907caaa192cc4
checksum: 10c0/39117e346ff8ebd87ae1510b3a77d5d92dae5a89bde588c747d25da5c146603a99c8ee588c7ef80faaf123d89ed46f6dbd918d534d641083177d5fac38b8a1cb
languageName: node
linkType: hard