feat: video auto fit based on video stream size

This commit is contained in:
Valere
2026-02-25 15:47:25 +01:00
parent 4a0e89730d
commit 1de8d93b4b
6 changed files with 441 additions and 45 deletions

251
src/utils/videoFit.test.ts Normal file
View File

@@ -0,0 +1,251 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { describe, expect, test, vi } from "vitest";
import {
LocalTrack,
type LocalTrackPublication,
type RemoteTrackPublication,
Track,
} from "livekit-client";
import { ObservableScope } from "../state/ObservableScope";
import { videoFit$, videoSizeFromParticipant$ } from "./videoFit";
import { constant } from "../state/Behavior";
import {
flushPromises,
mockLocalParticipant,
mockRemoteParticipant,
} from "./test";
describe("videoFit$ defaults", () => {
  // When either size is unknown, the safe default is "cover" (fill the
  // tile rather than show black bars).
  const defaultCases = [
    { videoSize: { width: 1920, height: 1080 }, tileSize: undefined },
    { videoSize: { width: 1080, height: 1920 }, tileSize: undefined },
    { videoSize: undefined, tileSize: { width: 1920, height: 1080 } },
    { videoSize: undefined, tileSize: { width: 1080, height: 1920 } },
  ];
  test.each(defaultCases)(
    "videoFit$ returns `cover` when videoSize is $videoSize and tileSize is $tileSize",
    ({ videoSize, tileSize }) => {
      const scope = new ObservableScope();
      const fit = videoFit$(scope, constant(videoSize), constant(tileSize));
      expect(fit.value).toBe("cover");
    },
  );
});
// Standard landscape video resolutions used as fixtures (suffix L = landscape).
const VIDEO_480_L = { width: 640, height: 480 };
const VIDEO_720_L = { width: 1280, height: 720 };
const VIDEO_1080_L = { width: 1920, height: 1080 };
// Some sizes from real world testing, which don't match the standard video sizes exactly
// (suffix L = landscape, P = portrait).
const TILE_SIZE_1_L = { width: 180, height: 135 };
const TILE_SIZE_3_P = { width: 379, height: 542 };
const TILE_SIZE_4_L = { width: 957, height: 542 };
// This is the size of an iPhone Xr in portrait mode
const TILE_SIZE_5_P = { width: 414, height: 896 };
/**
 * Swaps the width and height of a size, e.g. to turn a landscape
 * resolution fixture into its portrait counterpart.
 */
export function invertSize(size: { width: number; height: number }): {
  width: number;
  height: number;
} {
  const { width, height } = size;
  return { width: height, height: width };
}
// Orientation cases: when video and tile share an orientation the video
// should "cover" (fill) the tile; when they differ it should "contain"
// (letterbox) to avoid excessive cropping.
const fitCases = [
  { videoSize: VIDEO_480_L, tileSize: TILE_SIZE_1_L, expected: "cover" },
  {
    videoSize: invertSize(VIDEO_480_L),
    tileSize: TILE_SIZE_1_L,
    expected: "contain",
  },
  { videoSize: VIDEO_720_L, tileSize: TILE_SIZE_4_L, expected: "cover" },
  {
    videoSize: invertSize(VIDEO_720_L),
    tileSize: TILE_SIZE_4_L,
    expected: "contain",
  },
  {
    videoSize: invertSize(VIDEO_1080_L),
    tileSize: TILE_SIZE_3_P,
    expected: "cover",
  },
  { videoSize: VIDEO_1080_L, tileSize: TILE_SIZE_5_P, expected: "contain" },
  {
    videoSize: invertSize(VIDEO_1080_L),
    tileSize: TILE_SIZE_5_P,
    expected: "cover",
  },
  // square video: never shares an orientation with a landscape tile
  {
    videoSize: { width: 400, height: 400 },
    tileSize: VIDEO_480_L,
    expected: "contain",
  },
];
test.each(fitCases)(
  "videoFit$ returns $expected when videoSize is $videoSize and tileSize is $tileSize",
  ({ videoSize, tileSize, expected }) => {
    const scope = new ObservableScope();
    const fit = videoFit$(scope, constant(videoSize), constant(tileSize));
    expect(fit.value).toBe(expected);
  },
);
// Tests for videoSizeFromParticipant$: the video size should be read from
// the frameWidth/frameHeight fields of the participant's camera track
// WebRTC stats (outbound stats for a local participant, inbound for remote).
describe("extracting video size from participant stats", () => {
  // Builds a minimal RTP stats entry of the requested direction; extra
  // fields (e.g. frameWidth/frameHeight) are supplied via `props`.
  function createMockRtpStats(
    isInbound: boolean,
    props: Partial<RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats> = {},
  ): RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats {
    const baseStats = {
      id: "mock-stats-id",
      timestamp: Date.now(),
      type: isInbound ? "inbound-rtp" : "outbound-rtp",
      kind: "video",
      ...props,
    };
    return baseStats as RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats;
  }
  test("get stats for local user", async () => {
    const localParticipant = mockLocalParticipant({
      identity: "@local:example.org:AAAAAA",
    });
    // A local participant publishes video, so its size should come from an
    // outbound-rtp stats entry (1280x720 landscape here).
    const mockReport: RTCStatsReport = new Map([
      [
        "OT01V639885149",
        createMockRtpStats(false, {
          frameWidth: 1280,
          frameHeight: 720,
        }),
      ],
    ]);
    const track = {
      source: Track.Source.Camera,
      getRTCStatsReport: vi
        .fn()
        .mockImplementation(async () => Promise.resolve(mockReport)),
    } as Partial<LocalTrack> as LocalTrack;
    // Set up the prototype chain (there is an instanceof check in getRTCStatsReport)
    Object.setPrototypeOf(track, LocalTrack.prototype);
    // Only the camera source has a publication; every other source resolves
    // to undefined, like a participant with just a camera track.
    localParticipant.getTrackPublication = vi
      .fn()
      .mockImplementation((source: Track.Source) => {
        if (source === Track.Source.Camera) {
          return {
            track,
          } as unknown as LocalTrackPublication;
        } else {
          return undefined;
        }
      });
    const videoDimensions$ = videoSizeFromParticipant$(
      constant(localParticipant),
    );
    // Collect every defined size the observable emits, then check the most
    // recent one once the async stats fetch has settled.
    const publishedDimensions: { width: number; height: number }[] = [];
    videoDimensions$.subscribe((dimensions) => {
      if (dimensions) publishedDimensions.push(dimensions);
    });
    await flushPromises();
    const dimension = publishedDimensions.pop();
    expect(dimension).toEqual({ width: 1280, height: 720 });
  });
  test("get stats for remote user", async () => {
    const remoteParticipant = mockRemoteParticipant({
      identity: "@bob:example.org:AAAAAA",
    });
    // A remote participant's video is received, so its size should come
    // from an inbound-rtp stats entry (480x640 portrait here).
    const mockReport: RTCStatsReport = new Map([
      [
        "OT01V639885149",
        createMockRtpStats(true, {
          frameWidth: 480,
          frameHeight: 640,
        }),
      ],
    ]);
    // NOTE(review): the mock track is typed and prototyped as LocalTrack
    // even in this remote case — presumably only the instanceof check
    // inside getRTCStatsReport matters here; confirm against livekit-client.
    const track = {
      source: Track.Source.Camera,
      getRTCStatsReport: vi
        .fn()
        .mockImplementation(async () => Promise.resolve(mockReport)),
    } as Partial<LocalTrack> as LocalTrack;
    // Set up the prototype chain (there is an instanceof check in getRTCStatsReport)
    Object.setPrototypeOf(track, LocalTrack.prototype);
    remoteParticipant.getTrackPublication = vi
      .fn()
      .mockImplementation((source: Track.Source) => {
        if (source === Track.Source.Camera) {
          return {
            track,
          } as unknown as RemoteTrackPublication;
        } else {
          return undefined;
        }
      });
    const videoDimensions$ = videoSizeFromParticipant$(
      constant(remoteParticipant),
    );
    const publishedDimensions: { width: number; height: number }[] = [];
    videoDimensions$.subscribe((dimensions) => {
      if (dimensions) publishedDimensions.push(dimensions);
    });
    await flushPromises();
    // The last emitted size should match the mocked frame dimensions.
    const dimension = publishedDimensions.pop();
    expect(dimension).toEqual({ width: 480, height: 640 });
  });
});

94
src/utils/videoFit.ts Normal file
View File

@@ -0,0 +1,94 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, type Observable, of, switchMap } from "rxjs";
import {
type LocalParticipant,
type RemoteParticipant,
Track,
} from "livekit-client";
import { type ObservableScope } from "../state/ObservableScope.ts";
import { type Behavior } from "../state/Behavior.ts";
import {
observeInboundRtpStreamStats$,
observeOutboundRtpStreamStats$,
} from "../state/MediaViewModel.ts";
type Size = {
  width: number;
  height: number;
};

/**
 * Derives the object-fit mode for a video tile from the size of the video
 * stream and the size of the tile it is rendered in.
 *
 * Matching orientations fill the tile ("cover", cropping if needed);
 * mismatched orientations letterbox ("contain") to avoid heavy cropping.
 * @param scope - the scope the resulting behavior is bound to.
 * @param videoSize$ - the video stream size, or undefined when unknown.
 * @param tileSize$ - the tile size, or undefined when unknown.
 * @returns a Behavior holding "cover" or "contain", starting at "cover".
 */
export function videoFit$(
  scope: ObservableScope,
  videoSize$: Observable<Size | undefined>,
  tileSize$: Observable<Size | undefined>,
): Behavior<"cover" | "contain"> {
  // width > height is landscape; square and portrait sizes are treated alike
  const isLandscape = ({ width, height }: Size): boolean => width > height;
  const fit$ = combineLatest([videoSize$, tileSize$]).pipe(
    map(([videoSize, tileSize]) => {
      // Without both sizes, default to "cover" so the video fills the tile
      // (no black bars), at the cost of possibly cropping it.
      if (videoSize === undefined || tileSize === undefined) return "cover";
      return isLandscape(videoSize) === isLandscape(tileSize)
        ? "cover"
        : "contain";
    }),
  );
  return scope.behavior(fit$, "cover");
}
/**
 * Observes the video size (frame width and height) of a participant's
 * camera track, read from its RTP stream stats.
 *
 * Local participants are observed via their outbound RTP stream stats,
 * remote participants via their inbound RTP stream stats.
 * @param participant$ - the participant to observe, or null when no
 *   participant is selected.
 * @returns an Observable emitting the video size, or undefined when it
 *   cannot be determined (no participant, or stats without numeric frame
 *   dimensions).
 */
export function videoSizeFromParticipant$(
  participant$: Observable<LocalParticipant | RemoteParticipant | null>,
): Observable<{ width: number; height: number } | undefined> {
  return participant$.pipe(
    // Pick the stats stream that matches the participant's direction; no
    // participant means no size to report.
    switchMap((participant) =>
      participant === null
        ? of(undefined)
        : participant.isLocal
          ? observeOutboundRtpStreamStats$(participant, Track.Source.Camera)
          : observeInboundRtpStreamStats$(participant, Track.Source.Camera),
    ),
    // Only emit a size when the stats carry numeric frame dimensions.
    map((stats) => {
      if (!stats) return undefined;
      const { frameWidth, frameHeight } = stats;
      if (typeof frameWidth !== "number" || typeof frameHeight !== "number") {
        return undefined;
      }
      return { width: frameWidth, height: frameHeight };
    }),
  );
}