Merge pull request #3756 from element-hq/valere/auto_fit_based_on_video_ratio

Auto-fit the video in a tile based on the orientations of the tile and the source stream
This commit is contained in:
Valere Fedronic
2026-03-09 15:31:15 +01:00
committed by GitHub
9 changed files with 450 additions and 37 deletions
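
In short: a tile now chooses its CSS object-fit mode from the orientations of the source stream and the tile itself. A minimal sketch of the rule this PR implements (pickFit is a hypothetical name used for illustration; the real implementation is videoFit$ in src/utils/videoFit.ts, added below):

type Size = { width: number; height: number };

// Hypothetical distillation of the rule: use "cover" when the video and tile
// share an orientation (fill the tile, cropping if needed); use "contain" when
// they differ (letterbox rather than crop away most of the frame). Unknown or
// zero sizes default to "cover".
function pickFit(video?: Size, tile?: Size): "cover" | "contain" {
  if (!video || !tile) return "cover";
  if (!video.width || !video.height || !tile.width || !tile.height) return "cover";
  const videoIsLandscape = video.width / video.height > 1;
  const tileIsLandscape = tile.width / tile.height > 1;
  return videoIsLandscape === tileIsLandscape ? "cover" : "contain";
}

pickFit({ width: 1280, height: 720 }, { width: 957, height: 542 }); // "cover"
pickFit({ width: 720, height: 1280 }, { width: 957, height: 542 }); // "contain"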

View File

@@ -250,7 +250,6 @@
"video_tile": {
"always_show": "Always show",
"camera_starting": "Video loading...",
"change_fit_contain": "Fit to frame",
"collapse": "Collapse",
"expand": "Expand",
"mute_for_me": "Mute for me",

View File

@@ -27,7 +27,13 @@ interface Props<M, R extends HTMLElement> {
state: Parameters<Handler<"drag", EventTypes["drag"]>>[0],
) => void
> | null;
/**
* The width this tile will have once its animations have settled.
*/
targetWidth: number;
/**
* The height this tile will have once its animations have settled.
*/
targetHeight: number;
model: M;
Tile: ComponentType<TileProps<M, R>>;

View File

@@ -160,21 +160,6 @@ test("control a participant's screen share volume", () => {
});
});
test("toggle fit/contain for a participant's video", () => {
const vm = mockRemoteMedia(rtcMembership, {}, mockRemoteParticipant({}));
withTestScheduler(({ expectObservable, schedule }) => {
schedule("-ab|", {
a: () => vm.toggleCropVideo(),
b: () => vm.toggleCropVideo(),
});
expectObservable(vm.cropVideo$).toBe("abc", {
a: true,
b: false,
c: true,
});
});
});
test("local media remembers whether it should always be shown", () => {
const vm1 = mockLocalMedia(
rtcMembership,

View File

@@ -7,6 +7,7 @@ Please see LICENSE in the repository root for full details.
*/
import {
BehaviorSubject,
combineLatest,
map,
type Observable,
@@ -30,9 +31,9 @@ import {
} from "./MemberMediaViewModel";
import { type RemoteUserMediaViewModel } from "./RemoteUserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
import { createToggle$ } from "../../utils/observable";
import { showConnectionStats } from "../../settings/settings";
import { observeRtpStreamStats$ } from "./observeRtpStreamStats";
import { videoFit$, videoSizeFromParticipant$ } from "../../utils/videoFit.ts";
/**
* A participant's user media (i.e. their microphone and camera feed).
@@ -46,7 +47,7 @@ export interface BaseUserMediaViewModel extends MemberMediaViewModel {
speaking$: Behavior<boolean>;
audioEnabled$: Behavior<boolean>;
videoEnabled$: Behavior<boolean>;
cropVideo$: Behavior<boolean>;
videoFit$: Behavior<"cover" | "contain">;
toggleCropVideo: () => void;
/**
* The expected identity of the LiveKit participant. Exposed for debugging.
@@ -60,6 +61,13 @@ export interface BaseUserMediaViewModel extends MemberMediaViewModel {
videoStreamStats$: Observable<
RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
>;
/**
* Set the target dimensions of the HTML element (the final dimensions once any animations have settled).
* This can be used to determine the best video fit ("cover" vs "contain").
* @param targetWidth - The target width of the HTML element displaying the video.
* @param targetHeight - The target height of the HTML element displaying the video.
*/
setTargetDimensions: (targetWidth: number, targetHeight: number) => void;
}
export interface BaseUserMediaInputs extends Omit<
@@ -90,6 +98,12 @@ export function createBaseUserMedia(
);
const toggleCropVideo$ = new Subject<void>();
// The target size of the video element, used to determine the best video fit.
// The target size is the final size of the HTML element after any animations have completed.
const targetSize$ = new BehaviorSubject<
{ width: number; height: number } | undefined
>(undefined);
return {
...createMemberMedia(scope, {
...inputs,
@@ -115,7 +129,11 @@ export function createBaseUserMedia(
videoEnabled$: scope.behavior(
media$.pipe(map((m) => m?.cameraTrack?.isMuted === false)),
),
cropVideo$: createToggle$(scope, true, toggleCropVideo$),
videoFit$: videoFit$(
scope,
videoSizeFromParticipant$(participant$),
targetSize$,
),
toggleCropVideo: () => toggleCropVideo$.next(),
rtcBackendIdentity,
handRaised$,
@@ -139,5 +157,8 @@ export function createBaseUserMedia(
return observeRtpStreamStats$(p, Track.Source.Camera, statsType);
}),
),
setTargetDimensions: (targetWidth: number, targetHeight: number): void => {
targetSize$.next({ width: targetWidth, height: targetHeight });
},
};
}
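
A caller is expected to report the tile's settled size through setTargetDimensions, after which the current fit can be read from the behavior. A minimal usage sketch, assuming vm was created by createBaseUserMedia above (the real React wiring is the useEffect added to UserMediaTile further down):

// Report the element's post-animation size; videoFit$ recomputes from it.
vm.setTargetDimensions(957, 542);
// videoFit$ is a Behavior, so the current fit can be read synchronously.
console.log(vm.videoFit$.value); // "cover" or "contain"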

View File

@@ -67,3 +67,12 @@ export function observeInboundRtpStreamStats$(
map((x) => x as RTCInboundRtpStreamStats | undefined),
);
}
export function observeOutboundRtpStreamStats$(
participant: Participant,
source: Track.Source,
): Observable<RTCOutboundRtpStreamStats | undefined> {
return observeRtpStreamStats$(participant, source, "outbound-rtp").pipe(
map((x) => x as RTCOutboundRtpStreamStats | undefined),
);
}
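
The new outbound variant mirrors the existing inbound helper. A usage sketch, under the assumption that participant is a local participant publishing a camera track (Track comes from livekit-client):

// Emits the sender-side stats for the camera track, or undefined when absent.
observeOutboundRtpStreamStats$(participant, Track.Source.Camera).subscribe(
  (stats) => {
    // frameWidth/frameHeight carry the published video dimensions, when known.
    if (stats?.frameWidth && stats?.frameHeight)
      console.log(`${stats.frameWidth}x${stats.frameHeight}`);
  },
);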

View File

@@ -11,6 +11,7 @@ import {
type ReactNode,
type Ref,
useCallback,
useEffect,
useRef,
useState,
} from "react";
@@ -26,7 +27,6 @@ import {
VolumeOffIcon,
VisibilityOnIcon,
UserProfileIcon,
ExpandIcon,
VolumeOffSolidIcon,
SwitchCameraSolidIcon,
} from "@vector-im/compound-design-tokens/assets/web/icons";
@@ -87,6 +87,8 @@ const UserMediaTile: FC<UserMediaTileProps> = ({
displayName,
mxcAvatarUrl,
focusable,
targetWidth,
targetHeight,
...props
}) => {
const { toggleRaisedHand } = useReactionsSender();
@@ -103,18 +105,19 @@ const UserMediaTile: FC<UserMediaTileProps> = ({
const audioEnabled = useBehavior(vm.audioEnabled$);
const videoEnabled = useBehavior(vm.videoEnabled$);
const speaking = useBehavior(vm.speaking$);
const cropVideo = useBehavior(vm.cropVideo$);
const onSelectFitContain = useCallback(
(e: Event) => {
e.preventDefault();
vm.toggleCropVideo();
},
[vm],
);
const videoFit = useBehavior(vm.videoFit$);
const rtcBackendIdentity = vm.rtcBackendIdentity;
const handRaised = useBehavior(vm.handRaised$);
const reaction = useBehavior(vm.reaction$);
// Whenever bounds change, inform the viewModel
useEffect(() => {
if (targetWidth > 0 && targetHeight > 0) {
vm.setTargetDimensions(targetWidth, targetHeight);
}
}, [targetWidth, targetHeight, vm]);
const AudioIcon = playbackMuted
? VolumeOffSolidIcon
: audioEnabled
@@ -130,12 +133,10 @@ const UserMediaTile: FC<UserMediaTileProps> = ({
const menu = (
<>
{menuStart}
<ToggleMenuItem
Icon={ExpandIcon}
label={t("video_tile.change_fit_contain")}
checked={cropVideo}
onSelect={onSelectFitContain}
/>
{/*
No additional menu items here for now (this used to be the manual
fit-to-frame toggle). Placeholder for future menu items.
*/}
{menuEnd}
</>
);
@@ -154,7 +155,7 @@ const UserMediaTile: FC<UserMediaTileProps> = ({
unencryptedWarning={unencryptedWarning}
encryptionStatus={encryptionStatus}
videoEnabled={videoEnabled}
videoFit={cropVideo ? "cover" : "contain"}
videoFit={videoFit}
className={classNames(className, styles.tile, {
[styles.speaking]: showSpeaking,
[styles.handRaised]: !showSpeaking && handRaised,
@@ -200,6 +201,8 @@ const UserMediaTile: FC<UserMediaTileProps> = ({
audioStreamStats={audioStreamStats}
videoStreamStats={videoStreamStats}
rtcBackendIdentity={rtcBackendIdentity}
targetWidth={targetWidth}
targetHeight={targetHeight}
{...props}
/>
);

View File

@@ -111,12 +111,12 @@ const SpotlightUserMediaItem: FC<SpotlightUserMediaItemProps> = ({
vm,
...props
}) => {
const cropVideo = useBehavior(vm.cropVideo$);
const videoFit = useBehavior(vm.videoFit$);
const videoEnabled = useBehavior(vm.videoEnabled$);
const baseProps: SpotlightUserMediaItemBaseProps &
RefAttributes<HTMLDivElement> = {
videoFit: cropVideo ? "cover" : "contain",
videoFit,
videoEnabled,
...props,
};
@@ -158,7 +158,13 @@ const SpotlightRemoteScreenShareItem: FC<
interface SpotlightItemProps {
ref?: Ref<HTMLDivElement>;
vm: MediaViewModel;
/**
* The width this tile will have once its animations have settled.
*/
targetWidth: number;
/**
* The height this tile will have once its animations have settled.
*/
targetHeight: number;
focusable: boolean;
intersectionObserver$: Observable<IntersectionObserver>;
@@ -180,6 +186,16 @@ const SpotlightItem: FC<SpotlightItemProps> = ({
"aria-hidden": ariaHidden,
}) => {
const ourRef = useRef<HTMLDivElement | null>(null);
// Whenever target bounds change, inform the viewModel
useEffect(() => {
if (targetWidth > 0 && targetHeight > 0) {
if (vm.type !== "screen share") {
vm.setTargetDimensions(targetWidth, targetHeight);
}
}
}, [targetWidth, targetHeight, vm]);
const ref = useMergedRefs(ourRef, theirRef);
const focusUrl = useBehavior(vm.focusUrl$);
const displayName = useBehavior(vm.displayName$);

src/utils/videoFit.test.ts Normal file
View File

@@ -0,0 +1,263 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { describe, expect, test, vi } from "vitest";
import {
LocalTrack,
type LocalTrackPublication,
type RemoteTrackPublication,
Track,
} from "livekit-client";
import { ObservableScope } from "../state/ObservableScope";
import { videoFit$, videoSizeFromParticipant$ } from "./videoFit";
import { constant } from "../state/Behavior";
import {
flushPromises,
mockLocalParticipant,
mockRemoteParticipant,
} from "./test";
describe("videoFit$ defaults", () => {
test.each([
{
videoSize: { width: 1920, height: 1080 },
tileSize: undefined,
},
{
videoSize: { width: 1080, height: 1920 },
tileSize: undefined,
},
{
videoSize: undefined,
tileSize: { width: 1920, height: 1080 },
},
{
videoSize: undefined,
tileSize: { width: 1080, height: 1920 },
},
])(
"videoFit$ returns `cover` when videoSize is $videoSize and tileSize is $tileSize",
({ videoSize, tileSize }) => {
const scope = new ObservableScope();
const videoSize$ = constant(videoSize);
const tileSize$ = constant(tileSize);
const fit = videoFit$(scope, videoSize$, tileSize$);
expect(fit.value).toBe("cover");
},
);
});
const VIDEO_480_L = { width: 640, height: 480 };
const VIDEO_720_L = { width: 1280, height: 720 };
const VIDEO_1080_L = { width: 1920, height: 1080 };
// Some sizes from real-world testing, which don't match the standard video sizes exactly
const TILE_SIZE_1_L = { width: 180, height: 135 };
const TILE_SIZE_3_P = { width: 379, height: 542 };
const TILE_SIZE_4_L = { width: 957, height: 542 };
// This is the size of an iPhone Xr in portrait mode
const TILE_SIZE_5_P = { width: 414, height: 896 };
export function invertSize(size: { width: number; height: number }): {
width: number;
height: number;
} {
return {
width: size.height,
height: size.width,
};
}
test.each([
{
videoSize: VIDEO_480_L,
tileSize: TILE_SIZE_1_L,
expected: "cover",
},
{
videoSize: invertSize(VIDEO_480_L),
tileSize: TILE_SIZE_1_L,
expected: "contain",
},
{
videoSize: VIDEO_720_L,
tileSize: TILE_SIZE_4_L,
expected: "cover",
},
{
videoSize: invertSize(VIDEO_720_L),
tileSize: TILE_SIZE_4_L,
expected: "contain",
},
{
videoSize: invertSize(VIDEO_1080_L),
tileSize: TILE_SIZE_3_P,
expected: "cover",
},
{
videoSize: VIDEO_1080_L,
tileSize: TILE_SIZE_5_P,
expected: "contain",
},
{
videoSize: invertSize(VIDEO_1080_L),
tileSize: TILE_SIZE_5_P,
expected: "cover",
},
{
// square video
videoSize: { width: 400, height: 400 },
tileSize: VIDEO_480_L,
expected: "contain",
},
{
// Should default to cover if the initial size is 0:0.
// Or else it will cause a flash of "contain" mode until the real size is loaded, which can be jarring.
videoSize: VIDEO_480_L,
tileSize: { width: 0, height: 0 },
expected: "cover",
},
{
videoSize: { width: 0, height: 0 },
tileSize: VIDEO_480_L,
expected: "cover",
},
])(
"videoFit$ returns $expected when videoSize is $videoSize and tileSize is $tileSize",
({ videoSize, tileSize, expected }) => {
const scope = new ObservableScope();
const videoSize$ = constant(videoSize);
const tileSize$ = constant(tileSize);
const fit = videoFit$(scope, videoSize$, tileSize$);
expect(fit.value).toBe(expected);
},
);
describe("extracting video size from participant stats", () => {
function createMockRtpStats(
isInbound: boolean,
props: Partial<RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats> = {},
): RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats {
const baseStats = {
id: "mock-stats-id",
timestamp: Date.now(),
type: isInbound ? "inbound-rtp" : "outbound-rtp",
kind: "video",
...props,
};
return baseStats as RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats;
}
test("get stats for local user", async () => {
const localParticipant = mockLocalParticipant({
identity: "@local:example.org:AAAAAA",
});
const mockReport: RTCStatsReport = new Map([
[
"OT01V639885149",
createMockRtpStats(false, {
frameWidth: 1280,
frameHeight: 720,
}),
],
]);
const track = {
source: Track.Source.Camera,
getRTCStatsReport: vi.fn().mockResolvedValue(mockReport),
} as Partial<LocalTrack> as LocalTrack;
// Set up the prototype chain (there is an instanceof check in getRTCStatsReport)
Object.setPrototypeOf(track, LocalTrack.prototype);
localParticipant.getTrackPublication = vi
.fn()
.mockImplementation((source: Track.Source) => {
if (source === Track.Source.Camera) {
return {
track,
} as unknown as LocalTrackPublication;
} else {
return undefined;
}
});
const videoDimensions$ = videoSizeFromParticipant$(
constant(localParticipant),
);
const publishedDimensions: { width: number; height: number }[] = [];
videoDimensions$.subscribe((dimensions) => {
if (dimensions) publishedDimensions.push(dimensions);
});
await flushPromises();
const dimension = publishedDimensions.pop();
expect(dimension).toEqual({ width: 1280, height: 720 });
});
test("get stats for remote user", async () => {
const remoteParticipant = mockRemoteParticipant({
identity: "@bob:example.org:AAAAAA",
});
const mockReport: RTCStatsReport = new Map([
[
"OT01V639885149",
createMockRtpStats(true, {
frameWidth: 480,
frameHeight: 640,
}),
],
]);
const track = {
source: Track.Source.Camera,
getRTCStatsReport: vi.fn().mockResolvedValue(mockReport),
} as Partial<LocalTrack> as LocalTrack;
// Set up the prototype chain (there is an instanceof check in getRTCStatsReport)
Object.setPrototypeOf(track, LocalTrack.prototype);
remoteParticipant.getTrackPublication = vi
.fn()
.mockImplementation((source: Track.Source) => {
if (source === Track.Source.Camera) {
return {
track,
} as unknown as RemoteTrackPublication;
} else {
return undefined;
}
});
const videoDimensions$ = videoSizeFromParticipant$(
constant(remoteParticipant),
);
const publishedDimensions: { width: number; height: number }[] = [];
videoDimensions$.subscribe((dimensions) => {
if (dimensions) publishedDimensions.push(dimensions);
});
await flushPromises();
const dimension = publishedDimensions.pop();
expect(dimension).toEqual({ width: 480, height: 640 });
});
});

src/utils/videoFit.ts Normal file
View File

@@ -0,0 +1,111 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, type Observable, of, switchMap } from "rxjs";
import {
type LocalParticipant,
type RemoteParticipant,
Track,
} from "livekit-client";
import { type ObservableScope } from "../state/ObservableScope.ts";
import { type Behavior } from "../state/Behavior.ts";
import {
observeInboundRtpStreamStats$,
observeOutboundRtpStreamStats$,
} from "../state/media/observeRtpStreamStats";
type Size = {
width: number;
height: number;
};
/**
* Computes the appropriate video fit mode ("cover" or "contain") based on the aspect ratios of the video and the tile.
* - If the video and tile have the same orientation (both landscape or both portrait), we use "cover" to fill the tile, even if it means cropping.
* - If the video and tile have different orientations, we use "contain" to ensure the entire video is visible, even if it means letterboxing (black bars).
* @param scope - the ObservableScope to create the Behavior in
* @param videoSize$ - an Observable of the video size (width and height) or undefined if the size is not yet known (no data yet received).
* @param tileSize$ - an Observable of the tile size (width and height) or undefined if the size is not yet known (not yet rendered).
*/
export function videoFit$(
scope: ObservableScope,
videoSize$: Observable<Size | undefined>,
tileSize$: Observable<Size | undefined>,
): Behavior<"cover" | "contain"> {
const fit$ = combineLatest([videoSize$, tileSize$]).pipe(
map(([videoSize, tileSize]) => {
if (!videoSize || !tileSize) {
// If we don't have the sizes, default to cover to avoid black bars.
// This is a reasonable default as it will ensure the video fills the tile, even if it means cropping.
return "cover";
}
if (
videoSize.width === 0 ||
videoSize.height === 0 ||
tileSize.width === 0 ||
tileSize.height === 0
) {
// If we have invalid sizes (e.g. width or height is 0), default to cover to avoid black bars.
return "cover";
}
const videoAspectRatio = videoSize.width / videoSize.height;
const tileAspectRatio = tileSize.width / tileSize.height;
// If video is landscape (ratio > 1) and tile is portrait (ratio < 1) or vice versa,
// we want to use "contain" (fit) mode to avoid excessive cropping
const videoIsLandscape = videoAspectRatio > 1;
const tileIsLandscape = tileAspectRatio > 1;
// If the orientations match, use "cover" (preserves the aspect ratio; the video fills the container).
// If they differ, use "contain" (preserves the aspect ratio; the video is letterboxed, i.e. gets black bars, to fit within the container).
return videoIsLandscape === tileIsLandscape ? "cover" : "contain";
}),
);
return scope.behavior(fit$, "cover");
}
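// For instance (a usage sketch; `constant` is the Behavior helper the tests
// use): a landscape 1280x720 stream shown in a landscape 957x542 tile yields
// "cover", while the same stream in a portrait 379x542 tile yields "contain".
//
//   const fit = videoFit$(
//     scope,
//     constant({ width: 1280, height: 720 }),
//     constant({ width: 957, height: 542 }),
//   );
//   fit.value; // "cover"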
/**
* Helper function to get the video size from a participant.
* It observes the participant's video track stats and extracts the frame width and height.
* @param participant$ - an Observable of a LocalParticipant or RemoteParticipant, or null if no participant is selected.
* @returns an Observable of the video size (width and height) or undefined if the size cannot be determined.
*/
export function videoSizeFromParticipant$(
participant$: Observable<LocalParticipant | RemoteParticipant | null>,
): Observable<{ width: number; height: number } | undefined> {
return participant$.pipe(
// If we have a participant, observe their video track stats. If not, return undefined.
switchMap((p) => {
if (!p) return of(undefined);
if (p.isLocal) {
return observeOutboundRtpStreamStats$(p, Track.Source.Camera);
} else {
return observeInboundRtpStreamStats$(p, Track.Source.Camera);
}
}),
// Extract the frame width and height from the stats. If we don't have valid stats, return undefined.
map((stats) => {
if (!stats) return undefined;
if (
// For video tracks, frameWidth and frameHeight should be numbers. If they're not, we can't determine the size.
typeof stats.frameWidth !== "number" ||
typeof stats.frameHeight !== "number"
) {
return undefined;
}
return {
width: stats.frameWidth,
height: stats.frameHeight,
};
}),
);
}
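
Putting the two exports together, the view-model wiring above reduces to the following sketch, where scope and participant$ are assumed to come from the surrounding view model:

import { BehaviorSubject } from "rxjs";
import { videoFit$, videoSizeFromParticipant$ } from "./videoFit";

// The tile size is unknown until the layout settles, hence the undefined start.
const targetSize$ = new BehaviorSubject<
  { width: number; height: number } | undefined
>(undefined);
const fit$ = videoFit$(scope, videoSizeFromParticipant$(participant$), targetSize$);

// Once the tile's settled (post-animation) size is known:
targetSize$.next({ width: 957, height: 542 });
console.log(fit$.value); // "cover" while both video and tile are landscape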