mirror of
https://github.com/vector-im/element-call.git
synced 2026-03-28 06:50:26 +00:00
This implements the new ringing UI by showing a placeholder tile for the participant being dialed, rather than an overlay.
165 lines
5.0 KiB
TypeScript
/*
|
|
Copyright 2023, 2024 New Vector Ltd.
|
|
Copyright 2026 Element Creations Ltd.
|
|
|
|
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
|
|
Please see LICENSE in the repository root for full details.
|
|
*/
|
|
|
|
import {
|
|
BehaviorSubject,
|
|
combineLatest,
|
|
map,
|
|
type Observable,
|
|
of,
|
|
Subject,
|
|
switchMap,
|
|
} from "rxjs";
|
|
import {
|
|
observeParticipantEvents,
|
|
observeParticipantMedia,
|
|
} from "@livekit/components-core";
|
|
import { ParticipantEvent, Track } from "livekit-client";
|
|
|
|
import { type ReactionOption } from "../../reactions";
|
|
import { type Behavior } from "../Behavior";
|
|
import { type LocalUserMediaViewModel } from "./LocalUserMediaViewModel";
|
|
import {
|
|
createMemberMedia,
|
|
type MemberMediaInputs,
|
|
type BaseMemberMediaViewModel,
|
|
} from "./MemberMediaViewModel";
|
|
import { type RemoteUserMediaViewModel } from "./RemoteUserMediaViewModel";
|
|
import { type ObservableScope } from "../ObservableScope";
|
|
import { showConnectionStats } from "../../settings/settings";
|
|
import { observeRtpStreamStats$ } from "./observeRtpStreamStats";
|
|
import { videoFit$, videoSizeFromParticipant$ } from "../../utils/videoFit.ts";
|
|
|
|
/**
 * A participant's user media (i.e. their microphone and camera feed),
 * either for the local user or a remote participant.
 */
export type UserMediaViewModel =
  | LocalUserMediaViewModel
  | RemoteUserMediaViewModel;
|
|
|
|
/**
 * The state shared by local and remote user media view models.
 */
export interface BaseUserMediaViewModel extends BaseMemberMediaViewModel {
  /** Discriminant distinguishing user media from other media tile types. */
  type: "user";
  /** Whether the participant is currently speaking. */
  speaking$: Behavior<boolean>;
  /** Whether the participant's microphone track exists and is unmuted. */
  audioEnabled$: Behavior<boolean>;
  /** Whether the participant's camera track exists and is unmuted. */
  videoEnabled$: Behavior<boolean>;
  /**
   * How the video should be fitted into its tile: "cover" (cropped to fill)
   * or "contain" (letterboxed to preserve the aspect ratio).
   */
  videoFit$: Behavior<"cover" | "contain">;
  /** Requests a toggle between the cropped and uncropped video fit modes. */
  toggleCropVideo: () => void;
  /**
   * The expected identity of the LiveKit participant. Exposed for debugging.
   */
  rtcBackendIdentity: string;
  /**
   * When the participant raised their hand, or null if their hand is not
   * currently raised.
   */
  handRaised$: Behavior<Date | null>;
  /** The participant's active reaction, or null if none. */
  reaction$: Behavior<ReactionOption | null>;
  /**
   * RTP stats for the participant's microphone stream. Emits undefined while
   * connection stats are disabled or no participant is present.
   */
  audioStreamStats$: Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
  /**
   * RTP stats for the participant's camera stream. Emits undefined while
   * connection stats are disabled or no participant is present.
   */
  videoStreamStats$: Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
  /**
   * Set the target dimensions of the HTML element (final dimension after anim).
   * This can be used to determine the best video fit (fit to frame / keep ratio).
   * @param targetWidth - The target width of the HTML element displaying the video.
   * @param targetHeight - The target height of the HTML element displaying the video.
   */
  setTargetDimensions: (targetWidth: number, targetHeight: number) => void;
}
|
|
|
|
/**
 * Inputs needed to construct the base user media state. The audio/video
 * sources are fixed to microphone/camera, so they are omitted from the
 * member media inputs.
 */
export interface BaseUserMediaInputs extends Omit<
  MemberMediaInputs,
  "audioSource" | "videoSource"
> {
  /** The expected identity of the LiveKit participant (for debugging). */
  rtcBackendIdentity: string;
  /** When the participant raised their hand, or null if not raised. */
  handRaised$: Behavior<Date | null>;
  /** The participant's active reaction, or null if none. */
  reaction$: Behavior<ReactionOption | null>;
  // Which direction of RTP stats to observe — presumably "outbound-rtp" for
  // the local participant and "inbound-rtp" for remote ones; TODO confirm
  // against callers.
  statsType: "inbound-rtp" | "outbound-rtp";
}
|
|
|
|
export function createBaseUserMedia(
|
|
scope: ObservableScope,
|
|
{
|
|
rtcBackendIdentity,
|
|
handRaised$,
|
|
reaction$,
|
|
statsType,
|
|
...inputs
|
|
}: BaseUserMediaInputs,
|
|
): BaseUserMediaViewModel {
|
|
const { participant$ } = inputs;
|
|
const media$ = scope.behavior(
|
|
participant$.pipe(
|
|
switchMap((p) => (p && observeParticipantMedia(p)) ?? of(undefined)),
|
|
),
|
|
);
|
|
const toggleCropVideo$ = new Subject<void>();
|
|
|
|
// The target size of the video element, used to determine the best video fit.
|
|
// The target size is the final size of the HTML element after any animations have completed.
|
|
const targetSize$ = new BehaviorSubject<
|
|
{ width: number; height: number } | undefined
|
|
>(undefined);
|
|
|
|
return {
|
|
...createMemberMedia(scope, {
|
|
...inputs,
|
|
audioSource: Track.Source.Microphone,
|
|
videoSource: Track.Source.Camera,
|
|
}),
|
|
type: "user",
|
|
speaking$: scope.behavior(
|
|
participant$.pipe(
|
|
switchMap((p) =>
|
|
p
|
|
? observeParticipantEvents(
|
|
p,
|
|
ParticipantEvent.IsSpeakingChanged,
|
|
).pipe(map((p) => p.isSpeaking))
|
|
: of(false),
|
|
),
|
|
),
|
|
),
|
|
audioEnabled$: scope.behavior(
|
|
media$.pipe(map((m) => m?.microphoneTrack?.isMuted === false)),
|
|
),
|
|
videoEnabled$: scope.behavior(
|
|
media$.pipe(map((m) => m?.cameraTrack?.isMuted === false)),
|
|
),
|
|
videoFit$: videoFit$(
|
|
scope,
|
|
videoSizeFromParticipant$(participant$),
|
|
targetSize$,
|
|
),
|
|
toggleCropVideo: () => toggleCropVideo$.next(),
|
|
rtcBackendIdentity,
|
|
handRaised$,
|
|
reaction$,
|
|
audioStreamStats$: combineLatest([
|
|
participant$,
|
|
showConnectionStats.value$,
|
|
]).pipe(
|
|
switchMap(([p, showConnectionStats]) => {
|
|
//
|
|
if (!p || !showConnectionStats) return of(undefined);
|
|
return observeRtpStreamStats$(p, Track.Source.Microphone, statsType);
|
|
}),
|
|
),
|
|
videoStreamStats$: combineLatest([
|
|
participant$,
|
|
showConnectionStats.value$,
|
|
]).pipe(
|
|
switchMap(([p, showConnectionStats]) => {
|
|
if (!p || !showConnectionStats) return of(undefined);
|
|
return observeRtpStreamStats$(p, Track.Source.Camera, statsType);
|
|
}),
|
|
),
|
|
setTargetDimensions: (targetWidth: number, targetHeight: number): void => {
|
|
targetSize$.next({ width: targetWidth, height: targetHeight });
|
|
},
|
|
};
|
|
}
|