Mirror of https://github.com/vector-im/element-call.git (synced 2026-01-18 02:32:27 +00:00)
Add sounds for ringing (#3490)
* add wait for pickup overlay
* refactor and leave logic
* recursive play sound logic
* review
* text color
* overlay styling and interval fixes
* fix permissions and styling
* fix always getting pickup sound
* Add sound effects for declined, timeout and ringtone
* better ringtone
* Integrate sounds
* Ensure leave sound does not play
* Remove unused blocked sound
* fix test
* Improve tests
* Loop ring sound inside Audio context for better perf.
* lint
* better ringtone
* Update to delay ringtone logic.
* lint + fix test
* Tidy up ring sync and add comments.
* lint
* Refactor onLeave to take a sound so we don't need to repeat the sound
* fix import

Signed-off-by: Timo K <toger5@hotmail.de>
Co-authored-by: Timo K <toger5@hotmail.de>
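In short: while the call is waiting to be picked up the client loops a ringtone, a decline or a timeout plays its own sound and then ends the call, and the usual "left" sound is held back until the call has actually been answered. A compact sketch of that flow, using stand-in helpers rather than the actual Element Call APIs:

// --- illustrative sketch, not part of this commit ---
type CallPickupState =
  | "unknown" | "ringing" | "timeout" | "decline" | "success" | null;

// Stand-in helpers so the sketch runs on its own.
const playOnce = async (sound: "decline" | "timeout" | "left"): Promise<void> => {
  console.log(`play ${sound}`);
};
const startRinging = (): (() => Promise<void>) => {
  console.log("ringing...");
  return async () => console.log("ringing stopped");
};
const leaveCall = (): void => console.log("leaving call");

let stopRinging: (() => Promise<void>) | undefined;

async function onPickupStateChange(state: CallPickupState): Promise<void> {
  if (state !== "ringing" && stopRinging) {
    await stopRinging(); // stop the loop as soon as we leave the ringing state
    stopRinging = undefined;
  }
  switch (state) {
    case "ringing":
      stopRinging ??= startRinging(); // loop the ringtone while we wait
      break;
    case "decline":
    case "timeout":
      await playOnce(state); // feedback sound first...
      leaveCall(); // ...then leave the call
      break;
    default:
      break; // "success", "unknown" or null: no ringing feedback
  }
}

// Example: enter the ringing state.
void onPickupStateChange("ringing");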
@@ -54,6 +54,8 @@ beforeEach(() => {
  playSound = vitest.fn();
  (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
    playSound,
    playSoundLooping: vitest.fn(),
    soundDuration: {},
  });
});

@@ -105,6 +107,20 @@ test("plays a sound when a user leaves", () => {
  expect(playSound).toBeCalledWith("left");
});

test("does not play a sound before the call is successful", () => {
  const { vm, rtcMemberships$ } = getBasicCallViewModelEnvironment(
    [local, alice],
    [localRtcMember],
    { waitForCallPickup: true },
  );
  render(<CallEventAudioRenderer vm={vm} />);

  act(() => {
    rtcMemberships$.next([localRtcMember]);
  });
  expect(playSound).not.toBeCalledWith("left");
});

test("plays no sound when the participant list is more than the maximum size", () => {
  const mockRtcMemberships: CallMembership[] = [localRtcMember];
  for (let i = 0; i < MAX_PARTICIPANT_COUNT_FOR_SOUND; i++) {
@@ -16,6 +16,10 @@ import handSoundOgg from "../sound/raise_hand.ogg";
import handSoundMp3 from "../sound/raise_hand.mp3";
import screenShareStartedOgg from "../sound/screen_share_started.ogg";
import screenShareStartedMp3 from "../sound/screen_share_started.mp3";
import declineMp3 from "../sound/call_declined.mp3?url";
import declineOgg from "../sound/call_declined.ogg?url";
import timeoutMp3 from "../sound/call_timeout.mp3?url";
import timeoutOgg from "../sound/call_timeout.ogg?url";
import { useAudioContext } from "../useAudioContext";
import { prefetchSounds } from "../soundUtils";
import { useLatest } from "../useLatest";

@@ -37,8 +41,18 @@ export const callEventAudioSounds = prefetchSounds({
    mp3: screenShareStartedMp3,
    ogg: screenShareStartedOgg,
  },
  decline: {
    mp3: declineMp3,
    ogg: declineOgg,
  },
  timeout: {
    mp3: timeoutMp3,
    ogg: timeoutOgg,
  },
});

export type CallEventSounds = keyof Awaited<typeof callEventAudioSounds>;

export function CallEventAudioRenderer({
  vm,
  muted,
@@ -12,6 +12,7 @@ import {
  onTestFinished,
  test,
  vi,
  vitest,
} from "vitest";
import { render, waitFor, screen, act } from "@testing-library/react";
import { type MatrixClient, JoinRule, type RoomState } from "matrix-js-sdk";

@@ -97,13 +98,15 @@ beforeEach(() => {
  playSound = vi.fn();
  (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
    playSound,
    playSoundLooping: vi.fn(),
    soundDuration: {},
  });
  // A trivial implementation of Active call to ensure we are testing GroupCallView exclusively here.
  (ActiveCall as MockedFunction<typeof ActiveCall>).mockImplementation(
    ({ onLeave }) => {
      return (
        <div>
          <button onClick={() => onLeave()}>Leave</button>
          <button onClick={() => onLeave("user")}>Leave</button>
        </div>
      );
    },

@@ -209,6 +212,8 @@ test("GroupCallView plays a leave sound synchronously in widget mode", async ()
  );
  (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
    playSound,
    playSoundLooping: vitest.fn(),
    soundDuration: {},
  });

  const { getByText, rtcSession } = createGroupCallView(
@@ -53,7 +53,10 @@ import { InviteModal } from "./InviteModal";
import { HeaderStyle, type UrlParams, useUrlParams } from "../UrlParams";
import { E2eeType } from "../e2ee/e2eeType";
import { useAudioContext } from "../useAudioContext";
import { callEventAudioSounds } from "./CallEventAudioRenderer";
import {
  callEventAudioSounds,
  type CallEventSounds,
} from "./CallEventAudioRenderer";
import { useLatest } from "../useLatest";
import { usePageTitle } from "../usePageTitle";
import {

@@ -317,8 +320,11 @@ export const GroupCallView: FC<Props> = ({
  const navigate = useNavigate();

  const onLeave = useCallback(
    (cause: "user" | "error" = "user"): void => {
      const audioPromise = leaveSoundContext.current?.playSound("left");
    (
      cause: "user" | "error" = "user",
      playSound: CallEventSounds = "left",
    ): void => {
      const audioPromise = leaveSoundContext.current?.playSound(playSound);
      // In embedded/widget mode the iFrame will be killed right after the call ended prohibiting the posthog event from getting sent,
      // therefore we want the event to be sent instantly without getting queued/batched.
      const sendInstantly = !!widget;
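For context on the refactor above: the leave callback now takes the sound to play as a parameter defaulting to "left", so call sites only name a sound when they want something other than the default instead of repeating playback logic. A minimal sketch of that call-site shape (makeOnLeave and the narrowed sound union are illustrative; only the parameter shape comes from the diff):

// --- illustrative sketch, not part of this commit ---
type CallEventSounds = "left" | "decline" | "timeout"; // assumed subset of the prefetched sound names

// A leave handler with the same shape as the refactored useCallback above.
function makeOnLeave(
  playSound: (name: CallEventSounds) => Promise<void> | undefined,
): (cause?: "user" | "error", sound?: CallEventSounds) => void {
  return (cause = "user", sound = "left") => {
    const audioPromise = playSound(sound);
    void audioPromise; // in GroupCallView this promise is awaited before teardown in widget mode (see the test above)
    console.log(`leaving call (cause: ${cause}, sound: ${sound})`);
  };
}

// Call sites only name a sound when it differs from the default:
const onLeave = makeOnLeave(async (name) => console.log(`playing ${name}`));
onLeave();                  // user hangs up -> "left"
onLeave("user", "decline"); // callee declined -> "decline"
onLeave("user", "timeout"); // nobody answered -> "timeout"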
@@ -95,7 +95,10 @@ import {
} from "../reactions/useReactionsSender";
import { ReactionsAudioRenderer } from "./ReactionAudioRenderer";
import { ReactionsOverlay } from "./ReactionsOverlay";
import { CallEventAudioRenderer } from "./CallEventAudioRenderer";
import {
  CallEventAudioRenderer,
  type CallEventSounds,
} from "./CallEventAudioRenderer";
import {
  debugTileLayout as debugTileLayoutSetting,
  useExperimentalToDeviceTransport as useExperimentalToDeviceTransportSetting,

@@ -117,11 +120,8 @@ import { Avatar, Size as AvatarSize } from "../Avatar";
import waitingStyles from "./WaitingForJoin.module.css";
import { prefetchSounds } from "../soundUtils";
import { useAudioContext } from "../useAudioContext";
// TODO: Dont use this!!! use the correct sound
import genericSoundOgg from "../sound/reactions/generic.ogg?url";
import genericSoundMp3 from "../sound/reactions/generic.mp3?url";
import leftCallSoundMp3 from "../sound/left_call.mp3";
import leftCallSoundOgg from "../sound/left_call.ogg";
import ringtoneMp3 from "../sound/ringtone.mp3?url";
import ringtoneOgg from "../sound/ringtone.ogg?url";

const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});

@@ -232,7 +232,7 @@ export interface InCallViewProps {
  livekitRoom: LivekitRoom;
  muteStates: MuteStates;
  /** Function to call when the user explicitly ends the call */
  onLeave: () => void;
  onLeave: (cause: "user", soundFile?: CallEventSounds) => void;
  header: HeaderStyle;
  otelGroupCallMembership?: OTelGroupCallMembership;
  connState: ECConnectionState;

@@ -281,14 +281,9 @@ export const InCallView: FC<InCallViewProps> = ({
  // Preload a waiting and decline sounds
  const pickupPhaseSoundCache = useInitial(async () => {
    return prefetchSounds({
      waiting: { mp3: genericSoundMp3, ogg: genericSoundOgg },
      decline: { mp3: leftCallSoundMp3, ogg: leftCallSoundOgg },
      // Do we want a timeout sound?
      waiting: { mp3: ringtoneMp3, ogg: ringtoneOgg },
    });
  });
  // configure this to sth that fits to the pickup waiting sound.
  // 1600 is in sync with the animation.
  const PICKUP_SOUND_INTERVAL = 1600;

  const pickupPhaseAudio = useAudioContext({
    sounds: pickupPhaseSoundCache,
@@ -356,34 +351,47 @@ export const InCallView: FC<InCallViewProps> = ({
  const showFooter = useBehavior(vm.showFooter$);
  const earpieceMode = useBehavior(vm.earpieceMode$);
  const audioOutputSwitcher = useBehavior(vm.audioOutputSwitcher$);
  useSubscription(vm.autoLeave$, onLeave);
  useSubscription(vm.autoLeave$, () => onLeave("user"));

  // We need to set the proper timings on the animation based upon the sound length.
  const ringDuration = pickupPhaseAudio?.soundDuration["waiting"] ?? 1;
  useEffect((): (() => void) => {
    // The CSS animation includes the delay, so we must double the length of the sound.
    window.document.body.style.setProperty(
      "--call-ring-duration-s",
      `${ringDuration * 2}s`,
    );
    window.document.body.style.setProperty(
      "--call-ring-delay-s",
      `${ringDuration}s`,
    );
    // Remove properties when we unload.
    return () => {
      window.document.body.style.removeProperty("--call-ring-duration-s");
      window.document.body.style.removeProperty("--call-ring-delay-s");
    };
  }, [pickupPhaseAudio?.soundDuration, ringDuration]);

  // When we enter timeout or decline we will leave the call.
  useEffect((): void | (() => void) => {
    if (callPickupState === "timeout") {
      onLeave();
      onLeave("user", "timeout");
    }
    if (callPickupState === "decline") {
      // Wait for the sound to finish before leaving
      void pickupPhaseAudio
        ?.playSound("decline")
        .catch((e) => {
          logger.error("Failed to play decline sound", e);
        })
        .finally(() => {
          onLeave();
        });
      onLeave("user", "decline");
    }
  }, [callPickupState, onLeave, pickupPhaseAudio]);

  // When waiting for pickup, loop a waiting sound
  useEffect((): void | (() => void) => {
    if (callPickupState !== "ringing") return;
    const interval = window.setInterval(() => {
      void pickupPhaseAudio?.playSound("waiting");
    }, PICKUP_SOUND_INTERVAL);
    return (): void => window.clearInterval(interval);
  }, [callPickupState, pickupPhaseAudio]);
    if (callPickupState !== "ringing" || !pickupPhaseAudio) return;
    const endSound = pickupPhaseAudio.playSoundLooping("waiting", ringDuration);
    return () => {
      void endSound().catch((e) => {
        logger.error("Failed to stop ringing sound", e);
      });
    };
  }, [callPickupState, pickupPhaseAudio, ringDuration]);

  // Waiting UI overlay
  const waitingOverlay: JSX.Element | null = useMemo(() => {
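A note on the timing code above: the pulsing overlay and the looping ringtone stay in phase because both derive from the same ringDuration; one animation cycle spans the audible ring plus an equal silent gap, so it is twice the sound length. A standalone sketch of just that arithmetic (setRingTiming is a hypothetical helper, not part of the change):

// --- illustrative sketch, not part of this commit ---
// Derive the CSS custom properties from the ring sound's duration (in seconds),
// mirroring the useEffect above: cycle = sound + equal silence = 2 x sound length.
function setRingTiming(ringDurationS: number, el: HTMLElement = document.body): void {
  el.style.setProperty("--call-ring-duration-s", `${ringDurationS * 2}s`);
  el.style.setProperty("--call-ring-delay-s", `${ringDurationS}s`);
}

// Example: a 1.6 s ringtone gives a 3.2 s pulse cycle and a 1.6 s delay,
// the same gap that playSoundLooping("waiting", ringDuration) leaves between repeats.
setRingTiming(1.6);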
@@ -823,7 +831,7 @@ export const InCallView: FC<InCallViewProps> = ({
  <EndCallButton
    key="end_call"
    onClick={function (): void {
      onLeave();
      onLeave("user");
    }}
    onTouchEnd={onControlsTouchEnd}
    data-testid="incall_leave"

@@ -69,6 +69,8 @@ beforeEach(() => {
  playSound = vitest.fn();
  (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
    playSound,
    playSoundLooping: vitest.fn(),
    soundDuration: {},
  });
});
@@ -25,7 +25,9 @@
  inset: -12px;
  border-radius: 9999px;
  border: 12px solid rgba(255, 255, 255, 0.6);
  animation: pulse 1.6s ease-out infinite;
  animation: pulse var(--call-ring-duration-s) ease-out infinite;
  animation-delay: 1s;
  opacity: 0;
}

.text {

@@ -36,13 +38,24 @@
  0% {
    transform: scale(0.95);
    opacity: 0.7;
    transform: scale(0);
    opacity: 1;
  }
  70% {
  35% {
    transform: scale(1.15);
    opacity: 0.15;
  }
  100% {
  50% {
    transform: scale(1.2);
    opacity: 0;
  }
  50.01% {
    transform: scale(0);
  }
  85% {
    transform: scale(0);
  }
  100% {
    transform: scale(0);
  }
}
BIN  src/sound/call_declined.mp3 (new file, binary not shown)
BIN  src/sound/call_declined.ogg (new file, binary not shown)
BIN  src/sound/call_timeout.mp3 (new file, binary not shown)
BIN  src/sound/call_timeout.ogg (new file, binary not shown)
BIN  src/sound/ringtone.mp3 (new file, binary not shown)
BIN  src/sound/ringtone.ogg (new file, binary not shown)
@@ -49,6 +49,7 @@ import {
  race,
  scan,
  skip,
  skipWhile,
  startWith,
  switchAll,
  switchMap,

@@ -853,17 +854,6 @@ export class CallViewModel extends ViewModel {
    throttleTime(THROTTLE_SOUND_EFFECT_MS),
  );

  public readonly leaveSoundEffect$ = this.userMedia$.pipe(
    pairwise(),
    filter(
      ([prev, current]) =>
        current.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND &&
        current.length < prev.length,
    ),
    map(() => {}),
    throttleTime(THROTTLE_SOUND_EFFECT_MS),
  );

  /**
   * The number of participants currently in the call.
   *

@@ -963,7 +953,9 @@ export class CallViewModel extends ViewModel {
   * - "success": Someone else joined. The call is in a normal state. No audiovisual feedback.
   * - null: EC is configured to never show any waiting for answer state.
   */
  public readonly callPickupState$ = this.options.waitForCallPickup
  public readonly callPickupState$: Behavior<
    "unknown" | "ringing" | "timeout" | "decline" | "success" | null
  > = this.options.waitForCallPickup
    ? this.scope.behavior<
        "unknown" | "ringing" | "timeout" | "decline" | "success"
      >(

@@ -983,6 +975,24 @@
      )
    : constant(null);

  public readonly leaveSoundEffect$ = combineLatest([
    this.callPickupState$,
    this.userMedia$,
  ]).pipe(
    // Until the call is successful, do not play a leave sound.
    // If callPickupState$ is null, then we always play the sound as it will not conflict with a decline sound.
    skipWhile(([c]) => c !== null && c !== "success"),
    map(([, userMedia]) => userMedia),
    pairwise(),
    filter(
      ([prev, current]) =>
        current.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND &&
        current.length < prev.length,
    ),
    map(() => {}),
    throttleTime(THROTTLE_SOUND_EFFECT_MS),
  );

  /**
   * List of MediaItems that we want to display, that are of type ScreenShare
   */
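The reworked leaveSoundEffect$ deserves a gloss: skipWhile drops every emission until the pickup state first becomes "success" (or is null because pickup tracking is disabled), and from then on the participant-count comparison decides whether a "left" sound is due. A small self-contained RxJS sketch of the same gating pattern, with a plain participant count standing in for the real userMedia$ stream (the subjects below are illustrative, not CallViewModel members):

// --- illustrative sketch, not part of this commit ---
import { BehaviorSubject, combineLatest, filter, map, pairwise, skipWhile } from "rxjs";

type PickupState = "unknown" | "ringing" | "success" | null;

// Illustrative stand-ins for callPickupState$ and a participant-count stream.
const pickupState$ = new BehaviorSubject<PickupState>("ringing");
const participantCount$ = new BehaviorSubject<number>(2);

const leaveSound$ = combineLatest([pickupState$, participantCount$]).pipe(
  // Drop everything until the call has been picked up (or pickup tracking is disabled).
  skipWhile(([state]) => state !== null && state !== "success"),
  map(([, count]) => count),
  pairwise(),
  // Someone left: the count went down.
  filter(([prev, current]) => current < prev),
  map(() => undefined),
);

leaveSound$.subscribe(() => console.log("play 'left' sound"));

participantCount$.next(1);    // ignored: still ringing
pickupState$.next("success"); // call picked up
participantCount$.next(2);
participantCount$.next(1);    // now logs: play 'left' sound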
@@ -32,6 +32,8 @@ async function playSound(
  buffer: AudioBuffer,
  volume: number,
  stereoPan: number,
  delayS = 0,
  abort?: AbortController,
): Promise<void> {
  const gain = ctx.createGain();
  gain.gain.setValueAtTime(volume, 0);

@@ -39,13 +41,62 @@
  pan.pan.setValueAtTime(stereoPan, 0);
  const src = ctx.createBufferSource();
  src.buffer = buffer;
  src.connect(gain).connect(pan).connect(ctx.destination);
  abort?.signal.addEventListener("abort", () => {
    src.disconnect();
  });
  const p = new Promise<void>((r) => src.addEventListener("ended", () => r()));
  src.connect(gain).connect(pan).connect(ctx.destination);
  controls.setPlaybackStarted();
  src.start();
  src.start(ctx.currentTime + delayS);
  return p;
}

/**
 * Play a sound though a given AudioContext, looping until stopped. Will take
 * care of connecting the correct buffer and gating
 * through gain.
 * @param volume The volume to play at.
 * @param ctx The context to play through.
 * @param buffer The buffer to play.
 * @returns A function used to end the sound. This function will return a promise when the sound has stopped.
 */
function playSoundLooping(
  ctx: AudioContext,
  buffer: AudioBuffer,
  volume: number,
  stereoPan: number,
  delayS?: number,
): () => Promise<void> {
  if (delayS === 0) {
    throw Error("Looping sounds must have a delay");
  }

  // Our audio loop
  let lastSoundPromise: Promise<void>;
  let nextSoundPromise: Promise<void>;
  let ac: AbortController | undefined;
  void (async (): Promise<void> => {
    ac = new AbortController();
    // Play a sound immediately
    lastSoundPromise = Promise.resolve();
    do {
      // Queue up the next sound.
      nextSoundPromise = playSound(ctx, buffer, volume, stereoPan, delayS, ac);
      // Await the previous sound.
      await lastSoundPromise;
      // Swap the promises over, and loop round to play the next sound.
      lastSoundPromise = nextSoundPromise;
    } while (!ac.signal.aborted);
  })();

  return async () => {
    ac?.abort();
    // Wait for sounds to finish.
    await lastSoundPromise;
    await nextSoundPromise;
  };
}

interface Props<S extends string> {
  /**
   * The sounds to play. If no sounds should be played then
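The looping helper above keeps two promises in flight: the next iteration is already queued (offset by delayS) while the current one plays, and the returned stop function aborts the loop and then drains both promises. A stripped-down, runnable sketch of the same pattern, with a fake asynchronous play step in place of the Web Audio code:

// --- illustrative sketch, not part of this commit ---
// Double-buffered loop with an abortable stop function.
function loopUntilStopped(playOnce: (signal: AbortSignal) => Promise<void>): () => Promise<void> {
  const ac = new AbortController();
  let last: Promise<void> = Promise.resolve();
  let next: Promise<void> = Promise.resolve();

  void (async () => {
    do {
      next = playOnce(ac.signal); // queue the next iteration...
      await last;                 // ...while the previous one finishes
      last = next;
    } while (!ac.signal.aborted);
  })();

  return async () => {
    ac.abort();
    await last; // wait for anything still playing
    await next;
  };
}

// Usage with a fake 200 ms "sound".
const stop = loopUntilStopped(
  (signal) =>
    new Promise((resolve) => {
      const t = setTimeout(resolve, 200);
      signal.addEventListener("abort", () => {
        clearTimeout(t);
        resolve();
      });
    }),
);
setTimeout(() => void stop(), 1000); // ring for about 1 s, then stop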
@@ -57,8 +108,13 @@ interface Props<S extends string> {
  muted?: boolean;
}

interface UseAudioContext<S> {
interface UseAudioContext<S extends string> {
  playSound(soundName: S): Promise<void>;
  playSoundLooping(soundName: S, delayS?: number): () => Promise<void>;
  /**
   * Map of sound name to duration in seconds.
   */
  soundDuration: Record<string, number>;
}

/**

@@ -146,5 +202,23 @@ export function useAudioContext<S extends string>(
        earpiecePan,
      );
    },
    playSoundLooping: (name, delayS: number): (() => Promise<void>) => {
      if (!audioBuffers[name]) {
        throw Error(`Tried to play a sound that wasn't buffered (${name})`);
      }
      return playSoundLooping(
        audioContext,
        audioBuffers[name],
        soundEffectVolume * earpieceVolume,
        earpiecePan,
        delayS,
      );
    },
    soundDuration: Object.fromEntries(
      Object.entries(audioBuffers).map(([k, v]) => [
        k,
        (v as AudioBuffer).duration,
      ]),
    ),
  };
}
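Putting the hook changes together, a caller now gets one-shot playback, looping playback with a stop handle, and per-sound durations for timing the UI. A rough usage sketch, assuming the hook's props are just the sounds map visible in this diff (the component and its prop are illustrative):

// --- illustrative sketch, not part of this commit ---
import { useEffect } from "react";
import { useAudioContext } from "../useAudioContext";
import { prefetchSounds } from "../soundUtils";
import ringtoneMp3 from "../sound/ringtone.mp3?url";
import ringtoneOgg from "../sound/ringtone.ogg?url";

const sounds = prefetchSounds({ waiting: { mp3: ringtoneMp3, ogg: ringtoneOgg } });

// Rings for as long as `ringing` stays true, then stops cleanly.
export function RingingSketch({ ringing }: { ringing: boolean }): null {
  const audio = useAudioContext({ sounds });

  useEffect(() => {
    if (!ringing || !audio) return;
    // Use the sound's own length as the gap between repeats, like InCallView above.
    const gap = audio.soundDuration["waiting"] ?? 1;
    const stop = audio.playSoundLooping("waiting", gap);
    return () => void stop();
  }, [ringing, audio]);

  return null;
}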
@@ -22,7 +22,10 @@ import {
} from "matrix-js-sdk";

import { E2eeType } from "../e2ee/e2eeType";
import { CallViewModel } from "../state/CallViewModel";
import {
  CallViewModel,
  type CallViewModelOptions,
} from "../state/CallViewModel";
import {
  mockLivekitRoom,
  mockMatrixRoom,

@@ -122,6 +125,7 @@ export function getBasicRTCSession(
export function getBasicCallViewModelEnvironment(
  members: RoomMember[],
  initialRtcMemberships: CallMembership[] = [localRtcMember, aliceRtcMember],
  callViewModelOptions: Partial<CallViewModelOptions> = {},
): {
  vm: CallViewModel;
  rtcMemberships$: BehaviorSubject<CallMembership[]>;

@@ -148,6 +152,7 @@ export function getBasicCallViewModelEnvironment(
    mockMediaDevices({}),
    {
      encryptionSystem: { kind: E2eeType.PER_PARTICIPANT },
      ...callViewModelOptions,
    },
    of(ConnectionState.Connected),
    handRaisedSubject$,