diff --git a/src/room/CallEventAudioRenderer.test.tsx b/src/room/CallEventAudioRenderer.test.tsx
index 894d1d24..40b79da4 100644
--- a/src/room/CallEventAudioRenderer.test.tsx
+++ b/src/room/CallEventAudioRenderer.test.tsx
@@ -54,6 +54,8 @@ beforeEach(() => {
   playSound = vitest.fn();
   (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
     playSound,
+    playSoundLooping: vitest.fn(),
+    soundDuration: {},
   });
 });
 
@@ -105,6 +107,20 @@ test("plays a sound when a user leaves", () => {
   expect(playSound).toBeCalledWith("left");
 });
 
+test("does not play a sound before the call is successful", () => {
+  const { vm, rtcMemberships$ } = getBasicCallViewModelEnvironment(
+    [local, alice],
+    [localRtcMember],
+    { waitForCallPickup: true },
+  );
+  render(<CallEventAudioRenderer vm={vm} />);
+
+  act(() => {
+    rtcMemberships$.next([localRtcMember]);
+  });
+  expect(playSound).not.toBeCalledWith("left");
+});
+
 test("plays no sound when the participant list is more than the maximum size", () => {
   const mockRtcMemberships: CallMembership[] = [localRtcMember];
   for (let i = 0; i < MAX_PARTICIPANT_COUNT_FOR_SOUND; i++) {
diff --git a/src/room/CallEventAudioRenderer.tsx b/src/room/CallEventAudioRenderer.tsx
index 797501b6..23997c37 100644
--- a/src/room/CallEventAudioRenderer.tsx
+++ b/src/room/CallEventAudioRenderer.tsx
@@ -16,6 +16,10 @@ import handSoundOgg from "../sound/raise_hand.ogg";
 import handSoundMp3 from "../sound/raise_hand.mp3";
 import screenShareStartedOgg from "../sound/screen_share_started.ogg";
 import screenShareStartedMp3 from "../sound/screen_share_started.mp3";
+import declineMp3 from "../sound/call_declined.mp3?url";
+import declineOgg from "../sound/call_declined.ogg?url";
+import timeoutMp3 from "../sound/call_timeout.mp3?url";
+import timeoutOgg from "../sound/call_timeout.ogg?url";
 import { useAudioContext } from "../useAudioContext";
 import { prefetchSounds } from "../soundUtils";
 import { useLatest } from "../useLatest";
@@ -37,8 +41,18 @@ export const callEventAudioSounds = prefetchSounds({
     mp3: screenShareStartedMp3,
     ogg: screenShareStartedOgg,
   },
+  decline: {
+    mp3: declineMp3,
+    ogg: declineOgg,
+  },
+  timeout: {
+    mp3: timeoutMp3,
+    ogg: timeoutOgg,
+  },
 });
 
+export type CallEventSounds = keyof Awaited<typeof callEventAudioSounds>;
+
 export function CallEventAudioRenderer({
   vm,
   muted,
diff --git a/src/room/GroupCallView.test.tsx b/src/room/GroupCallView.test.tsx
index ff2b89d5..084c06ec 100644
--- a/src/room/GroupCallView.test.tsx
+++ b/src/room/GroupCallView.test.tsx
@@ -12,6 +12,7 @@ import {
   onTestFinished,
   test,
   vi,
+  vitest,
 } from "vitest";
 import { render, waitFor, screen, act } from "@testing-library/react";
 import { type MatrixClient, JoinRule, type RoomState } from "matrix-js-sdk";
@@ -97,13 +98,15 @@ beforeEach(() => {
   playSound = vi.fn();
   (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
     playSound,
+    playSoundLooping: vi.fn(),
+    soundDuration: {},
   });
   // A trivial implementation of Active call to ensure we are testing GroupCallView exclusively here.
   (ActiveCall as MockedFunction<typeof ActiveCall>).mockImplementation(
     ({ onLeave }) => {
       return (
         <div>
-          <button onClick={() => onLeave()}>Leave</button>
+          <button onClick={() => onLeave("user")}>Leave</button>
         </div>
       );
     },
@@ -209,6 +212,8 @@ test("GroupCallView plays a leave sound synchronously in widget mode", async ()
   );
   (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
     playSound,
+    playSoundLooping: vitest.fn(),
+    soundDuration: {},
   });
 
   const { getByText, rtcSession } = createGroupCallView(
diff --git a/src/room/GroupCallView.tsx b/src/room/GroupCallView.tsx
index 18ec1a6a..dbc3ea18 100644
--- a/src/room/GroupCallView.tsx
+++ b/src/room/GroupCallView.tsx
@@ -53,7 +53,10 @@ import { InviteModal } from "./InviteModal";
 import { HeaderStyle, type UrlParams, useUrlParams } from "../UrlParams";
 import { E2eeType } from "../e2ee/e2eeType";
 import { useAudioContext } from "../useAudioContext";
-import { callEventAudioSounds } from "./CallEventAudioRenderer";
+import {
+  callEventAudioSounds,
+  type CallEventSounds,
+} from "./CallEventAudioRenderer";
 import { useLatest } from "../useLatest";
 import { usePageTitle } from "../usePageTitle";
 import {
@@ -317,8 +320,11 @@ export const GroupCallView: FC = ({
   const navigate = useNavigate();
 
   const onLeave = useCallback(
-    (cause: "user" | "error" = "user"): void => {
-      const audioPromise = leaveSoundContext.current?.playSound("left");
+    (
+      cause: "user" | "error" = "user",
+      playSound: CallEventSounds = "left",
+    ): void => {
+      const audioPromise = leaveSoundContext.current?.playSound(playSound);
       // In embedded/widget mode the iFrame will be killed right after the call ended prohibiting the posthog event from getting sent,
       // therefore we want the event to be sent instantly without getting queued/batched.
       const sendInstantly = !!widget;
diff --git a/src/room/InCallView.tsx b/src/room/InCallView.tsx
index c1244a2a..3d7044be 100644
--- a/src/room/InCallView.tsx
+++ b/src/room/InCallView.tsx
@@ -95,7 +95,10 @@ import {
 } from "../reactions/useReactionsSender";
 import { ReactionsAudioRenderer } from "./ReactionAudioRenderer";
 import { ReactionsOverlay } from "./ReactionsOverlay";
-import { CallEventAudioRenderer } from "./CallEventAudioRenderer";
+import {
+  CallEventAudioRenderer,
+  type CallEventSounds,
+} from "./CallEventAudioRenderer";
 import {
   debugTileLayout as debugTileLayoutSetting,
   useExperimentalToDeviceTransport as useExperimentalToDeviceTransportSetting,
@@ -117,11 +120,8 @@ import { Avatar, Size as AvatarSize } from "../Avatar";
 import waitingStyles from "./WaitingForJoin.module.css";
 import { prefetchSounds } from "../soundUtils";
 import { useAudioContext } from "../useAudioContext";
-// TODO: Dont use this!!! use the correct sound
-import genericSoundOgg from "../sound/reactions/generic.ogg?url";
-import genericSoundMp3 from "../sound/reactions/generic.mp3?url";
-import leftCallSoundMp3 from "../sound/left_call.mp3";
-import leftCallSoundOgg from "../sound/left_call.ogg";
+import ringtoneMp3 from "../sound/ringtone.mp3?url";
+import ringtoneOgg from "../sound/ringtone.ogg?url";
 
 const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});
 
@@ -232,7 +232,7 @@ export interface InCallViewProps {
   livekitRoom: LivekitRoom;
   muteStates: MuteStates;
   /** Function to call when the user explicitly ends the call */
-  onLeave: () => void;
+  onLeave: (cause: "user", soundFile?: CallEventSounds) => void;
   header: HeaderStyle;
   otelGroupCallMembership?: OTelGroupCallMembership;
   connState: ECConnectionState;
@@ -281,14 +281,9 @@ export const InCallView: FC = ({
   // Preload a waiting and decline sounds
   const pickupPhaseSoundCache = useInitial(async () => {
     return prefetchSounds({
-      waiting: { mp3: genericSoundMp3, ogg: genericSoundOgg },
-      decline: { mp3: leftCallSoundMp3, ogg: leftCallSoundOgg },
-      // Do we want a timeout sound?
+      waiting: { mp3: ringtoneMp3, ogg: ringtoneOgg },
     });
   });
-  // configure this to sth that fits to the pickup waiting sound.
-  // 1600 is in sync with the animation.
-  const PICKUP_SOUND_INTERVAL = 1600;
 
   const pickupPhaseAudio = useAudioContext({
     sounds: pickupPhaseSoundCache,
@@ -356,34 +351,47 @@ export const InCallView: FC = ({
   const showFooter = useBehavior(vm.showFooter$);
   const earpieceMode = useBehavior(vm.earpieceMode$);
   const audioOutputSwitcher = useBehavior(vm.audioOutputSwitcher$);
-  useSubscription(vm.autoLeave$, onLeave);
+  useSubscription(vm.autoLeave$, () => onLeave("user"));
+
+  // We need to set the proper timings on the animation based upon the sound length.
+  const ringDuration = pickupPhaseAudio?.soundDuration["waiting"] ?? 1;
+  useEffect((): (() => void) => {
+    // The CSS animation includes the delay, so we must double the length of the sound.
+    window.document.body.style.setProperty(
+      "--call-ring-duration-s",
+      `${ringDuration * 2}s`,
+    );
+    window.document.body.style.setProperty(
+      "--call-ring-delay-s",
+      `${ringDuration}s`,
+    );
+    // Remove properties when we unload.
+    return () => {
+      window.document.body.style.removeProperty("--call-ring-duration-s");
+      window.document.body.style.removeProperty("--call-ring-delay-s");
+    };
+  }, [pickupPhaseAudio?.soundDuration, ringDuration]);
 
   // When we enter timeout or decline we will leave the call.
   useEffect((): void | (() => void) => {
     if (callPickupState === "timeout") {
-      onLeave();
+      onLeave("user", "timeout");
     }
     if (callPickupState === "decline") {
-      // Wait for the sound to finish before leaving
-      void pickupPhaseAudio
-        ?.playSound("decline")
-        .catch((e) => {
-          logger.error("Failed to play decline sound", e);
-        })
-        .finally(() => {
-          onLeave();
-        });
+      onLeave("user", "decline");
     }
   }, [callPickupState, onLeave, pickupPhaseAudio]);
 
   // When waiting for pickup, loop a waiting sound
   useEffect((): void | (() => void) => {
-    if (callPickupState !== "ringing") return;
-    const interval = window.setInterval(() => {
-      void pickupPhaseAudio?.playSound("waiting");
-    }, PICKUP_SOUND_INTERVAL);
-    return (): void => window.clearInterval(interval);
-  }, [callPickupState, pickupPhaseAudio]);
+    if (callPickupState !== "ringing" || !pickupPhaseAudio) return;
+    const endSound = pickupPhaseAudio.playSoundLooping("waiting", ringDuration);
+    return () => {
+      void endSound().catch((e) => {
+        logger.error("Failed to stop ringing sound", e);
+      });
+    };
+  }, [callPickupState, pickupPhaseAudio, ringDuration]);
 
   // Waiting UI overlay
   const waitingOverlay: JSX.Element | null = useMemo(() => {
@@ -823,7 +831,7 @@ export const InCallView: FC = ({
 {
   playSound = vitest.fn();
   (useAudioContext as MockedFunction<typeof useAudioContext>).mockReturnValue({
     playSound,
+    playSoundLooping: vitest.fn(),
+    soundDuration: {},
   });
 });
diff --git a/src/room/WaitingForJoin.module.css b/src/room/WaitingForJoin.module.css
index 8f7ebeec..a598e482 100644
--- a/src/room/WaitingForJoin.module.css
+++ b/src/room/WaitingForJoin.module.css
@@ -25,7 +25,9 @@
   inset: -12px;
   border-radius: 9999px;
   border: 12px solid rgba(255, 255, 255, 0.6);
-  animation: pulse 1.6s ease-out infinite;
+  animation: pulse var(--call-ring-duration-s) ease-out infinite;
+  animation-delay: 1s;
+  opacity: 0;
 }
 
 .text {
@@ -36,13 +38,24 @@
   0% {
     transform: scale(0.95);
     opacity: 0.7;
+    transform: scale(0);
+    opacity: 1;
   }
-  70% {
+  35% {
     transform: scale(1.15);
     opacity: 0.15;
   }
-  100% {
+  50% {
     transform: scale(1.2);
     opacity: 0;
   }
+  50.01% {
+    transform: scale(0);
+  }
+  85% {
+    transform: scale(0);
+  }
+  100% {
+    transform: scale(0);
+  }
 }
diff --git a/src/sound/blocked.mp3 b/src/sound/blocked.mp3
deleted file mode 100644
index d37e5fc6..00000000
Binary files a/src/sound/blocked.mp3 and /dev/null differ
diff --git a/src/sound/blocked.ogg b/src/sound/blocked.ogg
deleted file mode 100644
index c570359c..00000000
Binary files a/src/sound/blocked.ogg and /dev/null differ
diff --git a/src/sound/call_declined.mp3 b/src/sound/call_declined.mp3
new file mode 100644
index 00000000..0d0c2335
Binary files /dev/null and b/src/sound/call_declined.mp3 differ
diff --git a/src/sound/call_declined.ogg b/src/sound/call_declined.ogg
new file mode 100644
index 00000000..458fcd26
Binary files /dev/null and b/src/sound/call_declined.ogg differ
diff --git a/src/sound/call_timeout.mp3 b/src/sound/call_timeout.mp3
new file mode 100644
index 00000000..373c0b65
Binary files /dev/null and b/src/sound/call_timeout.mp3 differ
diff --git a/src/sound/call_timeout.ogg b/src/sound/call_timeout.ogg
new file mode 100644
index 00000000..ca303b90
Binary files /dev/null and b/src/sound/call_timeout.ogg differ
diff --git a/src/sound/ringtone.mp3 b/src/sound/ringtone.mp3
new file mode 100644
index 00000000..d4c73adb
Binary files /dev/null and b/src/sound/ringtone.mp3 differ
diff --git a/src/sound/ringtone.ogg b/src/sound/ringtone.ogg
new file mode 100644
index 00000000..7b654d5e
Binary files /dev/null and b/src/sound/ringtone.ogg differ
diff --git a/src/state/CallViewModel.ts b/src/state/CallViewModel.ts
index 3802d6dd..ca797aa8 100644
--- a/src/state/CallViewModel.ts
+++ b/src/state/CallViewModel.ts
@@ -49,6 +49,7 @@ import {
   race,
   scan,
   skip,
+  skipWhile,
   startWith,
   switchAll,
   switchMap,
@@ -853,17 +854,6 @@
     throttleTime(THROTTLE_SOUND_EFFECT_MS),
   );
 
-  public readonly leaveSoundEffect$ = this.userMedia$.pipe(
-    pairwise(),
-    filter(
-      ([prev, current]) =>
-        current.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND &&
-        current.length < prev.length,
-    ),
-    map(() => {}),
-    throttleTime(THROTTLE_SOUND_EFFECT_MS),
-  );
-
   /**
    * The number of participants currently in the call.
    *
@@ -963,7 +953,9 @@
    * - "success": Someone else joined. The call is in a normal state. No audiovisual feedback.
    * - null: EC is configured to never show any waiting for answer state.
    */
-  public readonly callPickupState$ = this.options.waitForCallPickup
+  public readonly callPickupState$: Behavior<
+    "unknown" | "ringing" | "timeout" | "decline" | "success" | null
+  > = this.options.waitForCallPickup
     ? this.scope.behavior<
         "unknown" | "ringing" | "timeout" | "decline" | "success"
       >(
@@ -983,6 +975,24 @@
       )
     : constant(null);
 
+  public readonly leaveSoundEffect$ = combineLatest([
+    this.callPickupState$,
+    this.userMedia$,
+  ]).pipe(
+    // Until the call is successful, do not play a leave sound.
+    // If callPickupState$ is null, then we always play the sound as it will not conflict with a decline sound.
+    skipWhile(([c]) => c !== null && c !== "success"),
+    map(([, userMedia]) => userMedia),
+    pairwise(),
+    filter(
+      ([prev, current]) =>
+        current.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND &&
+        current.length < prev.length,
+    ),
+    map(() => {}),
+    throttleTime(THROTTLE_SOUND_EFFECT_MS),
+  );
+
   /**
    * List of MediaItems that we want to display, that are of type ScreenShare
    */
diff --git a/src/useAudioContext.tsx b/src/useAudioContext.tsx
index 1b00e82b..59334dda 100644
--- a/src/useAudioContext.tsx
+++ b/src/useAudioContext.tsx
@@ -32,6 +32,8 @@ async function playSound(
   buffer: AudioBuffer,
   volume: number,
   stereoPan: number,
+  delayS = 0,
+  abort?: AbortController,
 ): Promise<void> {
   const gain = ctx.createGain();
   gain.gain.setValueAtTime(volume, 0);
@@ -39,13 +41,62 @@
   pan.pan.setValueAtTime(stereoPan, 0);
   const src = ctx.createBufferSource();
   src.buffer = buffer;
-  src.connect(gain).connect(pan).connect(ctx.destination);
+  abort?.signal.addEventListener("abort", () => {
+    src.disconnect();
+  });
   const p = new Promise<void>((r) => src.addEventListener("ended", () => r()));
+  src.connect(gain).connect(pan).connect(ctx.destination);
   controls.setPlaybackStarted();
-  src.start();
+  src.start(ctx.currentTime + delayS);
   return p;
 }
 
+/**
+ * Play a sound through a given AudioContext, looping until stopped. Will take
+ * care of connecting the correct buffer and gating
+ * through gain.
+ * @param volume The volume to play at.
+ * @param ctx The context to play through.
+ * @param buffer The buffer to play.
+ * @returns A function used to end the sound. This function will return a promise when the sound has stopped.
+ */
+function playSoundLooping(
+  ctx: AudioContext,
+  buffer: AudioBuffer,
+  volume: number,
+  stereoPan: number,
+  delayS?: number,
+): () => Promise<void> {
+  if (delayS === 0) {
+    throw Error("Looping sounds must have a delay");
+  }
+
+  // Our audio loop
+  let lastSoundPromise: Promise<void>;
+  let nextSoundPromise: Promise<void>;
+  let ac: AbortController | undefined;
+  void (async (): Promise<void> => {
+    ac = new AbortController();
+    // Play a sound immediately
+    lastSoundPromise = Promise.resolve();
+    do {
+      // Queue up the next sound.
+      nextSoundPromise = playSound(ctx, buffer, volume, stereoPan, delayS, ac);
+      // Await the previous sound.
+      await lastSoundPromise;
+      // Swap the promises over, and loop round to play the next sound.
+      lastSoundPromise = nextSoundPromise;
+    } while (!ac.signal.aborted);
+  })();
+
+  return async () => {
+    ac?.abort();
+    // Wait for sounds to finish.
+    await lastSoundPromise;
+    await nextSoundPromise;
+  };
+}
+
 interface Props {
   /**
    * The sounds to play. If no sounds should be played then
@@ -57,8 +108,13 @@
   muted?: boolean;
 }
 
-interface UseAudioContext<S> {
+interface UseAudioContext<S extends string> {
   playSound(soundName: S): Promise<void>;
+  playSoundLooping(soundName: S, delayS?: number): () => Promise<void>;
+  /**
+   * Map of sound name to duration in seconds.
+   */
+  soundDuration: Record<S, number>;
 }
 
 /**
@@ -146,5 +202,23 @@ export function useAudioContext(
         earpiecePan,
       );
     },
+    playSoundLooping: (name, delayS: number): (() => Promise<void>) => {
+      if (!audioBuffers[name]) {
+        throw Error(`Tried to play a sound that wasn't buffered (${name})`);
+      }
+      return playSoundLooping(
+        audioContext,
+        audioBuffers[name],
+        soundEffectVolume * earpieceVolume,
+        earpiecePan,
+        delayS,
+      );
+    },
+    soundDuration: Object.fromEntries(
+      Object.entries(audioBuffers).map(([k, v]) => [
+        k,
+        (v as AudioBuffer).duration,
+      ]),
+    ),
   };
 }
diff --git a/src/utils/test-viewmodel.ts b/src/utils/test-viewmodel.ts
index e5558ae2..09044e3f 100644
--- a/src/utils/test-viewmodel.ts
+++ b/src/utils/test-viewmodel.ts
@@ -22,7 +22,10 @@
 } from "matrix-js-sdk";
 
 import { E2eeType } from "../e2ee/e2eeType";
-import { CallViewModel } from "../state/CallViewModel";
+import {
+  CallViewModel,
+  type CallViewModelOptions,
+} from "../state/CallViewModel";
 import {
   mockLivekitRoom,
   mockMatrixRoom,
@@ -122,6 +125,7 @@ export function getBasicRTCSession(
 export function getBasicCallViewModelEnvironment(
   members: RoomMember[],
   initialRtcMemberships: CallMembership[] = [localRtcMember, aliceRtcMember],
+  callViewModelOptions: Partial<CallViewModelOptions> = {},
 ): {
   vm: CallViewModel;
   rtcMemberships$: BehaviorSubject<CallMembership[]>;
@@ -148,6 +152,7 @@ export function getBasicCallViewModelEnvironment(
     mockMediaDevices({}),
     {
       encryptionSystem: { kind: E2eeType.PER_PARTICIPANT },
+      ...callViewModelOptions,
     },
     of(ConnectionState.Connected),
     handRaisedSubject$,
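
A minimal standalone sketch of the looping approach the patch takes in playSoundLooping: each iteration schedules the next AudioBufferSourceNode ahead of time with src.start(ctx.currentTime + delayS) and only then awaits the previous repetition's "ended" event, so the ring keeps repeating until the returned stop function aborts it. The loopBuffer helper below is illustrative only and is not part of the patch; it assumes an already-decoded AudioBuffer and omits the gain/pan wiring for brevity.

function loopBuffer(
  ctx: AudioContext,
  buffer: AudioBuffer,
  delayS: number,
): () => Promise<void> {
  const ac = new AbortController();

  // Play one repetition, scheduled `delayS` seconds from now.
  const playOnce = (): Promise<void> => {
    const src = ctx.createBufferSource();
    src.buffer = buffer;
    const ended = new Promise<void>((resolve) =>
      src.addEventListener("ended", () => resolve()),
    );
    // On abort, silence the source by disconnecting it from the graph.
    ac.signal.addEventListener("abort", () => src.disconnect());
    src.connect(ctx.destination);
    src.start(ctx.currentTime + delayS);
    return ended;
  };

  let last: Promise<void> = Promise.resolve();
  let next: Promise<void> = Promise.resolve();
  void (async () => {
    do {
      next = playOnce(); // queue the upcoming repetition
      await last; // let the current repetition finish
      last = next;
    } while (!ac.signal.aborted);
  })();

  // Stop looping; resolves once the in-flight repetitions have ended.
  return async () => {
    ac.abort();
    await last;
    await next;
  };
}

In the patch itself, the function returned by playSoundLooping plays this role: the ringing effect in InCallView.tsx calls it in the useEffect cleanup and awaits it when callPickupState leaves "ringing".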