diff --git a/src/livekit/MediaDevicesContext.tsx b/src/livekit/MediaDevicesContext.tsx
index 4977f021..d19840ef 100644
--- a/src/livekit/MediaDevicesContext.tsx
+++ b/src/livekit/MediaDevicesContext.tsx
@@ -115,12 +115,12 @@ function useMediaDevice(
}, [available, preferredId, select, alwaysDefault]);
}
-const deviceStub: MediaDevice = {
+export const deviceStub: MediaDevice = {
available: [],
selectedId: undefined,
select: () => {},
};
-const devicesStub: MediaDevices = {
+export const devicesStub: MediaDevices = {
audioInput: deviceStub,
audioOutput: deviceStub,
videoInput: deviceStub,
diff --git a/src/room/CallEventAudioRenderer.test.tsx b/src/room/CallEventAudioRenderer.test.tsx
index 21d94a54..bb609323 100644
--- a/src/room/CallEventAudioRenderer.test.tsx
+++ b/src/room/CallEventAudioRenderer.test.tsx
@@ -6,12 +6,11 @@ Please see LICENSE in the repository root for full details.
*/
import { render } from "@testing-library/react";
-import { beforeEach, expect, test } from "vitest";
+import { beforeEach, expect, test, vitest } from "vitest";
import { MatrixClient } from "matrix-js-sdk/src/client";
import { ConnectionState, RemoteParticipant, Room } from "livekit-client";
import { of } from "rxjs";
-import { afterEach } from "node:test";
-import { act } from "react";
+import { act, ReactNode } from "react";
import { soundEffectVolumeSetting } from "../settings/settings";
import {
@@ -20,7 +19,6 @@ import {
mockLocalParticipant,
mockMatrixRoom,
mockMatrixRoomMember,
- mockMediaPlay,
mockRemoteParticipant,
} from "../utils/test";
import { E2eeType } from "../e2ee/e2eeType";
@@ -29,6 +27,17 @@ import {
CallEventAudioRenderer,
MAX_PARTICIPANT_COUNT_FOR_SOUND,
} from "./CallEventAudioRenderer";
+import {
+ prefetchSounds,
+ // We're using this from our mock, but it doesn't exist in the actual module.
+ //@ts-ignore
+ playSound,
+} from "../useAudioContext";
+import {
+ MockRoom,
+ MockRTCSession,
+ TestReactionsWrapper,
+} from "../utils/testReactions";
const alice = mockMatrixRoomMember({ userId: "@alice:example.org" });
const bob = mockMatrixRoomMember({ userId: "@bob:example.org" });
@@ -37,20 +46,39 @@ const bobId = `${bob.userId}:BBBB`;
const localParticipant = mockLocalParticipant({ identity: "" });
const aliceParticipant = mockRemoteParticipant({ identity: aliceId });
const bobParticipant = mockRemoteParticipant({ identity: bobId });
-
-const originalPlayFn = window.HTMLMediaElement.prototype.play;
-
-const enterSound = "http://localhost:3000/src/sound/join_call.ogg";
const leaveSound = "http://localhost:3000/src/sound/left_call.ogg";
beforeEach(() => {
soundEffectVolumeSetting.setValue(soundEffectVolumeSetting.defaultValue);
});
-afterEach(() => {
- window.HTMLMediaElement.prototype.play = originalPlayFn;
+vitest.mock("../useAudioContext", async () => {
+ const playSound = vitest.fn();
+ return {
+ prefetchSounds: vitest.fn().mockReturnValueOnce({
+ sound: new ArrayBuffer(0),
+ }),
+ playSound,
+ useAudioContext: () => ({
+ playSound,
+ }),
+ };
});
+function TestComponent({
+ room,
+ vm,
+}: {
+ room: MockRoom;
+ vm: CallViewModel;
+}): ReactNode {
+ return (
+
+
+
+ );
+}
+
/**
* We don't want to play a sound when loading the call state
* because typically this occurs in two stages. We first join
@@ -59,21 +87,15 @@ afterEach(() => {
* a noise every time.
*/
test("does NOT play a sound when entering a call", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice, bob].map((p) => [p.userId, p]));
const remoteParticipants = of([aliceParticipant]);
const liveKitRoom = mockLivekitRoom(
{ localParticipant },
{ remoteParticipants },
);
-
+ const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
- mockMatrixRoom({
- client: {
- getUserId: () => "@carol:example.org",
- } as Partial as MatrixClient,
- getMember: (userId) => members.get(userId) ?? null,
- }),
+ room as any,
liveKitRoom,
{
kind: E2eeType.PER_PARTICIPANT,
@@ -81,13 +103,12 @@ test("does NOT play a sound when entering a call", () => {
of(ConnectionState.Connected),
);
- render();
- expect(audioIsPlaying).toHaveLength(0);
+ render();
+ expect(playSound).not.toBeCalled();
});
test("plays no sound when muted", () => {
soundEffectVolumeSetting.setValue(0);
- const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice, bob].map((p) => [p.userId, p]));
const remoteParticipants = of([aliceParticipant, bobParticipant]);
const liveKitRoom = mockLivekitRoom(
@@ -95,13 +116,9 @@ test("plays no sound when muted", () => {
{ remoteParticipants },
);
+ const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
- mockMatrixRoom({
- client: {
- getUserId: () => "@carol:example.org",
- } as Partial as MatrixClient,
- getMember: (userId) => members.get(userId) ?? null,
- }),
+ room as any,
liveKitRoom,
{
kind: E2eeType.PER_PARTICIPANT,
@@ -109,14 +126,12 @@ test("plays no sound when muted", () => {
of(ConnectionState.Connected),
);
- render();
+ render();
// Play a sound when joining a call.
- expect(audioIsPlaying).toHaveLength(0);
+ expect(playSound).not.toBeCalled();
});
test("plays a sound when a user joins", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
- const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map(
[aliceParticipant].map((p) => [p.identity, p]),
);
@@ -125,34 +140,25 @@ test("plays a sound when a user joins", () => {
remoteParticipants,
});
+ const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
- mockMatrixRoom({
- client: {
- getUserId: () => "@carol:example.org",
- } as Partial as MatrixClient,
- getMember: (userId) => members.get(userId) ?? null,
- }),
+ room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
- render();
+ render();
act(() => {
liveKitRoom.addParticipant(bobParticipant);
});
// Play a sound when joining a call.
- expect(audioIsPlaying).toEqual([
- // Bob leaves
- enterSound,
- ]);
+ expect(playSound).toBeCalledWith("join");
});
test("plays a sound when a user leaves", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
- const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map(
[aliceParticipant].map((p) => [p.identity, p]),
);
@@ -160,34 +166,25 @@ test("plays a sound when a user leaves", () => {
localParticipant,
remoteParticipants,
});
+ const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
- mockMatrixRoom({
- client: {
- getUserId: () => "@carol:example.org",
- } as Partial as MatrixClient,
- getMember: (userId) => members.get(userId) ?? null,
- }),
+ room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
- render();
+ render();
act(() => {
liveKitRoom.removeParticipant(aliceParticipant);
});
- expect(audioIsPlaying).toEqual([
- // Alice leaves
- leaveSound,
- ]);
+ expect(playSound).toBeCalledWith("leave");
});
test("plays no sound when the participant list is more than the maximum size", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
- const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map([
[aliceParticipant.identity, aliceParticipant],
...Array.from({ length: MAX_PARTICIPANT_COUNT_FOR_SOUND - 1 }).map<
@@ -201,25 +198,19 @@ test("plays no sound when the participant list is more than the maximum size", (
localParticipant,
remoteParticipants,
});
-
+ const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
- mockMatrixRoom({
- client: {
- getUserId: () => "@carol:example.org",
- } as Partial as MatrixClient,
- getMember: (userId) => members.get(userId) ?? null,
- }),
+ room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
- render();
- expect(audioIsPlaying).toEqual([]);
+ render();
// When the count drops
act(() => {
liveKitRoom.removeParticipant(aliceParticipant);
});
- expect(audioIsPlaying).toEqual([leaveSound]);
+ expect(playSound).not.toBeCalled();
});
diff --git a/src/room/CallEventAudioRenderer.tsx b/src/room/CallEventAudioRenderer.tsx
index f814e1ab..8f4576f2 100644
--- a/src/room/CallEventAudioRenderer.tsx
+++ b/src/room/CallEventAudioRenderer.tsx
@@ -6,7 +6,7 @@ Please see LICENSE in the repository root for full details.
*/
import { ReactNode, useDeferredValue, useEffect, useMemo } from "react";
-import { debounce, filter, interval, throttle } from "rxjs";
+import { debounce, filter, interval, tap, throttle } from "rxjs";
import { CallViewModel } from "../state/CallViewModel";
import joinCallSoundMp3 from "../sound/join_call.mp3";
import joinCallSoundOgg from "../sound/join_call.ogg";
@@ -16,12 +16,12 @@ import handSoundOgg from "../sound/raise_hand.ogg?url";
import handSoundMp3 from "../sound/raise_hand.mp3?url";
import { prefetchSounds, useAudioContext } from "../useAudioContext";
import { useReactions } from "../useReactions";
+import { useLatest } from "../useLatest";
// Do not play any sounds if the participant count has exceeded this
// number.
export const MAX_PARTICIPANT_COUNT_FOR_SOUND = 8;
export const THROTTLE_SOUND_EFFECT_MS = 500;
-export const DEBOUNCE_SOUND_EFFECT_MS = 150;
const Sounds = prefetchSounds({
join: {
@@ -47,6 +47,7 @@ export function CallEventAudioRenderer({
sounds: Sounds,
latencyHint: "interactive",
});
+ const audioEngineRef = useLatest(audioEngineCtx);
const { raisedHands } = useReactions();
const raisedHandCount = useMemo(
@@ -56,16 +57,12 @@ export function CallEventAudioRenderer({
const previousRaisedHandCount = useDeferredValue(raisedHandCount);
useEffect(() => {
- if (audioEngineCtx && previousRaisedHandCount < raisedHandCount) {
- audioEngineCtx.playSound("raiseHand");
+ if (audioEngineRef.current && previousRaisedHandCount < raisedHandCount) {
+ audioEngineRef.current.playSound("raiseHand");
}
- }, [audioEngineCtx, previousRaisedHandCount, raisedHandCount]);
+ }, [audioEngineRef, previousRaisedHandCount, raisedHandCount]);
useEffect(() => {
- if (!audioEngineCtx) {
- return;
- }
-
const joinSub = vm.memberChanges
.pipe(
filter(
@@ -73,11 +70,9 @@ export function CallEventAudioRenderer({
ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && joined.length > 0,
),
throttle(() => interval(THROTTLE_SOUND_EFFECT_MS)),
- debounce(() => interval(DEBOUNCE_SOUND_EFFECT_MS)),
)
- .subscribe((prev) => {
- console.log("Playing join sound for", ...prev.joined, "|", prev);
- audioEngineCtx.playSound("join");
+ .subscribe(() => {
+ audioEngineRef.current?.playSound("join");
});
const leftSub = vm.memberChanges
@@ -87,17 +82,16 @@ export function CallEventAudioRenderer({
ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && left.length > 0,
),
throttle(() => interval(THROTTLE_SOUND_EFFECT_MS)),
- debounce(() => interval(DEBOUNCE_SOUND_EFFECT_MS)),
)
.subscribe(() => {
- audioEngineCtx.playSound("left");
+ audioEngineRef.current?.playSound("left");
});
return (): void => {
joinSub.unsubscribe();
leftSub.unsubscribe();
};
- }, [audioEngineCtx, vm]);
+ }, [audioEngineRef, vm]);
return <>>;
}
diff --git a/src/room/ReactionAudioRenderer.test.tsx b/src/room/ReactionAudioRenderer.test.tsx
index 80ae2b38..9d631d18 100644
--- a/src/room/ReactionAudioRenderer.test.tsx
+++ b/src/room/ReactionAudioRenderer.test.tsx
@@ -6,7 +6,7 @@ Please see LICENSE in the repository root for full details.
*/
import { render } from "@testing-library/react";
-import { afterAll, expect, test } from "vitest";
+import { afterAll, expect, test, vitest } from "vitest";
import { TooltipProvider } from "@vector-im/compound-web";
import { act, ReactNode } from "react";
@@ -16,12 +16,18 @@ import {
TestReactionsWrapper,
} from "../utils/testReactions";
import { ReactionsAudioRenderer } from "./ReactionAudioRenderer";
-import { GenericReaction, ReactionSet } from "../reactions";
import {
playReactionsSound,
soundEffectVolumeSetting,
} from "../settings/settings";
-import { mockMediaPlay } from "../utils/test";
+import {
+ prefetchSounds,
+ // We're using this from our mock, but it doesn't exist in the actual module.
+ //@ts-ignore
+ playSound,
+} from "../useAudioContext";
+import { GenericReaction, ReactionSet } from "../reactions";
+import { afterEach } from "node:test";
const memberUserIdAlice = "@alice:example.org";
const memberUserIdBob = "@bob:example.org";
@@ -50,11 +56,27 @@ function TestComponent({
);
}
-const originalPlayFn = window.HTMLMediaElement.prototype.play;
-afterAll(() => {
+vitest.mock("../useAudioContext", async () => {
+ const playSound = vitest.fn();
+ return {
+ prefetchSounds: vitest.fn().mockReturnValueOnce({
+ sound: new ArrayBuffer(0),
+ }),
+ playSound,
+ useAudioContext: () => ({
+ playSound,
+ }),
+ };
+});
+
+afterEach(() => {
+ vitest.clearAllMocks();
playReactionsSound.setValue(playReactionsSound.defaultValue);
soundEffectVolumeSetting.setValue(soundEffectVolumeSetting.defaultValue);
- window.HTMLMediaElement.prototype.play = originalPlayFn;
+});
+
+afterAll(() => {
+ vitest.restoreAllMocks();
});
test("preloads all audio elements", () => {
@@ -63,25 +85,11 @@ test("preloads all audio elements", () => {
new MockRoom(memberUserIdAlice),
membership,
);
- const { container } = render();
- expect(container.getElementsByTagName("audio")).toHaveLength(
- // All reactions plus the generic sound
- ReactionSet.filter((r) => r.sound).length + 1,
- );
-});
-
-test("loads no audio elements when disabled in settings", () => {
- playReactionsSound.setValue(false);
- const rtcSession = new MockRTCSession(
- new MockRoom(memberUserIdAlice),
- membership,
- );
- const { container } = render();
- expect(container.getElementsByTagName("audio")).toHaveLength(0);
+ render();
+ expect(prefetchSounds).toHaveBeenCalledOnce();
});
test("will play an audio sound when there is a reaction", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
const rtcSession = new MockRTCSession(room, membership);
@@ -97,12 +105,10 @@ test("will play an audio sound when there is a reaction", () => {
act(() => {
room.testSendReaction(memberEventAlice, chosenReaction, membership);
});
- expect(audioIsPlaying).toHaveLength(1);
- expect(audioIsPlaying[0]).toContain(chosenReaction.sound?.ogg);
+ expect(playSound).toHaveBeenCalledWith(chosenReaction.name);
});
test("will play the generic audio sound when there is soundless reaction", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
const rtcSession = new MockRTCSession(room, membership);
@@ -118,34 +124,10 @@ test("will play the generic audio sound when there is soundless reaction", () =>
act(() => {
room.testSendReaction(memberEventAlice, chosenReaction, membership);
});
- expect(audioIsPlaying).toHaveLength(1);
- expect(audioIsPlaying[0]).toContain(GenericReaction.sound?.ogg);
-});
-
-test("will play an audio sound with the correct volume", () => {
- playReactionsSound.setValue(true);
- soundEffectVolumeSetting.setValue(0.5);
- const room = new MockRoom(memberUserIdAlice);
- const rtcSession = new MockRTCSession(room, membership);
- const { getByTestId } = render();
-
- // Find the first reaction with a sound effect
- const chosenReaction = ReactionSet.find((r) => !!r.sound);
- if (!chosenReaction) {
- throw Error(
- "No reactions have sounds configured, this test cannot succeed",
- );
- }
- act(() => {
- room.testSendReaction(memberEventAlice, chosenReaction, membership);
- });
- expect((getByTestId(chosenReaction.name) as HTMLAudioElement).volume).toEqual(
- 0.5,
- );
+ expect(playSound).toHaveBeenCalledWith(GenericReaction.name);
});
test("will play multiple audio sounds when there are multiple different reactions", () => {
- const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
@@ -164,7 +146,6 @@ test("will play multiple audio sounds when there are multiple different reaction
room.testSendReaction(memberEventBob, reaction2, membership);
room.testSendReaction(memberEventCharlie, reaction1, membership);
});
- expect(audioIsPlaying).toHaveLength(2);
- expect(audioIsPlaying[0]).toContain(reaction1.sound?.ogg);
- expect(audioIsPlaying[1]).toContain(reaction2.sound?.ogg);
+ expect(playSound).toHaveBeenCalledWith(reaction1.name);
+ expect(playSound).toHaveBeenCalledWith(reaction2.name);
});
diff --git a/src/room/ReactionAudioRenderer.tsx b/src/room/ReactionAudioRenderer.tsx
index 7f1ca6f7..487157f7 100644
--- a/src/room/ReactionAudioRenderer.tsx
+++ b/src/room/ReactionAudioRenderer.tsx
@@ -11,6 +11,7 @@ import { useReactions } from "../useReactions";
import { playReactionsSound, useSetting } from "../settings/settings";
import { ReactionSet } from "../reactions";
import { prefetchSounds, useAudioContext } from "../useAudioContext";
+import { useLatest } from "../useLatest";
const SoundMap = Object.fromEntries(
ReactionSet.filter((v) => v.sound !== undefined).map((v) => [
@@ -28,14 +29,11 @@ export function ReactionsAudioRenderer(): ReactNode {
sounds: Sounds,
latencyHint: "interactive",
});
+ const audioEngineRef = useLatest(audioEngineCtx);
const oldReactions = useDeferredValue(reactions);
useEffect(() => {
- if (!audioEngineCtx) {
- return;
- }
-
- if (!shouldPlay) {
+ if (!shouldPlay || !audioEngineRef.current) {
return;
}
const oldReactionSet = new Set(
@@ -48,13 +46,14 @@ export function ReactionsAudioRenderer(): ReactNode {
// Don't replay old reactions
return;
}
+ console.log("playing sound", reactionName);
if (SoundMap[reactionName]) {
- audioEngineCtx.playSound(reactionName);
+ audioEngineRef.current.playSound(reactionName);
} else {
// Fallback sounds.
- audioEngineCtx.playSound("generic");
+ audioEngineRef.current.playSound("generic");
}
}
- }, [shouldPlay, oldReactions, reactions]);
+ }, [audioEngineRef, shouldPlay, oldReactions, reactions]);
return <>>;
}
diff --git a/src/useAudioContext.test.tsx b/src/useAudioContext.test.tsx
new file mode 100644
index 00000000..99a5f725
--- /dev/null
+++ b/src/useAudioContext.test.tsx
@@ -0,0 +1,126 @@
+import { expect, test, vitest } from "vitest";
+import { useAudioContext } from "./useAudioContext";
+import { FC } from "react";
+import { render } from "@testing-library/react";
+import { deviceStub, MediaDevicesContext } from "./livekit/MediaDevicesContext";
+import { afterEach } from "node:test";
+import { soundEffectVolumeSetting } from "./settings/settings";
+
+/**
+ * Test explanation.
+ * This test suite checks that the useAudioContext hook correctly plays
+ * preloaded sounds, routes audio to the selected output device, and
+ * applies the configured sound effect volume.
+ */
+
+const TestComponent: FC = () => {
+ const audioCtx = useAudioContext({
+ sounds: Promise.resolve({
+ aSound: new ArrayBuffer(32),
+ }),
+ latencyHint: "balanced",
+ });
+ if (!audioCtx) {
+ return null;
+ }
+ return (
+ <>
+
+
+ >
+ );
+};
+
+class MockAudioContext {
+ static testContext: MockAudioContext;
+
+ constructor() {
+ MockAudioContext.testContext = this;
+ }
+
+ public gain = vitest.mocked(
+ {
+ connect: () => {},
+ gain: {
+ setValueAtTime: vitest.fn(),
+ },
+ },
+ true,
+ );
+
+ public setSinkId = vitest.fn().mockResolvedValue(undefined);
+ public decodeAudioData = vitest.fn().mockReturnValue(1);
+ public createBufferSource = vitest.fn().mockReturnValue(
+ vitest.mocked({
+ connect: (v: unknown) => v,
+ start: () => {},
+ }),
+ );
+ public createGain = vitest.fn().mockReturnValue(this.gain);
+ public close = vitest.fn().mockResolvedValue(undefined);
+}
+
+afterEach(() => {
+ vitest.unstubAllGlobals();
+});
+
+test("can play a single sound", async () => {
+ vitest.stubGlobal("AudioContext", MockAudioContext);
+ const { findByText } = render();
+ (await findByText("Valid sound")).click();
+ expect(
+ MockAudioContext.testContext.createBufferSource,
+ ).toHaveBeenCalledOnce();
+});
+test("will ignore sounds that are not registered", async () => {
+ vitest.stubGlobal("AudioContext", MockAudioContext);
+ const { findByText } = render();
+ (await findByText("Invalid sound")).click();
+ expect(
+ MockAudioContext.testContext.createBufferSource,
+ ).not.toHaveBeenCalled();
+});
+
+test("will use the correct device", async () => {
+ vitest.stubGlobal("AudioContext", MockAudioContext);
+ render(
+ {},
+ },
+ videoInput: deviceStub,
+ startUsingDeviceNames: () => {},
+ stopUsingDeviceNames: () => {},
+ }}
+ >
+
+ ,
+ );
+ expect(
+ MockAudioContext.testContext.createBufferSource,
+ ).not.toHaveBeenCalled();
+ expect(MockAudioContext.testContext.setSinkId).toHaveBeenCalledWith(
+ "chosen-device",
+ );
+});
+
+test("will use the correct volume", async () => {
+ vitest.stubGlobal("AudioContext", MockAudioContext);
+ soundEffectVolumeSetting.setValue(0.33);
+ const { findByText } = render();
+ (await findByText("Valid sound")).click();
+ expect(
+ MockAudioContext.testContext.gain.gain.setValueAtTime,
+ ).toHaveBeenCalledWith(0.33, 0);
+});
diff --git a/src/useAudioContext.tsx b/src/useAudioContext.tsx
index c07b6736..8e5b0e4c 100644
--- a/src/useAudioContext.tsx
+++ b/src/useAudioContext.tsx
@@ -5,6 +5,7 @@ import {
useSetting,
} from "./settings/settings";
import { useMediaDevices } from "./livekit/MediaDevicesContext";
+import { useInitial } from "./useInitial";
type SoundDefinition = { mp3?: string; ogg: string };
@@ -43,7 +44,7 @@ function getPreferredAudioFormat() {
return "mp3";
}
-type PrefetchedSounds = Promise>;
+type PrefetchedSounds = Promise>;
// We prefer to load these sounds ahead of time, so there
// is no delay on call join.
@@ -82,6 +83,12 @@ interface UseAudioContext {
playSound(soundName: S): void;
}
+/**
+ * Add an audio context which can be used to play
+ * a set of preloaded sounds.
+ * @param props The set of sounds to preload and the latency hint for the underlying AudioContext.
+ * @returns Either an instance that can be used to play sounds, or null if not ready.
+ */
export function useAudioContext(
props: Props,
): UseAudioContext | null {
@@ -89,6 +96,7 @@ export function useAudioContext(
const devices = useMediaDevices();
const [audioContext, setAudioContext] = useState();
const [audioBuffers, setAudioBuffers] = useState>();
+ const soundCache = useInitial(() => props.sounds);
useEffect(() => {
const ctx = new AudioContext({
@@ -101,13 +109,15 @@ export function useAudioContext(
(async () => {
const buffers: Record = {};
controller.signal.throwIfAborted();
- for (const [name, buffer] of Object.entries(await props.sounds)) {
+ for (const [name, buffer] of Object.entries(await soundCache)) {
controller.signal.throwIfAborted();
- const audioBuffer = await ctx.decodeAudioData(buffer.slice(0));
+      // Type quirk, this is *definitely* an ArrayBuffer.
+ const audioBuffer = await ctx.decodeAudioData(
+ (buffer as ArrayBuffer).slice(0),
+ );
buffers[name] = audioBuffer;
- // Store as we go.
- setAudioBuffers(buffers as Record);
}
+ setAudioBuffers(buffers as Record);
})().catch((ex) => {
logger.debug("Failed to setup audio context", ex);
});
@@ -120,7 +130,7 @@ export function useAudioContext(
});
setAudioContext(undefined);
};
- }, []);
+ }, [soundCache, props.latencyHint]);
// Update the sink ID whenever we change devices.
useEffect(() => {
diff --git a/src/utils/test.ts b/src/utils/test.ts
index 99a9264b..607e1b12 100644
--- a/src/utils/test.ts
+++ b/src/utils/test.ts
@@ -235,12 +235,3 @@ export function mockConfig(config: Partial = {}): void {
...config,
});
}
-
-export function mockMediaPlay(): string[] {
- const audioIsPlaying: string[] = [];
- window.HTMLMediaElement.prototype.play = async function (): Promise {
- audioIsPlaying.push((this.children[0] as HTMLSourceElement).src);
- return Promise.resolve();
- };
- return audioIsPlaying;
-}
diff --git a/src/utils/testReactions.tsx b/src/utils/testReactions.tsx
index 84ff217b..4e4aff5e 100644
--- a/src/utils/testReactions.tsx
+++ b/src/utils/testReactions.tsx
@@ -203,4 +203,8 @@ export class MockRoom extends EventEmitter {
});
return evt.getId()!;
}
+
+ public getMember() {
+ return undefined;
+ }
}