Start to build out tests

This commit is contained in:
Half-Shot
2024-12-03 17:00:42 +00:00
parent 4d5a543330
commit a5c3d5c29d
9 changed files with 257 additions and 161 deletions

View File

@@ -115,12 +115,12 @@ function useMediaDevice(
}, [available, preferredId, select, alwaysDefault]);
}
const deviceStub: MediaDevice = {
export const deviceStub: MediaDevice = {
available: [],
selectedId: undefined,
select: () => {},
};
const devicesStub: MediaDevices = {
export const devicesStub: MediaDevices = {
audioInput: deviceStub,
audioOutput: deviceStub,
videoInput: deviceStub,

View File

@@ -6,12 +6,11 @@ Please see LICENSE in the repository root for full details.
*/
import { render } from "@testing-library/react";
import { beforeEach, expect, test } from "vitest";
import { beforeEach, expect, test, vitest } from "vitest";
import { MatrixClient } from "matrix-js-sdk/src/client";
import { ConnectionState, RemoteParticipant, Room } from "livekit-client";
import { of } from "rxjs";
import { afterEach } from "node:test";
import { act } from "react";
import { act, ReactNode } from "react";
import { soundEffectVolumeSetting } from "../settings/settings";
import {
@@ -20,7 +19,6 @@ import {
mockLocalParticipant,
mockMatrixRoom,
mockMatrixRoomMember,
mockMediaPlay,
mockRemoteParticipant,
} from "../utils/test";
import { E2eeType } from "../e2ee/e2eeType";
@@ -29,6 +27,17 @@ import {
CallEventAudioRenderer,
MAX_PARTICIPANT_COUNT_FOR_SOUND,
} from "./CallEventAudioRenderer";
import {
prefetchSounds,
// We're using this from our mock, but it doesn't exist in the actual module.
//@ts-ignore
playSound,
} from "../useAudioContext";
import {
MockRoom,
MockRTCSession,
TestReactionsWrapper,
} from "../utils/testReactions";
const alice = mockMatrixRoomMember({ userId: "@alice:example.org" });
const bob = mockMatrixRoomMember({ userId: "@bob:example.org" });
@@ -37,20 +46,39 @@ const bobId = `${bob.userId}:BBBB`;
const localParticipant = mockLocalParticipant({ identity: "" });
const aliceParticipant = mockRemoteParticipant({ identity: aliceId });
const bobParticipant = mockRemoteParticipant({ identity: bobId });
const originalPlayFn = window.HTMLMediaElement.prototype.play;
const enterSound = "http://localhost:3000/src/sound/join_call.ogg";
const leaveSound = "http://localhost:3000/src/sound/left_call.ogg";
beforeEach(() => {
soundEffectVolumeSetting.setValue(soundEffectVolumeSetting.defaultValue);
});
afterEach(() => {
window.HTMLMediaElement.prototype.play = originalPlayFn;
// Replace the real audio module: playSound is a single shared spy exposed
// both as a module export (so tests can assert on it) and through the
// useAudioContext hook (so the component under test calls the same spy).
vitest.mock("../useAudioContext", async () => {
  const playSound = vitest.fn();
  return {
    // NOTE(review): mockReturnValueOnce means only the FIRST prefetchSounds
    // call receives this value; later calls return undefined. Confirm the
    // module only prefetches once, or use mockReturnValue instead.
    prefetchSounds: vitest.fn().mockReturnValueOnce({
      sound: new ArrayBuffer(0),
    }),
    playSound,
    useAudioContext: () => ({
      playSound,
    }),
  };
});
/**
 * Renders the CallEventAudioRenderer under test inside a
 * TestReactionsWrapper so the reaction context it consumes is available.
 */
function TestComponent({
  room,
  vm,
}: {
  room: MockRoom;
  vm: CallViewModel;
}): ReactNode {
  return (
    // NOTE(review): an empty object is passed as the session membership;
    // these tests do not exercise membership through the RTC session.
    <TestReactionsWrapper rtcSession={new MockRTCSession(room, {})}>
      <CallEventAudioRenderer vm={vm} />
    </TestReactionsWrapper>
  );
}
/**
* We don't want to play a sound when loading the call state
* because typically this occurs in two stages. We first join
@@ -59,21 +87,15 @@ afterEach(() => {
* a noise every time.
*/
test("does NOT play a sound when entering a call", () => {
const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice, bob].map((p) => [p.userId, p]));
const remoteParticipants = of([aliceParticipant]);
const liveKitRoom = mockLivekitRoom(
{ localParticipant },
{ remoteParticipants },
);
const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
mockMatrixRoom({
client: {
getUserId: () => "@carol:example.org",
} as Partial<MatrixClient> as MatrixClient,
getMember: (userId) => members.get(userId) ?? null,
}),
room as any,
liveKitRoom,
{
kind: E2eeType.PER_PARTICIPANT,
@@ -81,13 +103,12 @@ test("does NOT play a sound when entering a call", () => {
of(ConnectionState.Connected),
);
render(<CallEventAudioRenderer vm={vm} />);
expect(audioIsPlaying).toHaveLength(0);
render(<TestComponent room={room} vm={vm} />);
expect(playSound).not.toBeCalled();
});
test("plays no sound when muted", () => {
soundEffectVolumeSetting.setValue(0);
const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice, bob].map((p) => [p.userId, p]));
const remoteParticipants = of([aliceParticipant, bobParticipant]);
const liveKitRoom = mockLivekitRoom(
@@ -95,13 +116,9 @@ test("plays no sound when muted", () => {
{ remoteParticipants },
);
const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
mockMatrixRoom({
client: {
getUserId: () => "@carol:example.org",
} as Partial<MatrixClient> as MatrixClient,
getMember: (userId) => members.get(userId) ?? null,
}),
room as any,
liveKitRoom,
{
kind: E2eeType.PER_PARTICIPANT,
@@ -109,14 +126,12 @@ test("plays no sound when muted", () => {
of(ConnectionState.Connected),
);
render(<CallEventAudioRenderer vm={vm} />);
render(<TestComponent room={room} vm={vm} />);
// Play a sound when joining a call.
expect(audioIsPlaying).toHaveLength(0);
expect(playSound).not.toBeCalled();
});
test("plays a sound when a user joins", () => {
const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map(
[aliceParticipant].map((p) => [p.identity, p]),
);
@@ -125,34 +140,25 @@ test("plays a sound when a user joins", () => {
remoteParticipants,
});
const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
mockMatrixRoom({
client: {
getUserId: () => "@carol:example.org",
} as Partial<MatrixClient> as MatrixClient,
getMember: (userId) => members.get(userId) ?? null,
}),
room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
render(<CallEventAudioRenderer vm={vm} />);
render(<TestComponent room={room} vm={vm} />);
act(() => {
liveKitRoom.addParticipant(bobParticipant);
});
// Play a sound when joining a call.
expect(audioIsPlaying).toEqual([
// Bob leaves
enterSound,
]);
expect(playSound).toBeCalledWith("join");
});
test("plays a sound when a user leaves", () => {
const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map(
[aliceParticipant].map((p) => [p.identity, p]),
);
@@ -160,34 +166,25 @@ test("plays a sound when a user leaves", () => {
localParticipant,
remoteParticipants,
});
const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
mockMatrixRoom({
client: {
getUserId: () => "@carol:example.org",
} as Partial<MatrixClient> as MatrixClient,
getMember: (userId) => members.get(userId) ?? null,
}),
room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
render(<CallEventAudioRenderer vm={vm} />);
render(<TestComponent room={room} vm={vm} />);
act(() => {
liveKitRoom.removeParticipant(aliceParticipant);
});
expect(audioIsPlaying).toEqual([
// Alice leaves
leaveSound,
]);
expect(playSound).toBeCalledWith("leave");
});
test("plays no sound when the participant list is more than the maximum size", () => {
const audioIsPlaying: string[] = mockMediaPlay();
const members = new Map([alice].map((p) => [p.userId, p]));
const remoteParticipants = new Map<string, RemoteParticipant>([
[aliceParticipant.identity, aliceParticipant],
...Array.from({ length: MAX_PARTICIPANT_COUNT_FOR_SOUND - 1 }).map<
@@ -201,25 +198,19 @@ test("plays no sound when the participant list is more than the maximum size", (
localParticipant,
remoteParticipants,
});
const room = new MockRoom(alice.userId);
const vm = new CallViewModel(
mockMatrixRoom({
client: {
getUserId: () => "@carol:example.org",
} as Partial<MatrixClient> as MatrixClient,
getMember: (userId) => members.get(userId) ?? null,
}),
room as any,
liveKitRoom as unknown as Room,
{
kind: E2eeType.PER_PARTICIPANT,
},
of(ConnectionState.Connected),
);
render(<CallEventAudioRenderer vm={vm} />);
expect(audioIsPlaying).toEqual([]);
render(<TestComponent room={room} vm={vm} />);
// When the count drops
act(() => {
liveKitRoom.removeParticipant(aliceParticipant);
});
expect(audioIsPlaying).toEqual([leaveSound]);
expect(playSound).not.toBeCalled();
});

View File

@@ -6,7 +6,7 @@ Please see LICENSE in the repository root for full details.
*/
import { ReactNode, useDeferredValue, useEffect, useMemo } from "react";
import { debounce, filter, interval, throttle } from "rxjs";
import { debounce, filter, interval, tap, throttle } from "rxjs";
import { CallViewModel } from "../state/CallViewModel";
import joinCallSoundMp3 from "../sound/join_call.mp3";
import joinCallSoundOgg from "../sound/join_call.ogg";
@@ -16,12 +16,12 @@ import handSoundOgg from "../sound/raise_hand.ogg?url";
import handSoundMp3 from "../sound/raise_hand.mp3?url";
import { prefetchSounds, useAudioContext } from "../useAudioContext";
import { useReactions } from "../useReactions";
import { useLatest } from "../useLatest";
// Do not play any sounds if the participant count has exceeded this
// number.
export const MAX_PARTICIPANT_COUNT_FOR_SOUND = 8;
export const THROTTLE_SOUND_EFFECT_MS = 500;
export const DEBOUNCE_SOUND_EFFECT_MS = 150;
const Sounds = prefetchSounds({
join: {
@@ -47,6 +47,7 @@ export function CallEventAudioRenderer({
sounds: Sounds,
latencyHint: "interactive",
});
const audioEngineRef = useLatest(audioEngineCtx);
const { raisedHands } = useReactions();
const raisedHandCount = useMemo(
@@ -56,16 +57,12 @@ export function CallEventAudioRenderer({
const previousRaisedHandCount = useDeferredValue(raisedHandCount);
useEffect(() => {
if (audioEngineCtx && previousRaisedHandCount < raisedHandCount) {
audioEngineCtx.playSound("raiseHand");
if (audioEngineRef.current && previousRaisedHandCount < raisedHandCount) {
audioEngineRef.current.playSound("raiseHand");
}
}, [audioEngineCtx, previousRaisedHandCount, raisedHandCount]);
}, [audioEngineRef, previousRaisedHandCount, raisedHandCount]);
useEffect(() => {
if (!audioEngineCtx) {
return;
}
const joinSub = vm.memberChanges
.pipe(
filter(
@@ -73,11 +70,9 @@ export function CallEventAudioRenderer({
ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && joined.length > 0,
),
throttle(() => interval(THROTTLE_SOUND_EFFECT_MS)),
debounce(() => interval(DEBOUNCE_SOUND_EFFECT_MS)),
)
.subscribe((prev) => {
console.log("Playing join sound for", ...prev.joined, "|", prev);
audioEngineCtx.playSound("join");
.subscribe(() => {
audioEngineRef.current?.playSound("join");
});
const leftSub = vm.memberChanges
@@ -87,17 +82,16 @@ export function CallEventAudioRenderer({
ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && left.length > 0,
),
throttle(() => interval(THROTTLE_SOUND_EFFECT_MS)),
debounce(() => interval(DEBOUNCE_SOUND_EFFECT_MS)),
)
.subscribe(() => {
audioEngineCtx.playSound("left");
audioEngineRef.current?.playSound("left");
});
return (): void => {
joinSub.unsubscribe();
leftSub.unsubscribe();
};
}, [audioEngineCtx, vm]);
}, [audioEngineRef, vm]);
return <></>;
}

View File

@@ -6,7 +6,7 @@ Please see LICENSE in the repository root for full details.
*/
import { render } from "@testing-library/react";
import { afterAll, expect, test } from "vitest";
import { afterAll, expect, test, vitest } from "vitest";
import { TooltipProvider } from "@vector-im/compound-web";
import { act, ReactNode } from "react";
@@ -16,12 +16,18 @@ import {
TestReactionsWrapper,
} from "../utils/testReactions";
import { ReactionsAudioRenderer } from "./ReactionAudioRenderer";
import { GenericReaction, ReactionSet } from "../reactions";
import {
playReactionsSound,
soundEffectVolumeSetting,
} from "../settings/settings";
import { mockMediaPlay } from "../utils/test";
import {
prefetchSounds,
// We're using this from our mock, but it doesn't exist in the actual module.
//@ts-ignore
playSound,
} from "../useAudioContext";
import { GenericReaction, ReactionSet } from "../reactions";
import { afterEach } from "node:test";
const memberUserIdAlice = "@alice:example.org";
const memberUserIdBob = "@bob:example.org";
@@ -50,11 +56,27 @@ function TestComponent({
);
}
const originalPlayFn = window.HTMLMediaElement.prototype.play;
afterAll(() => {
vitest.mock("../useAudioContext", async () => {
const playSound = vitest.fn();
return {
prefetchSounds: vitest.fn().mockReturnValueOnce({
sound: new ArrayBuffer(0),
}),
playSound,
useAudioContext: () => ({
playSound,
}),
};
});
afterEach(() => {
vitest.clearAllMocks();
playReactionsSound.setValue(playReactionsSound.defaultValue);
soundEffectVolumeSetting.setValue(soundEffectVolumeSetting.defaultValue);
window.HTMLMediaElement.prototype.play = originalPlayFn;
});
afterAll(() => {
vitest.restoreAllMocks();
});
test("preloads all audio elements", () => {
@@ -63,25 +85,11 @@ test("preloads all audio elements", () => {
new MockRoom(memberUserIdAlice),
membership,
);
const { container } = render(<TestComponent rtcSession={rtcSession} />);
expect(container.getElementsByTagName("audio")).toHaveLength(
// All reactions plus the generic sound
ReactionSet.filter((r) => r.sound).length + 1,
);
});
test("loads no audio elements when disabled in settings", () => {
playReactionsSound.setValue(false);
const rtcSession = new MockRTCSession(
new MockRoom(memberUserIdAlice),
membership,
);
const { container } = render(<TestComponent rtcSession={rtcSession} />);
expect(container.getElementsByTagName("audio")).toHaveLength(0);
render(<TestComponent rtcSession={rtcSession} />);
expect(prefetchSounds).toHaveBeenCalledOnce();
});
test("will play an audio sound when there is a reaction", () => {
const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
const rtcSession = new MockRTCSession(room, membership);
@@ -97,12 +105,10 @@ test("will play an audio sound when there is a reaction", () => {
act(() => {
room.testSendReaction(memberEventAlice, chosenReaction, membership);
});
expect(audioIsPlaying).toHaveLength(1);
expect(audioIsPlaying[0]).toContain(chosenReaction.sound?.ogg);
expect(playSound).toHaveBeenCalledWith(chosenReaction.name);
});
test("will play the generic audio sound when there is soundless reaction", () => {
const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
const rtcSession = new MockRTCSession(room, membership);
@@ -118,34 +124,10 @@ test("will play the generic audio sound when there is soundless reaction", () =>
act(() => {
room.testSendReaction(memberEventAlice, chosenReaction, membership);
});
expect(audioIsPlaying).toHaveLength(1);
expect(audioIsPlaying[0]).toContain(GenericReaction.sound?.ogg);
});
test("will play an audio sound with the correct volume", () => {
playReactionsSound.setValue(true);
soundEffectVolumeSetting.setValue(0.5);
const room = new MockRoom(memberUserIdAlice);
const rtcSession = new MockRTCSession(room, membership);
const { getByTestId } = render(<TestComponent rtcSession={rtcSession} />);
// Find the first reaction with a sound effect
const chosenReaction = ReactionSet.find((r) => !!r.sound);
if (!chosenReaction) {
throw Error(
"No reactions have sounds configured, this test cannot succeed",
);
}
act(() => {
room.testSendReaction(memberEventAlice, chosenReaction, membership);
});
expect((getByTestId(chosenReaction.name) as HTMLAudioElement).volume).toEqual(
0.5,
);
expect(playSound).toHaveBeenCalledWith(GenericReaction.name);
});
test("will play multiple audio sounds when there are multiple different reactions", () => {
const audioIsPlaying: string[] = mockMediaPlay();
playReactionsSound.setValue(true);
const room = new MockRoom(memberUserIdAlice);
@@ -164,7 +146,6 @@ test("will play multiple audio sounds when there are multiple different reaction
room.testSendReaction(memberEventBob, reaction2, membership);
room.testSendReaction(memberEventCharlie, reaction1, membership);
});
expect(audioIsPlaying).toHaveLength(2);
expect(audioIsPlaying[0]).toContain(reaction1.sound?.ogg);
expect(audioIsPlaying[1]).toContain(reaction2.sound?.ogg);
expect(playSound).toHaveBeenCalledWith(reaction1.name);
expect(playSound).toHaveBeenCalledWith(reaction2.name);
});

View File

@@ -11,6 +11,7 @@ import { useReactions } from "../useReactions";
import { playReactionsSound, useSetting } from "../settings/settings";
import { ReactionSet } from "../reactions";
import { prefetchSounds, useAudioContext } from "../useAudioContext";
import { useLatest } from "../useLatest";
const SoundMap = Object.fromEntries(
ReactionSet.filter((v) => v.sound !== undefined).map((v) => [
@@ -28,14 +29,11 @@ export function ReactionsAudioRenderer(): ReactNode {
sounds: Sounds,
latencyHint: "interactive",
});
const audioEngineRef = useLatest(audioEngineCtx);
const oldReactions = useDeferredValue(reactions);
useEffect(() => {
if (!audioEngineCtx) {
return;
}
if (!shouldPlay) {
if (!shouldPlay || !audioEngineRef.current) {
return;
}
const oldReactionSet = new Set(
@@ -48,13 +46,14 @@ export function ReactionsAudioRenderer(): ReactNode {
// Don't replay old reactions
return;
}
console.log("playing sound", reactionName);
if (SoundMap[reactionName]) {
audioEngineCtx.playSound(reactionName);
audioEngineRef.current.playSound(reactionName);
} else {
// Fallback sounds.
audioEngineCtx.playSound("generic");
audioEngineRef.current.playSound("generic");
}
}
}, [shouldPlay, oldReactions, reactions]);
}, [audioEngineRef, shouldPlay, oldReactions, reactions]);
return <></>;
}

View File

@@ -0,0 +1,126 @@
import { expect, test, vitest } from "vitest";
import { useAudioContext } from "./useAudioContext";
import { FC } from "react";
import { render } from "@testing-library/react";
import { deviceStub, MediaDevicesContext } from "./livekit/MediaDevicesContext";
import { afterEach } from "node:test";
import { soundEffectVolumeSetting } from "./settings/settings";
/**
 * Test explanation.
 * This test suite checks that the useAudioContext hook plays registered
 * sounds on demand, ignores unregistered sound names, routes audio to the
 * selected output device, and applies the configured sound effect volume.
 * A mock AudioContext stands in for the browser implementation.
 */
/**
 * Harness component: renders two buttons that trigger playSound on the
 * audio context — one with a registered sound name, one with a name that
 * is not registered (cast to bypass the compile-time check). Renders
 * nothing until the audio context is ready.
 */
const TestComponent: FC = () => {
  const engine = useAudioContext({
    sounds: Promise.resolve({
      aSound: new ArrayBuffer(32),
    }),
    latencyHint: "balanced",
  });
  if (engine === null) {
    return null;
  }
  const playValid = (): void => engine.playSound("aSound");
  // Deliberately request a name outside the registered sound set.
  const playInvalid = (): void => engine.playSound("not-valid" as any);
  return (
    <>
      <button role="button" onClick={playValid}>
        Valid sound
      </button>
      <button role="button" onClick={playInvalid}>
        Invalid sound
      </button>
    </>
  );
};
/**
 * Minimal stand-in for the Web Audio AudioContext, recording the calls the
 * hook makes (decode, buffer-source creation, gain, sink selection) so
 * tests can assert on them. The most recently constructed instance is
 * exposed via the static `testContext` field.
 */
class MockAudioContext {
  // Last instance created; tests reach the mock's spies through this.
  static testContext: MockAudioContext;

  constructor() {
    MockAudioContext.testContext = this;
  }

  // Mock GainNode: connect is a no-op; setValueAtTime is a spy so the
  // volume test can assert on the value passed.
  public gain = vitest.mocked(
    {
      connect: () => {},
      gain: {
        setValueAtTime: vitest.fn(),
      },
    },
    true,
  );
  public setSinkId = vitest.fn().mockResolvedValue(undefined);
  // NOTE(review): returns the sentinel 1 rather than an AudioBuffer-like
  // object — confirm the hook only stores this value without touching
  // AudioBuffer properties.
  public decodeAudioData = vitest.fn().mockReturnValue(1);
  public createBufferSource = vitest.fn().mockReturnValue(
    vitest.mocked({
      connect: (v: unknown) => v,
      start: () => {},
    }),
  );
  // Must be declared after `gain` so the field initializer sees it.
  public createGain = vitest.fn().mockReturnValue(this.gain);
  public close = vitest.fn().mockResolvedValue(undefined);
}
// Restore the real AudioContext (and any other stubbed globals) between
// tests so stubs cannot leak across test cases.
afterEach(() => {
  vitest.unstubAllGlobals();
});
test("can play a single sound", async () => {
  vitest.stubGlobal("AudioContext", MockAudioContext);
  const { findByText } = render(<TestComponent />);
  (await findByText("Valid sound")).click();
  // Playing a registered sound creates exactly one buffer source.
  expect(
    MockAudioContext.testContext.createBufferSource,
  ).toHaveBeenCalledOnce();
});
test("will ignore sounds that are not registered", async () => {
  vitest.stubGlobal("AudioContext", MockAudioContext);
  const { findByText } = render(<TestComponent />);
  (await findByText("Invalid sound")).click();
  // An unknown sound name must not reach the audio pipeline at all.
  expect(
    MockAudioContext.testContext.createBufferSource,
  ).not.toHaveBeenCalled();
});
test("will use the correct device", async () => {
  vitest.stubGlobal("AudioContext", MockAudioContext);
  // Provide a media-devices context whose audio output has a concrete
  // selected ID; the hook is expected to route output there via setSinkId.
  render(
    <MediaDevicesContext.Provider
      value={{
        audioInput: deviceStub,
        audioOutput: {
          selectedId: "chosen-device",
          available: [],
          select: () => {},
        },
        videoInput: deviceStub,
        startUsingDeviceNames: () => {},
        stopUsingDeviceNames: () => {},
      }}
    >
      <TestComponent />
    </MediaDevicesContext.Provider>,
  );
  // No sound was requested...
  expect(
    MockAudioContext.testContext.createBufferSource,
  ).not.toHaveBeenCalled();
  // ...but the sink was still switched to the selected output device.
  expect(MockAudioContext.testContext.setSinkId).toHaveBeenCalledWith(
    "chosen-device",
  );
});
test("will use the correct volume", async () => {
  vitest.stubGlobal("AudioContext", MockAudioContext);
  // Use a non-default volume so the assertion proves the setting is read.
  soundEffectVolumeSetting.setValue(0.33);
  try {
    const { findByText } = render(<TestComponent />);
    (await findByText("Valid sound")).click();
    // The gain node must be set to the configured volume at time 0.
    expect(
      MockAudioContext.testContext.gain.gain.setValueAtTime,
    ).toHaveBeenCalledWith(0.33, 0);
  } finally {
    // This suite's afterEach only unstubs globals, so restore the shared
    // volume setting here to keep it from leaking into other tests.
    soundEffectVolumeSetting.setValue(soundEffectVolumeSetting.defaultValue);
  }
});

View File

@@ -5,6 +5,7 @@ import {
useSetting,
} from "./settings/settings";
import { useMediaDevices } from "./livekit/MediaDevicesContext";
import { useInitial } from "./useInitial";
type SoundDefinition = { mp3?: string; ogg: string };
@@ -43,7 +44,7 @@ function getPreferredAudioFormat() {
return "mp3";
}
type PrefetchedSounds<S extends string> = Promise<Record<string, ArrayBuffer>>;
type PrefetchedSounds<S extends string> = Promise<Record<S, ArrayBuffer>>;
// We prefer to load these sounds ahead of time, so there
// is no delay on call join.
@@ -82,6 +83,12 @@ interface UseAudioContext<S> {
playSound(soundName: S): void;
}
/**
* Add an audio context which can be used to play
* a set of preloaded sounds.
* @param props
* @returns Either an instance that can be used to play sounds, or null if not ready.
*/
export function useAudioContext<S extends string>(
props: Props<S>,
): UseAudioContext<S> | null {
@@ -89,6 +96,7 @@ export function useAudioContext<S extends string>(
const devices = useMediaDevices();
const [audioContext, setAudioContext] = useState<AudioContext>();
const [audioBuffers, setAudioBuffers] = useState<Record<S, AudioBuffer>>();
const soundCache = useInitial(() => props.sounds);
useEffect(() => {
const ctx = new AudioContext({
@@ -101,13 +109,15 @@ export function useAudioContext<S extends string>(
(async () => {
const buffers: Record<string, AudioBuffer> = {};
controller.signal.throwIfAborted();
for (const [name, buffer] of Object.entries(await props.sounds)) {
for (const [name, buffer] of Object.entries(await soundCache)) {
controller.signal.throwIfAborted();
const audioBuffer = await ctx.decodeAudioData(buffer.slice(0));
// Type quirk, this is *definitely* a ArrayBuffer.
const audioBuffer = await ctx.decodeAudioData(
(buffer as ArrayBuffer).slice(0),
);
buffers[name] = audioBuffer;
// Store as we go.
setAudioBuffers(buffers as Record<S, AudioBuffer>);
}
setAudioBuffers(buffers as Record<S, AudioBuffer>);
})().catch((ex) => {
logger.debug("Failed to setup audio context", ex);
});
@@ -120,7 +130,7 @@ export function useAudioContext<S extends string>(
});
setAudioContext(undefined);
};
}, []);
}, [soundCache, props.latencyHint]);
// Update the sink ID whenever we change devices.
useEffect(() => {

View File

@@ -235,12 +235,3 @@ export function mockConfig(config: Partial<ResolvedConfigOptions> = {}): void {
...config,
});
}
/**
 * Replaces HTMLMediaElement.prototype.play with a recording stub.
 *
 * @returns An array that accumulates the `src` of the first <source> child
 *   of every media element whose play() is invoked.
 *
 * NOTE(review): this mutates the shared prototype; callers are responsible
 * for restoring the original play method after the test.
 */
export function mockMediaPlay(): string[] {
  const audioIsPlaying: string[] = [];
  window.HTMLMediaElement.prototype.play = async function (): Promise<void> {
    // Assumes the element's first child is a <source> — TODO confirm all
    // call sites render media elements in that shape.
    audioIsPlaying.push((this.children[0] as HTMLSourceElement).src);
    return Promise.resolve();
  };
  return audioIsPlaying;
}

View File

@@ -203,4 +203,8 @@ export class MockRoom extends EventEmitter {
});
return evt.getId()!;
}
public getMember() {
return undefined;
}
}