diff --git a/src/livekit/MatrixAudioRenderer.test.tsx b/src/livekit/MatrixAudioRenderer.test.tsx
index 637e02ed..502c8c65 100644
--- a/src/livekit/MatrixAudioRenderer.test.tsx
+++ b/src/livekit/MatrixAudioRenderer.test.tsx
@@ -16,15 +16,18 @@ import { type RemoteAudioTrack } from "livekit-client";
import { type ReactNode } from "react";
import { useTracks } from "@livekit/components-react";
-import { testAudioContext } from "../useAudioContext.test";
+import {
+ TestAudioConstructor,
+ testAudioContext,
+ TestAudioContextConstructor,
+} from "../useAudioContext.test";
import * as MediaDevicesContext from "./MediaDevicesContext";
import { MatrixAudioRenderer } from "./MatrixAudioRenderer";
import { mockTrack } from "../utils/test";
-export const TestAudioContextConstructor = vi.fn(() => testAudioContext);
-
beforeEach(() => {
vi.stubGlobal("AudioContext", TestAudioContextConstructor);
+ vi.stubGlobal("Audio", TestAudioConstructor);
});
afterEach(() => {
diff --git a/src/useAudioContext.test.tsx b/src/useAudioContext.test.tsx
index 814df8e9..3fdea8a5 100644
--- a/src/useAudioContext.test.tsx
+++ b/src/useAudioContext.test.tsx
@@ -87,12 +87,19 @@ export const testAudioContext = {
createGain: vi.fn().mockReturnValue(gainNode),
createStereoPanner: vi.fn().mockReturnValue(panNode),
close: vi.fn().mockResolvedValue(undefined),
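+ // The hook assigns this node's stream to an audio element's srcObject; tests don't need a real MediaStream.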
+ createMediaStreamDestination: vi.fn().mockReturnValue({ stream: undefined }),
};
export const TestAudioContextConstructor = vi.fn(() => testAudioContext);
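+// Minimal HTMLAudioElement stand-in; the hook under test only needs srcObject and setSinkId.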
+export const testAudioElement = {
+ setSinkId: vi.fn().mockResolvedValue(null),
+};
+export const TestAudioConstructor = vi.fn(() => testAudioElement);
let user: UserEvent;
beforeEach(() => {
vi.stubGlobal("AudioContext", TestAudioContextConstructor);
+ vi.stubGlobal("Audio", TestAudioConstructor);
+
user = userEvent.setup();
});
@@ -114,6 +121,7 @@ test("will ignore sounds that are not registered", async () => {
});
test("will use the correct device", () => {
+ testAudioElement.setSinkId.mockClear();
render(
{
,
);
expect(testAudioContext.createBufferSource).not.toHaveBeenCalled();
- expect(testAudioContext.setSinkId).toHaveBeenCalledWith("chosen-device");
+ expect(testAudioElement.setSinkId).toHaveBeenCalledWith("chosen-device");
});
test("will use the correct volume level", async () => {
diff --git a/src/useAudioContext.tsx b/src/useAudioContext.tsx
index 23df0dbe..00cea2fc 100644
--- a/src/useAudioContext.tsx
+++ b/src/useAudioContext.tsx
@@ -18,6 +18,7 @@ import {
} from "./livekit/MediaDevicesContext";
import { type PrefetchedSounds } from "./soundUtils";
import { useUrlParams } from "./UrlParams";
+import { useInitial } from "./useInitial";
/**
* Play a sound through a given AudioContext. Will take
@@ -76,15 +77,18 @@ export function useAudioContext(
const [audioContext, setAudioContext] = useState<AudioContext>();
const [audioBuffers, setAudioBuffers] = useState<Record<string, AudioBuffer>>();
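+ // AudioContext.setSinkId is not supported everywhere, so output is routed through an HTMLAudioElement and the sink is selected on that element instead.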
+ const htmlAudioElement = useInitial((): HTMLAudioElement => new Audio());
+
useEffect(() => {
const sounds = props.sounds;
- if (!sounds) {
+ if (!sounds || !htmlAudioElement) {
return;
}
const ctx = new AudioContext({
// We want low latency for these effects.
latencyHint: props.latencyHint,
});
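+ // Play the context's output through the audio element so the sink chosen below applies to these sounds.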
+ htmlAudioElement.srcObject = ctx.createMediaStreamDestination().stream;
// We want to clone the content of our preloaded
// sound buffers into this context. The context may
@@ -107,22 +111,16 @@ export function useAudioContext(
});
setAudioContext(undefined);
};
- }, [props.sounds, props.latencyHint]);
+ }, [props.sounds, props.latencyHint, htmlAudioElement]);
// Update the sink ID whenever we change devices.
useEffect(() => {
- if (
- audioContext &&
- "setSinkId" in audioContext &&
- !controlledAudioDevices
- ) {
- // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
- // @ts-expect-error - setSinkId doesn't exist yet in types, maybe because it's not supported everywhere.
- audioContext.setSinkId(audioOutput.selectedId).catch((ex) => {
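+ // HTMLMediaElement.setSinkId is in the standard DOM types, so no @ts-expect-error is needed here.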
+ if (!controlledAudioDevices && audioOutput.selectedId) {
+ htmlAudioElement.setSinkId(audioOutput.selectedId).catch((ex) => {
logger.warn("Unable to change sink for audio context", ex);
});
}
- }, [audioContext, audioOutput.selectedId, controlledAudioDevices]);
+ }, [audioOutput.selectedId, controlledAudioDevices, htmlAudioElement]);
const { pan: earpiecePan, volume: earpieceVolume } = useEarpieceAudioConfig();
// Don't return a function until we're ready.