mirror of https://github.com/vector-im/element-call.git
Port to useAudioContext
@@ -5,147 +5,59 @@ SPDX-License-Identifier: AGPL-3.0-only
Please see LICENSE in the repository root for full details.
*/

import { ReactNode, useEffect, useMemo, useRef, useState } from "react";
import { filter, interval, skip, throttle } from "rxjs";
import { logger } from "matrix-js-sdk/src/logger";

import {
  soundEffectVolumeSetting as effectSoundVolumeSetting,
  useSetting,
} from "../settings/settings";
import { ReactNode, useEffect } from "react";
import { debounce, filter, interval, skip, throttle } from "rxjs";
import { CallViewModel } from "../state/CallViewModel";
import joinCallSoundMp3 from "../sound/join_call.mp3";
import joinCallSoundOgg from "../sound/join_call.ogg";
import leftCallSoundMp3 from "../sound/left_call.mp3";
import leftCallSoundOgg from "../sound/left_call.ogg";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useLatest } from "../useLatest";
import { prefetchSounds, useAudioContext } from "../useAudioContext";

// Do not play any sounds if the participant count has exceeded this
// number.
export const MAX_PARTICIPANT_COUNT_FOR_SOUND = 8;
export const DEBOUNCE_SOUND_EFFECT_MS = 500;
export const THROTTLE_SOUND_EFFECT_MS = 500;
export const DEBOUNCE_SOUND_EFFECT_MS = 150;

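// Note (illustrative, not in the original source): in the rxjs pipelines below,
// throttle((_) => interval(THROTTLE_SOUND_EFFECT_MS)) lets an event through and
// then mutes further events for 500 ms, while
// debounce((_) => interval(DEBOUNCE_SOUND_EFFECT_MS)) waits for 150 ms of quiet
// before emitting, so a rapid burst of joins or leaves collapses into one sound.
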
async function loadAudioBuffer(filename: string) {
  // Load an audio file
  const response = await fetch(filename);
  if (!response.ok) {
    throw Error("Could not load sound, response was not okay");
  }
  // Return the raw bytes; decoding happens later via decodeAudioData.
  return await response.arrayBuffer();
}

function playSound(
  volume: number,
  ctx?: AudioContext,
  buffer?: AudioBuffer,
): void {
  if (!ctx || !buffer) {
    return;
  }
  const gain = ctx.createGain();
  gain.gain.setValueAtTime(volume, 0);
  const src = ctx.createBufferSource();
  src.buffer = buffer;
  src.connect(gain).connect(ctx.destination);
  src.start();
}

function getPreferredAudioFormat() {
  const a = document.createElement("audio");
  if (a.canPlayType("audio/ogg") === "maybe") {
    return "ogg";
  }
  // Otherwise just assume MP3, as that has a chance of being more widely supported.
  return "mp3";
}

// We prefer to load these sounds ahead of time, so there
// is no delay on call join.
const preferredFormat = getPreferredAudioFormat();
const JoinSoundBufferPromise = loadAudioBuffer(
  preferredFormat === "ogg" ? joinCallSoundOgg : joinCallSoundMp3,
);
const LeftSoundBufferPromise = loadAudioBuffer(
  preferredFormat === "ogg" ? leftCallSoundOgg : leftCallSoundMp3,
);
const Sounds = prefetchSounds({
  join: {
    mp3: joinCallSoundMp3,
    ogg: joinCallSoundOgg,
  },
  left: {
    mp3: leftCallSoundMp3,
    ogg: leftCallSoundOgg,
  },
});

export function CallEventAudioRenderer({
  vm,
}: {
  vm: CallViewModel;
}): ReactNode {
  const [effectSoundVolume] = useSetting(effectSoundVolumeSetting);
  const devices = useMediaDevices();
  const audioSourceElement = useRef<HTMLAudioElement>(null);
  const [audioContext, setAudioContext] = useState<AudioContext>();
  const [joinCallBuffer, setJoinSoundNode] = useState<AudioBuffer>();
  const [leaveCallBuffer, setLeaveSoundNode] = useState<AudioBuffer>();
  const audioEngineCtx = useAudioContext({
    sounds: Sounds,
    latencyHint: "interactive",
  });

  useEffect(() => {
    const ctx = new AudioContext({
      // We want low latency for these effects.
      latencyHint: "interactive",
      // XXX: Types don't include this yet.
      ...{ sinkId: devices.audioOutput.selectedId },
    });
    const controller = new AbortController();
    (async () => {
      controller.signal.throwIfAborted();
      // Copy the buffer with slice(0), as decodeAudioData detaches its input.
      const enterCall = await ctx.decodeAudioData(
        (await JoinSoundBufferPromise).slice(0),
      );
      controller.signal.throwIfAborted();
      const leaveCall = await ctx.decodeAudioData(
        (await LeftSoundBufferPromise).slice(0),
      );
      controller.signal.throwIfAborted();
      setJoinSoundNode(enterCall);
      setLeaveSoundNode(leaveCall);
    })().catch((ex) => {
      logger.debug("Failed to setup audio context", ex);
    });

    setAudioContext(ctx);
    return () => {
      controller.abort("Closing");
      void ctx.close().catch((ex) => {
        logger.debug("Failed to close audio engine", ex);
      });
      setAudioContext(undefined);
    };
  }, []);

  // Update the sink ID whenever we change devices.
  useEffect(() => {
    if (audioContext && "setSinkId" in audioContext) {
      // setSinkId doesn't exist in types but does exist for some browsers.
      // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
      // @ts-ignore
      audioContext.setSinkId(devices.audioOutput.selectedId).catch((ex) => {
        logger.warn("Unable to change sink for audio context", ex);
      });
  if (!audioEngineCtx) {
    return;
  }
  }, [audioContext, devices]);

  // Prevent a rerender when the volume changes; we only need the
  // latest value at the moment a sound is played.
  const soundVolume = useLatest(effectSoundVolume);

  useEffect(() => {
    const joinSub = vm.memberChanges
      .pipe(
        filter(
          ({ joined, ids }) =>
            // Only play when more than one person is in the room.
            ids.length > 1 &&
              ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND &&
              joined.length > 0,
            ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && joined.length > 0,
        ),
        throttle((_) => interval(DEBOUNCE_SOUND_EFFECT_MS)),
        throttle((_) => interval(THROTTLE_SOUND_EFFECT_MS)),
        debounce((_) => interval(DEBOUNCE_SOUND_EFFECT_MS)),
      )
      .subscribe(() => {
        playSound(soundVolume.current, audioContext, joinCallBuffer);
      .subscribe((prev) => {
        logger.debug("Playing join sound for", ...prev.joined, "|", prev);
        audioEngineCtx.playSound("join");
      });

    const leftSub = vm.memberChanges
@@ -154,17 +66,18 @@ export function CallEventAudioRenderer({
          ({ ids, left }) =>
            ids.length <= MAX_PARTICIPANT_COUNT_FOR_SOUND && left.length > 0,
        ),
        throttle((_) => interval(DEBOUNCE_SOUND_EFFECT_MS)),
        throttle((_) => interval(THROTTLE_SOUND_EFFECT_MS)),
        debounce((_) => interval(DEBOUNCE_SOUND_EFFECT_MS)),
      )
      .subscribe(() => {
        playSound(soundVolume.current, audioContext, leaveCallBuffer);
        audioEngineCtx.playSound("left");
      });

    return (): void => {
      joinSub.unsubscribe();
      leftSub.unsubscribe();
    };
  }, [joinCallBuffer, leaveCallBuffer, soundVolume, vm]);
  }, [audioEngineCtx, vm]);

  return <audio ref={audioSourceElement} hidden />;
  return <></>;
}

src/useAudioContext.tsx (new file, 143 lines)
@@ -0,0 +1,143 @@
import { logger } from "matrix-js-sdk/src/logger";
import { useState, useEffect } from "react";
import {
  soundEffectVolumeSetting as effectSoundVolumeSetting,
  useSetting,
} from "./settings/settings";
import { useMediaDevices } from "./livekit/MediaDevicesContext";

type SoundDefinition = { mp3: string; ogg: string };

async function fetchBuffer(filename: string) {
  // Load an audio file
  const response = await fetch(filename);
  if (!response.ok) {
    throw Error("Could not load sound, response was not okay");
  }
  // Return the raw bytes; decoding happens later via decodeAudioData.
  return await response.arrayBuffer();
}

function playSound(
  volume: number,
  ctx?: AudioContext,
  buffer?: AudioBuffer,
): void {
  if (!ctx || !buffer) {
    return;
  }
  const gain = ctx.createGain();
  gain.gain.setValueAtTime(volume, 0);
  const src = ctx.createBufferSource();
  src.buffer = buffer;
  src.connect(gain).connect(ctx.destination);
  src.start();
}

function getPreferredAudioFormat() {
  const a = document.createElement("audio");
  if (a.canPlayType("audio/ogg") === "maybe") {
    return "ogg";
  }
  // Otherwise just assume MP3, as that has a chance of being more widely supported.
  return "mp3";
}

type PrefetchedSounds<S extends string> = Promise<Record<S, ArrayBuffer>>;

// We prefer to load these sounds ahead of time, so there
// is no delay on call join.
const PreferredFormat = getPreferredAudioFormat();

/**
 * Prefetch sounds to be used by the AudioContext. This should
 * be called outside the scope of a component to ensure the
 * sounds load ahead of time.
 * @param sounds A set of sound files that may be played.
 * @returns A map of sound names to buffers.
 */
export async function prefetchSounds<S extends string>(
  sounds: Record<S, SoundDefinition>,
): PrefetchedSounds<S> {
  logger.debug(`Loading sounds`);
  const buffers: Record<string, ArrayBuffer> = {};
  await Promise.all(
    Object.entries(sounds).map(async ([name, file]) => {
      const { mp3, ogg } = file as SoundDefinition;
      buffers[name] = await fetchBuffer(PreferredFormat === "ogg" ? ogg : mp3);
    }),
  );
  return buffers as Record<S, ArrayBuffer>;
}
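
For illustration, a minimal usage sketch of prefetchSounds. The click.mp3/click.ogg assets and the "sounds" constant are hypothetical; the pattern mirrors the Sounds constant in CallEventAudioRenderer, called at module scope so fetching starts at import time rather than on first render:

import clickMp3 from "./sound/click.mp3"; // hypothetical asset
import clickOgg from "./sound/click.ogg"; // hypothetical asset

// Module scope: the fetch begins as soon as this module is imported.
const sounds = prefetchSounds({
  click: { mp3: clickMp3, ogg: clickOgg },
});
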
interface Props<S extends string> {
  sounds: PrefetchedSounds<S>;
  latencyHint: AudioContextLatencyCategory;
}

interface UseAudioContext<S> {
  playSound(soundName: S): void;
}

export function useAudioContext<S extends string>(
  props: Props<S>,
): UseAudioContext<S> | null {
  const [effectSoundVolume] = useSetting(effectSoundVolumeSetting);
  const devices = useMediaDevices();
  const [audioContext, setAudioContext] = useState<AudioContext>();
  const [audioBuffers, setAudioBuffers] = useState<Record<S, AudioBuffer>>();

  useEffect(() => {
    const ctx = new AudioContext({
      // We want low latency for these effects.
      latencyHint: props.latencyHint,
      // XXX: Types don't include this yet.
      ...{ sinkId: devices.audioOutput.selectedId },
    });
    const controller = new AbortController();
    (async () => {
      const buffers: Record<string, AudioBuffer> = {};
      controller.signal.throwIfAborted();
      for (const [name, buffer] of Object.entries(await props.sounds)) {
        controller.signal.throwIfAborted();
        // Copy the buffer with slice(0), as decodeAudioData detaches its input.
        const audioBuffer = await ctx.decodeAudioData(buffer.slice(0));
        buffers[name] = audioBuffer;
        // Store as we go.
        setAudioBuffers(buffers as Record<S, AudioBuffer>);
      }
    })().catch((ex) => {
      logger.debug("Failed to setup audio context", ex);
    });

    setAudioContext(ctx);
    return () => {
      controller.abort("Closing");
      void ctx.close().catch((ex) => {
        logger.debug("Failed to close audio engine", ex);
      });
      setAudioContext(undefined);
    };
  }, []);

  // Update the sink ID whenever we change devices.
  useEffect(() => {
    if (audioContext && "setSinkId" in audioContext) {
      // setSinkId doesn't exist in types but does exist for some browsers.
      // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
      // @ts-ignore
      audioContext.setSinkId(devices.audioOutput.selectedId).catch((ex) => {
        logger.warn("Unable to change sink for audio context", ex);
      });
    }
  }, [audioContext, devices]);

  if (!audioContext || !audioBuffers) {
    logger.debug("Audio not ready yet");
    return null;
  }
  return {
    playSound: (name) => {
      playSound(effectSoundVolume, audioContext, audioBuffers[name]);
    },
  };
}
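
And a sketch of consuming the hook in a component (assuming the hypothetical "sounds" promise from the sketch above). The null check matters: useAudioContext returns null until the AudioContext exists and every buffer has been decoded:

function ClickSoundButton(): ReactNode {
  const audio = useAudioContext({ sounds, latencyHint: "interactive" });
  // Null until the context is created and the buffers are decoded.
  if (!audio) return null;
  return <button onClick={() => audio.playSound("click")}>Play</button>;
}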