Add custom audio renderer to only render joined participants & add iOS earpiece workaround

- Fix left/right to match Chromium + Safari (Firefox is swapped)
- Earpiece as setting
- Simpler code and documentation: the doc explains what this class actually does and why it is so complicated
- Use only one audioContext, remove (non-working) standby fallback

Signed-off-by: Timo K <toger5@hotmail.de>
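For context, the iOS earpiece workaround in this commit boils down to a small WebAudio graph. A minimal standalone sketch of the idea (names here are illustrative, not code from this diff):

```ts
// Safari on iOS does not expose the earpiece as an enumerable output device,
// so the trick is to pan the signal fully to one stereo channel and reduce
// the gain, approximating earpiece playback on the phone's top speaker.
// The pan/volume values (1 and 0.1) are the ones used in the diff below.
function attachEarpieceChain(
  ctx: AudioContext,
  source: AudioNode,
  earpiece: boolean,
): void {
  const gain = ctx.createGain();
  const pan = ctx.createStereoPanner();
  pan.pan.value = earpiece ? 1 : 0; // right channel only when active
  gain.gain.value = earpiece ? 0.1 : 1; // quieter for a phone held to the ear
  source.connect(gain).connect(pan).connect(ctx.destination);
}
```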
@@ -61,6 +61,7 @@
     "video": "Video"
   },
   "developer_mode": {
+    "always_show_iphone_earpiece": "Show iPhone earpiece option on all platforms",
     "crypto_version": "Crypto version: {{version}}",
     "debug_tile_layout_label": "Debug tile layout",
     "device_id": "Device ID: {{id}}",
@@ -174,6 +175,7 @@
     "camera_numbered": "Camera {{n}}",
     "default": "Default",
     "default_named": "Default <2>({{name}})</2>",
+    "earpiece": "Earpiece",
     "microphone": "Microphone",
     "microphone_numbered": "Microphone {{n}}",
     "speaker": "Speaker",
src/livekit/MatrixAudioRenderer.tsx (new file, 201 lines)
@@ -0,0 +1,201 @@
/*
Copyright 2025 New Vector Ltd.

SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/

import { getTrackReferenceId } from "@livekit/components-core";
import { type RemoteAudioTrack, Track } from "livekit-client";
import { useEffect, useMemo, useRef, type ReactNode } from "react";
import {
  useTracks,
  AudioTrack,
  type AudioTrackProps,
} from "@livekit/components-react";
import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc";
import { logger } from "matrix-js-sdk/lib/logger";

import { useEarpieceAudioConfig } from "./MediaDevicesContext";
import { useReactiveState } from "../useReactiveState";

export interface MatrixAudioRendererProps {
  /**
   * The list of participants to render audio for.
   * This list needs to be composed based on the MatrixRTC members so that we do not play audio from users
   * that are not expected to be in the RTC session.
   */
  members: CallMembership[];
  /**
   * If set to `true`, mutes all audio tracks rendered by the component.
   * @remarks
   * If set to `true`, the server will stop sending audio track data to the client.
   */
  muted?: boolean;
}

/**
 * The `MatrixAudioRenderer` component is a drop-in solution for adding audio to your LiveKit app.
 * It takes care of handling remote participants’ audio tracks and makes sure that microphones and screen shares are audible.
 *
 * It also takes care of the earpiece audio configuration for iOS devices.
 * This is done by using the WebAudio API to create a stereo pan effect that mimics the earpiece audio.
 * @example
 * ```tsx
 * <LiveKitRoom>
 *   <MatrixAudioRenderer />
 * </LiveKitRoom>
 * ```
 * @public
 */
export function MatrixAudioRenderer({
  members,
  muted,
}: MatrixAudioRendererProps): ReactNode {
  const validIdentities = useMemo(
    () =>
      new Set(members?.map((member) => `${member.sender}:${member.deviceId}`)),
    [members],
  );

  const loggedInvalidIdentities = useRef(new Set<string>());
  /**
   * Log an invalid LiveKit track identity.
   * An invalid identity is one that does not match any of the MatrixRTC members.
   *
   * @param identity The identity of the track that is invalid
   * @param validIdentities The list of valid identities
   */
  const logInvalid = (identity: string, validIdentities: Set<string>): void => {
    if (loggedInvalidIdentities.current.has(identity)) return;
    logger.warn(
      `Audio track ${identity} has no matching matrix call member`,
      `current members: ${Array.from(validIdentities.values())}`,
      `track will not get rendered`,
    );
    loggedInvalidIdentities.current.add(identity);
  };

  const tracks = useTracks(
    [
      Track.Source.Microphone,
      Track.Source.ScreenShareAudio,
      Track.Source.Unknown,
    ],
    {
      updateOnlyOn: [],
      onlySubscribed: true,
    },
  ).filter((ref) => {
    const isValid = validIdentities?.has(ref.participant.identity);
    if (!isValid) logInvalid(ref.participant.identity, validIdentities);
    return (
      !ref.participant.isLocal &&
      ref.publication.kind === Track.Kind.Audio &&
      isValid
    );
  });

  // This component also exists (in addition to the "only play audio for connected members" logic above)
  // to mimic earpiece audio on iPhones.
  // The Safari audio device enumeration does not expose an earpiece audio device.
  // As an alternative, we use the AudioContext pan node to use only one of the stereo channels.

  // This component gets additionally complicated because of a Safari bug.
  // (see: https://bugs.webkit.org/show_bug.cgi?id=251532
  // and the related issues: https://bugs.webkit.org/show_bug.cgi?id=237878
  // and https://bugs.webkit.org/show_bug.cgi?id=231105)
  //
  // The AudioContext gets stopped if the webview gets moved into the background.
  // Once the phone is in standby, audio playback will stop.
  // So the pan trick only works while the phone is not in standby.
  // If earpiece mode is not used, we do not use the AudioContext, to allow standby playback.
  // shouldUseAudioContext is set to false if stereoPan === 0 to allow standby bluetooth playback.

  const { pan: stereoPan, volume: volumeFactor } = useEarpieceAudioConfig();
  const shouldUseAudioContext = stereoPan !== 0;

  // Initialize the (potentially used) audio context.
  const audioContext = useMemo(() => new AudioContext(), []);
  const audioNodes = useMemo(
    () => ({
      gain: audioContext.createGain(),
      pan: audioContext.createStereoPanner(),
    }),
    [audioContext],
  );

  // Simple effects to update the gain and pan node based on the props
  useEffect(() => {
    audioNodes.pan.pan.value = stereoPan;
  }, [audioNodes.pan.pan, stereoPan]);
  useEffect(() => {
    // Balances the transition from audio context to normal audio playback;
    // gain behaves differently than el.volume.
    audioNodes.gain.gain.value = volumeFactor;
  }, [audioNodes.gain.gain, volumeFactor]);

  return (
    // We add all audio elements into one <div> for browser developer tool tidiness.
    <div style={{ display: "none" }}>
      {tracks.map((trackRef) => (
        <AudioTrackWithAudioNodes
          key={getTrackReferenceId(trackRef)}
          trackRef={trackRef}
          muted={muted}
          audioContext={shouldUseAudioContext ? audioContext : undefined}
          audioNodes={audioNodes}
        />
      ))}
    </div>
  );
}

interface StereoPanAudioTrackProps {
  muted?: boolean;
  audioContext?: AudioContext;
  audioNodes: {
    gain: GainNode;
    pan: StereoPannerNode;
  };
}

/**
 * This wraps `livekit.AudioTrack` to allow adding audio nodes to a track.
 * Its main purpose is to remount the AudioTrack component when switching from
 * audioContext to normal audio playback.
 * As of now the AudioTrack component does not support adding audio nodes while being mounted.
 */
function AudioTrackWithAudioNodes({
  trackRef,
  muted,
  audioContext,
  audioNodes,
  ...props
}: StereoPanAudioTrackProps &
  AudioTrackProps &
  React.RefAttributes<HTMLAudioElement>): ReactNode {
  // This is used to unmount/remount the AudioTrack component.
  // Mounting needs to happen after the audioContext is set.
  // (Adding the audio context when already mounted did not work outside strict mode.)
  const [trackReady, setTrackReady] = useReactiveState(
    () => false,
    [audioContext || audioNodes],
  );

  useEffect(() => {
    if (!trackRef || trackReady) return;
    const track = trackRef.publication.track as RemoteAudioTrack;
    track.setAudioContext(audioContext);
    track.setWebAudioPlugins(
      audioContext ? [audioNodes.gain, audioNodes.pan] : [],
    );
    setTrackReady(true);
  }, [audioContext, audioNodes, setTrackReady, trackReady, trackRef]);

  return (
    trackReady && <AudioTrack trackRef={trackRef} muted={muted} {...props} />
  );
}
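For reference, a sketch of how the new renderer is meant to be mounted (the InCallView hunk below does the same inside the call layout); `MatrixRTCSession` is assumed to be exported from matrix-js-sdk's matrixrtc module, and the component must sit inside a LiveKit room context for `useTracks` to work:

```tsx
import { type MatrixRTCSession } from "matrix-js-sdk/lib/matrixrtc";
import { type ReactNode } from "react";

import { MatrixAudioRenderer } from "./MatrixAudioRenderer";

// Hypothetical wrapper: only audio from the session's current MatrixRTC
// members is rendered; other LiveKit participants are filtered and logged.
function SessionAudio({ session }: { session: MatrixRTCSession }): ReactNode {
  return <MatrixAudioRenderer members={session.memberships} muted={false} />;
}
```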
@@ -27,11 +27,15 @@ import {
  audioOutput as audioOutputSetting,
  videoInput as videoInputSetting,
  type Setting,
+  alwaysShowIphoneEarpieceSetting,
} from "../settings/settings";

+export const EARPIECE_CONFIG_ID = "earpiece-id";
+
export type DeviceLabel =
  | { type: "name"; name: string }
  | { type: "number"; number: number }
+  | { type: "earpiece" }
  | { type: "default"; name: string | null };

export interface MediaDevice {
@@ -40,6 +44,11 @@ export interface MediaDevice {
   */
  available: Map<string, DeviceLabel>;
  selectedId: string | undefined;
+  /**
+   * An additional device configuration that makes us use only one channel of the
+   * output device and a reduced volume.
+   */
+  useAsEarpiece: boolean | undefined;
  /**
   * The group ID of the selected device.
   */
@@ -65,6 +74,7 @@ function useMediaDevice(
): MediaDevice {
  // Make sure we don't needlessly reset to a device observer without names,
  // once permissions are already given
+  const [alwaysShowIphoneEarpice] = useSetting(alwaysShowIphoneEarpieceSetting);
  const hasRequestedPermissions = useRef(false);
  const requestPermissions = usingNames || hasRequestedPermissions.current;
  hasRequestedPermissions.current ||= usingNames;
@@ -102,27 +112,39 @@ function useMediaDevice(
        // Create a virtual default audio output for browsers that don't have one.
        // Its device ID must be the empty string because that's what setSinkId
        // recognizes.
+        // We also create this if we do not have any available devices, so that
+        // we can use the default or the earpiece.
+        const showEarpiece =
+          navigator.userAgent.match("iPhone") || alwaysShowIphoneEarpice;
        if (
          kind === "audiooutput" &&
-          available.size &&
          !available.has("") &&
-          !available.has("default")
+          !available.has("default") &&
+          (available.size || showEarpiece)
        )
          available = new Map([
            ["", { type: "default", name: availableRaw[0]?.label || null }],
            ...available,
          ]);
+        if (kind === "audiooutput" && showEarpiece)
+          // On iPhones we have to create a virtual earpiece device, because
+          // the earpiece is not available as a device ID.
+          available = new Map([
+            ...available,
+            [EARPIECE_CONFIG_ID, { type: "earpiece" }],
+          ]);
        // Note: creating virtual default input devices would be another problem
        // entirely, because requesting a media stream from deviceId "" won't
        // automatically track the default device.
        return available;
      }),
    ),
-    [kind, deviceObserver$],
+    [alwaysShowIphoneEarpice, deviceObserver$, kind],
  ),
);

-  const [preferredId, select] = useSetting(setting);
+  const [preferredId, setPreferredId] = useSetting(setting);
+  const [asEarpice, setAsEarpiece] = useState(false);
  const selectedId = useMemo(() => {
    if (available.size) {
      // If the preferred device is available, use it. Or if every available
@@ -138,6 +160,7 @@ function useMediaDevice(
    }
    return undefined;
  }, [available, preferredId]);
+
  const selectedGroupId = useObservableEagerState(
    useMemo(
      () =>
@@ -151,14 +174,27 @@ function useMediaDevice(
    ),
  );

+  const select = useCallback(
+    (id: string) => {
+      if (id === EARPIECE_CONFIG_ID) {
+        setAsEarpiece(true);
+      } else {
+        setAsEarpiece(false);
+        setPreferredId(id);
+      }
+    },
+    [setPreferredId],
+  );
+
  return useMemo(
    () => ({
      available,
      selectedId,
+      useAsEarpiece: asEarpice,
      selectedGroupId,
      select,
    }),
-    [available, selectedId, selectedGroupId, select],
+    [available, selectedId, asEarpice, selectedGroupId, select],
  );
}
@@ -167,6 +203,7 @@ export const deviceStub: MediaDevice = {
  selectedId: undefined,
  selectedGroupId: undefined,
  select: () => {},
+  useAsEarpiece: false,
};
export const devicesStub: MediaDevices = {
  audioInput: deviceStub,
@@ -255,3 +292,30 @@ export const useMediaDeviceNames = (
      return context.stopUsingDeviceNames;
    }
  }, [context, enabled]);

+/**
+ * A convenience hook to get the audio node configuration for the earpiece.
+ * It will check the `useAsEarpiece` flag of the `audioOutput` device and return
+ * the appropriate pan and volume values.
+ *
+ * @returns pan and volume values for the earpiece audio node configuration.
+ */
+export const useEarpieceAudioConfig = (): {
+  pan: number;
+  volume: number;
+} => {
+  const { audioOutput } = useMediaDevices();
+  // We use only the right speaker (pan = 1) for the earpiece.
+  // This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone).
+  const pan = useMemo(
+    () => (audioOutput.useAsEarpiece ? 1 : 0),
+    [audioOutput.useAsEarpiece],
+  );
+  // We also lower the volume by a factor of 10 to optimize for the use case where
+  // a user is holding the phone to their ear.
+  const volume = useMemo(
+    () => (audioOutput.useAsEarpiece ? 0.1 : 1),
+    [audioOutput.useAsEarpiece],
+  );
+  return { pan, volume };
+};
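A sketch of how the pieces above fit together from a consumer's point of view (hypothetical component, not part of this diff): selecting the virtual earpiece device does not change the persisted device ID; it only flips `useAsEarpiece`, which `useEarpieceAudioConfig` translates into the pan/volume pair for the WebAudio chain.

```tsx
import { type ReactNode } from "react";

import {
  EARPIECE_CONFIG_ID,
  useEarpieceAudioConfig,
  useMediaDevices,
} from "./MediaDevicesContext";

function EarpieceToggle(): ReactNode {
  const { audioOutput } = useMediaDevices();
  // { pan: 1, volume: 0.1 } while the earpiece is active, else { pan: 0, volume: 1 }
  const { pan, volume } = useEarpieceAudioConfig();
  return (
    <button onClick={() => audioOutput.select(EARPIECE_CONFIG_ID)}>
      Use earpiece (pan {pan}, volume {volume})
    </button>
  );
}
```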
@@ -5,11 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/

-import {
-  RoomAudioRenderer,
-  RoomContext,
-  useLocalParticipant,
-} from "@livekit/components-react";
+import { RoomContext, useLocalParticipant } from "@livekit/components-react";
import { Text } from "@vector-im/compound-web";
import { ConnectionState, type Room } from "livekit-client";
import { type MatrixClient } from "matrix-js-sdk";
@@ -107,6 +103,7 @@ import {
import { ReactionsReader } from "../reactions/ReactionsReader";
import { ConnectionLostError } from "../utils/errors.ts";
import { useTypedEventEmitter } from "../useEvents.ts";
+import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer.tsx";

const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});

@@ -713,7 +710,10 @@ export const InCallView: FC<InCallViewProps> = ({
          </Text>
        )
      }
-      <RoomAudioRenderer muted={muteAllAudio} />
+      <MatrixAudioRenderer
+        members={rtcSession.memberships}
+        muted={muteAllAudio}
+      />
      {renderContent()}
      <CallEventAudioRenderer vm={vm} muted={muteAllAudio} />
      <ReactionsAudioRenderer vm={vm} muted={muteAllAudio} />
@@ -18,6 +18,7 @@ import {
  useNewMembershipManager as useNewMembershipManagerSetting,
  useExperimentalToDeviceTransport as useExperimentalToDeviceTransportSetting,
  muteAllAudio as muteAllAudioSetting,
+  alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
} from "./settings";
import type { MatrixClient } from "matrix-js-sdk";
import type { Room as LivekitRoom } from "livekit-client";
@@ -46,6 +47,9 @@ export const DeveloperSettingsTab: FC<Props> = ({ client, livekitRoom }) => {
    useNewMembershipManagerSetting,
  );

+  const [alwaysShowIphoneEarpiece, setAlwaysShowIphoneEarpiece] = useSetting(
+    alwaysShowIphoneEarpieceSetting,
+  );
  const [
    useExperimentalToDeviceTransport,
    setUseExperimentalToDeviceTransport,
@@ -192,6 +196,20 @@ export const DeveloperSettingsTab: FC<Props> = ({ client, livekitRoom }) => {
            [setMuteAllAudio],
          )}
        />
      </FieldRow>{" "}
+      <FieldRow>
+        <InputField
+          id="alwaysShowIphoneEarpiece"
+          type="checkbox"
+          label={t("developer_mode.always_show_iphone_earpiece")}
+          checked={alwaysShowIphoneEarpiece}
+          onChange={useCallback(
+            (event: ChangeEvent<HTMLInputElement>): void => {
+              setAlwaysShowIphoneEarpiece(event.target.checked);
+            },
+            [setAlwaysShowIphoneEarpiece],
+          )}
+        />{" "}
+      </FieldRow>
      {livekitRoom ? (
        <>
@@ -22,17 +22,20 @@ import {
} from "@vector-im/compound-web";
import { Trans, useTranslation } from "react-i18next";

-import { type MediaDevice } from "../livekit/MediaDevicesContext";
+import {
+  EARPIECE_CONFIG_ID,
+  type MediaDevice,
+} from "../livekit/MediaDevicesContext";
import styles from "./DeviceSelection.module.css";

interface Props {
-  devices: MediaDevice;
+  device: MediaDevice;
  title: string;
  numberedLabel: (number: number) => string;
}

export const DeviceSelection: FC<Props> = ({
-  devices,
+  device,
  title,
  numberedLabel,
}) => {
@@ -40,12 +43,13 @@ export const DeviceSelection: FC<Props> = ({
  const groupId = useId();
  const onChange = useCallback(
    (e: ChangeEvent<HTMLInputElement>) => {
-      devices.select(e.target.value);
+      device.select(e.target.value);
    },
-    [devices],
+    [device],
  );

-  if (devices.available.size == 0) return null;
+  // There is no need to show the menu if there is no choice that can be made.
+  if (device.available.size == 1) return null;

  return (
    <div className={styles.selection}>
@@ -60,7 +64,7 @@ export const DeviceSelection: FC<Props> = ({
      </Heading>
      <Separator className={styles.separator} />
      <div className={styles.options}>
-        {[...devices.available].map(([id, label]) => {
+        {[...device.available].map(([id, label]) => {
          let labelText: ReactNode;
          switch (label.type) {
            case "name":
@@ -85,6 +89,16 @@ export const DeviceSelection: FC<Props> = ({
                </Trans>
              );
              break;
+            case "earpiece":
+              labelText = t("settings.devices.earpiece");
+              break;
          }

+          let isSelected = false;
+          if (device.useAsEarpiece) {
+            isSelected = id === EARPIECE_CONFIG_ID;
+          } else {
+            isSelected = id === device.selectedId;
+          }
+
          return (
@@ -93,7 +107,7 @@ export const DeviceSelection: FC<Props> = ({
              name={groupId}
              control={
                <RadioControl
-                  checked={id === devices.selectedId}
+                  checked={isSelected}
                  onChange={onChange}
                  value={id}
                />
@@ -98,7 +98,6 @@ export const SettingsModal: FC<Props> = ({
  useMediaDeviceNames(devices, open);
  const [soundVolume, setSoundVolume] = useSetting(soundEffectVolumeSetting);
  const [soundVolumeRaw, setSoundVolumeRaw] = useState(soundVolume);
-
  const [showDeveloperSettingsTab] = useSetting(developerMode);

  const { available: isRageshakeAvailable } = useSubmitRageshake();
@@ -110,17 +109,18 @@ export const SettingsModal: FC<Props> = ({
      <>
        <Form>
          <DeviceSelection
-            devices={devices.audioInput}
+            device={devices.audioInput}
            title={t("settings.devices.microphone")}
            numberedLabel={(n) =>
              t("settings.devices.microphone_numbered", { n })
            }
          />
          <DeviceSelection
-            devices={devices.audioOutput}
+            device={devices.audioOutput}
            title={t("settings.devices.speaker")}
            numberedLabel={(n) => t("settings.devices.speaker_numbered", { n })}
          />

          <div className={styles.volumeSlider}>
            <label>{t("settings.audio_tab.effect_volume_label")}</label>
            <p>{t("settings.audio_tab.effect_volume_description")}</p>
@@ -146,7 +146,7 @@ export const SettingsModal: FC<Props> = ({
      <>
        <Form>
          <DeviceSelection
-            devices={devices.videoInput}
+            device={devices.videoInput}
            title={t("settings.devices.camera")}
            numberedLabel={(n) => t("settings.devices.camera_numbered", { n })}
          />
@@ -44,6 +44,9 @@ export class Setting<T> {
    this._value$.next(value);
    localStorage.setItem(this.key, JSON.stringify(value));
  };
+  public readonly getValue = (): T => {
+    return this._value$.getValue();
+  };
}

/**
@@ -128,3 +131,8 @@ export const useExperimentalToDeviceTransport = new Setting<boolean>(
export const muteAllAudio = new Setting<boolean>("mute-all-audio", false);

export const alwaysShowSelf = new Setting<boolean>("always-show-self", true);
+
+export const alwaysShowIphoneEarpiece = new Setting<boolean>(
+  "always-show-iphone-earpice",
+  false,
+);
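For reference, the new setting is consumed like any other `Setting<T>`: reactively via the existing `useSetting` hook (as the DeveloperSettingsTab hunk above does), or through the `getValue` accessor added here for one-off reads outside React. A sketch, with the import path assumed:

```tsx
import { type FC } from "react";

import { alwaysShowIphoneEarpiece, useSetting } from "./settings/settings";

const EarpieceDevToggle: FC = () => {
  // Reactive read/write; writes are persisted to localStorage.
  const [show, setShow] = useSetting(alwaysShowIphoneEarpiece);
  return (
    <input
      type="checkbox"
      checked={show}
      onChange={(e) => setShow(e.target.checked)}
    />
  );
};

// Outside React: one-off read through the accessor added in this commit.
const showNow: boolean = alwaysShowIphoneEarpiece.getValue();
```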
@@ -12,7 +12,10 @@ import {
  soundEffectVolume as soundEffectVolumeSetting,
  useSetting,
} from "./settings/settings";
-import { useMediaDevices } from "./livekit/MediaDevicesContext";
+import {
+  useEarpieceAudioConfig,
+  useMediaDevices,
+} from "./livekit/MediaDevicesContext";
import { type PrefetchedSounds } from "./soundUtils";

/**
@@ -28,12 +31,15 @@ async function playSound(
  ctx: AudioContext,
  buffer: AudioBuffer,
  volume: number,
+  stereoPan: number,
): Promise<void> {
  const gain = ctx.createGain();
  gain.gain.setValueAtTime(volume, 0);
+  const pan = ctx.createStereoPanner();
+  pan.pan.setValueAtTime(stereoPan, 0);
  const src = ctx.createBufferSource();
  src.buffer = buffer;
-  src.connect(gain).connect(ctx.destination);
+  src.connect(gain).connect(pan).connect(ctx.destination);
  const p = new Promise<void>((r) => src.addEventListener("ended", () => r()));
  src.start();
  return p;
@@ -63,8 +69,9 @@ interface UseAudioContext<S> {
export function useAudioContext<S extends string>(
  props: Props<S>,
): UseAudioContext<S> | null {
-  const [effectSoundVolume] = useSetting(soundEffectVolumeSetting);
-  const devices = useMediaDevices();
+  const [soundEffectVolume] = useSetting(soundEffectVolumeSetting);
+  const { audioOutput } = useMediaDevices();

  const [audioContext, setAudioContext] = useState<AudioContext>();
  const [audioBuffers, setAudioBuffers] = useState<Record<S, AudioBuffer>>();

@@ -106,23 +113,30 @@ export function useAudioContext<S extends string>(
    if (audioContext && "setSinkId" in audioContext) {
      // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
      // @ts-expect-error - setSinkId doesn't exist yet in types, maybe because it's not supported everywhere.
-      audioContext.setSinkId(devices.audioOutput.selectedId).catch((ex) => {
+      audioContext.setSinkId(audioOutput.selectedId).catch((ex) => {
        logger.warn("Unable to change sink for audio context", ex);
      });
    }
-  }, [audioContext, devices]);
+  }, [audioContext, audioOutput.selectedId]);
+  const { pan: earpiecePan, volume: earpieceVolume } = useEarpieceAudioConfig();

  // Don't return a function until we're ready.
  if (!audioContext || !audioBuffers || props.muted) {
    return null;
  }

  return {
    playSound: async (name): Promise<void> => {
      if (!audioBuffers[name]) {
        logger.debug(`Tried to play a sound that wasn't buffered (${name})`);
        return;
      }
-      return playSound(audioContext, audioBuffers[name], effectSoundVolume);
+      return playSound(
+        audioContext,
+        audioBuffers[name],
+        soundEffectVolume * earpieceVolume,
+        earpiecePan,
+      );
    },
  };
}
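Net effect of the changes in this file: sound effects now run through the same pan/gain logic as call audio. With the earpiece active, `useEarpieceAudioConfig` returns `{ pan: 1, volume: 0.1 }`, so an effect at a user volume setting of 0.5 plays at an effective gain of 0.05, right channel only. A worked example (illustrative values):

```ts
// Worked example of the volume/pan computation passed to playSound.
const soundEffectVolume = 0.5; // user's effect volume setting
const { pan, volume } = { pan: 1, volume: 0.1 }; // earpiece active

const effectiveVolume = soundEffectVolume * volume;
console.log(effectiveVolume, pan); // 0.05 1
```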