Mirror of https://github.com/vector-im/element-call.git (synced 2026-02-02 04:05:56 +00:00)
Refactor media devices to live outside React as Observables (#3334)
* Refactor media devices to live outside React as Observables

  This moves the media devices state out of React to further our transition to an MVVM architecture in which we can more easily model and store complex application state. I have created an AppViewModel to act as the overarching state holder for any future non-React state we end up creating, and the MediaDevices reside within this. We should move more application logic (including the CallViewModel itself) there in the future.

* Address review feedback

* Fixes from iOS debugging session (#3342):
  - Don't use the preferred vs. selected concept in controlled media. It's not needed, since we don't use the ID for actual browser media devices (the IDs are not even actual browser media devices).
  - Add more logging.
  - Add more conditions so we don't accidentally set a deviceId that is not a browser deviceId but one provided via controlled media.

---------

Co-authored-by: Timo <16718859+toger5@users.noreply.github.com>
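The core pattern the commit describes — device state held in RxJS observables by a view model, consumed from React through a context — is sketched below. This is an illustrative reconstruction, not code from the diff; DemoMediaDevices and useSelectedDeviceId are hypothetical stand-ins for the real MediaDevices class and hooks shown further down.

import { createContext, useContext } from "react";
import { useObservableEagerState } from "observable-hooks";
import { BehaviorSubject, type Observable } from "rxjs";

// Hypothetical, simplified stand-in for the MediaDevices state class added in
// src/state/MediaDevices.ts: the selection lives in an observable, not in React.
class DemoMediaDevices {
  private readonly selection = new BehaviorSubject<string | undefined>(undefined);
  public readonly selected$: Observable<string | undefined> = this.selection;
  public select(id: string): void {
    this.selection.next(id);
  }
}

// React only holds a reference to the model via a context...
const DemoMediaDevicesContext = createContext<DemoMediaDevices | undefined>(
  undefined,
);

// ...and components subscribe to the observable, re-rendering on each emission.
function useSelectedDeviceId(): string | undefined {
  const devices = useContext(DemoMediaDevicesContext);
  if (devices === undefined)
    throw new Error("Missing DemoMediaDevicesContext provider");
  return useObservableEagerState(devices.selected$);
}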
src/App.tsx (13 changed lines)
@@ -19,10 +19,11 @@ import { ClientProvider } from "./ClientContext";
import { ErrorPage, LoadingPage } from "./FullScreenView";
import { DisconnectedBanner } from "./DisconnectedBanner";
import { Initializer } from "./initializer";
import { MediaDevicesProvider } from "./livekit/MediaDevicesContext";
import { widget } from "./widget";
import { useTheme } from "./useTheme";
import { ProcessorProvider } from "./livekit/TrackProcessorContext";
import { type AppViewModel } from "./state/AppViewModel";
import { MediaDevicesContext } from "./MediaDevicesContext";

const SentryRoute = Sentry.withSentryReactRouterV7Routing(Route);

@@ -50,7 +51,11 @@ const ThemeProvider: FC<SimpleProviderProps> = ({ children }) => {
return children;
};

export const App: FC = () => {
interface Props {
vm: AppViewModel;
}

export const App: FC<Props> = ({ vm }) => {
const [loaded, setLoaded] = useState(false);
useEffect(() => {
Initializer.init()
@@ -72,7 +77,7 @@ export const App: FC = () => {
{loaded ? (
<Suspense fallback={null}>
<ClientProvider>
<MediaDevicesProvider>
<MediaDevicesContext.Provider value={vm.mediaDevices}>
<ProcessorProvider>
<Sentry.ErrorBoundary
fallback={(error) => (
@@ -91,7 +96,7 @@ export const App: FC = () => {
</Routes>
</Sentry.ErrorBoundary>
</ProcessorProvider>
</MediaDevicesProvider>
</MediaDevicesContext.Provider>
</ClientProvider>
</Suspense>
) : (
src/MediaDevicesContext.ts (new file, 52 lines)
@@ -0,0 +1,52 @@
/*
Copyright 2025 New Vector Ltd.

SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/

import { createContext, useContext, useMemo } from "react";
import { useObservableEagerState } from "observable-hooks";

import { type MediaDevices } from "./state/MediaDevices";

export const MediaDevicesContext = createContext<MediaDevices | undefined>(
undefined,
);

export function useMediaDevices(): MediaDevices {
const mediaDevices = useContext(MediaDevicesContext);
if (mediaDevices === undefined)
throw new Error(
"useMediaDevices must be used within a MediaDevices context provider",
);
return mediaDevices;
}

/**
* A convenience hook to get the audio node configuration for the earpiece.
* It will check the `useAsEarpiece` of the `audioOutput` device and return
* the appropriate pan and volume values.
*
* @returns pan and volume values for the earpiece audio node configuration.
*/
export const useEarpieceAudioConfig = (): {
pan: number;
volume: number;
} => {
const devices = useMediaDevices();
const audioOutput = useObservableEagerState(devices.audioOutput.selected$);
// We use only the right speaker (pan = 1) for the earpiece.
// This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone)
const pan = useMemo(
() => (audioOutput?.virtualEarpiece ? 1 : 0),
[audioOutput?.virtualEarpiece],
);
// We also do lower the volume by a factor of 10 to optimize for the usecase where
// a user is holding the phone to their ear.
const volume = useMemo(
() => (audioOutput?.virtualEarpiece ? 0.1 : 1),
[audioOutput?.virtualEarpiece],
);
return { pan, volume };
};
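The pan/volume pair returned by useEarpieceAudioConfig is meant to drive Web Audio nodes. The sketch below shows one plausible wiring, given as an assumption for illustration — the actual node graph lives in MatrixAudioRenderer and may differ.

// Hedged sketch: route a remote audio source through a panner and a gain node
// using the values from useEarpieceAudioConfig. pan = 1 plays only on the right
// channel (mimicking the iPhone's top speaker); volume = 0.1 drops the level
// for ear-to-phone listening.
function applyEarpieceConfig(
  ctx: AudioContext,
  source: MediaStreamAudioSourceNode,
  pan: number,
  volume: number,
): void {
  const panner = new StereoPannerNode(ctx, { pan });
  const gain = new GainNode(ctx, { gain: volume });
  source.connect(panner).connect(gain).connect(ctx.destination);
}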
@@ -5,7 +5,10 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/

import { BehaviorSubject, Subject } from "rxjs";
import { Subject } from "rxjs";
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";

const logger = rootLogger.getChild("[controlled-output]");

export interface Controls {
canEnterPip(): boolean;
@@ -42,12 +45,11 @@ export interface OutputDevice {
* If pipMode is enabled, EC will render a adapted call view layout.
*/
export const setPipEnabled$ = new Subject<boolean>();
// BehaviorSubject since the client might set this before we have subscribed (GroupCallView still in "loading" state)
// We want the devices that have been set during loading to be available immediately once loaded.
export const availableOutputDevices$ = new BehaviorSubject<OutputDevice[]>([]);
// BehaviorSubject since the client might set this before we have subscribed (GroupCallView still in "loading" state)
// We want the device that has been set during loading to be available immediately once loaded.
export const outputDevice$ = new BehaviorSubject<string | undefined>(undefined);

export const availableOutputDevices$ = new Subject<OutputDevice[]>();

export const outputDevice$ = new Subject<string | undefined>();

/**
* This allows the os to mute the call if the user
* presses the volume down button when it is at the minimum volume.
@@ -75,12 +77,15 @@ window.controls = {
setPipEnabled$.next(false);
},
setAvailableAudioDevices(devices: OutputDevice[]): void {
logger.info("setAvailableAudioDevices called from native:", devices);
availableOutputDevices$.next(devices);
},
setAudioDevice(id: string): void {
logger.info("setAudioDevice called from native", id);
outputDevice$.next(id);
},
setAudioEnabled(enabled: boolean): void {
logger.info("setAudioEnabled called from native:", enabled);
if (!setAudioEnabled$.observed)
throw new Error(
"Output controls are disabled. No setAudioEnabled$ observer",
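The Subject-to-BehaviorSubject change above exists because the native host may push devices before GroupCallView has finished loading and subscribed; a BehaviorSubject replays its latest value to late subscribers, while a plain Subject drops it. A minimal standalone illustration of the difference:

import { BehaviorSubject, Subject } from "rxjs";

const plain = new Subject<string>();
plain.next("speaker-1"); // pushed before anyone subscribes -> lost
plain.subscribe((id) => console.log("plain:", id)); // never receives "speaker-1"

const buffered = new BehaviorSubject<string | undefined>(undefined);
buffered.next("speaker-1"); // stored as the current value
buffered.subscribe((id) => console.log("buffered:", id)); // logs "speaker-1" immediately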
@@ -17,12 +17,14 @@ import { type ReactNode } from "react";
|
||||
import { useTracks } from "@livekit/components-react";
|
||||
|
||||
import { testAudioContext } from "../useAudioContext.test";
|
||||
import * as MediaDevicesContext from "./MediaDevicesContext";
|
||||
import * as MediaDevicesContext from "../MediaDevicesContext";
|
||||
import { MatrixAudioRenderer } from "./MatrixAudioRenderer";
|
||||
import { mockTrack } from "../utils/test";
|
||||
import { mockMediaDevices, mockTrack } from "../utils/test";
|
||||
|
||||
export const TestAudioContextConstructor = vi.fn(() => testAudioContext);
|
||||
|
||||
const MediaDevicesProvider = MediaDevicesContext.MediaDevicesContext.Provider;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.stubGlobal("AudioContext", TestAudioContextConstructor);
|
||||
});
|
||||
@@ -51,9 +53,11 @@ vi.mocked(useTracks).mockReturnValue(tracks);
|
||||
|
||||
it("should render for member", () => {
|
||||
const { container, queryAllByTestId } = render(
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>,
|
||||
<MediaDevicesProvider value={mockMediaDevices({})}>
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>
|
||||
</MediaDevicesProvider>,
|
||||
);
|
||||
expect(container).toBeTruthy();
|
||||
expect(queryAllByTestId("audio")).toHaveLength(1);
|
||||
@@ -64,7 +68,9 @@ it("should not render without member", () => {
|
||||
{ sender: "othermember", deviceId: "123" },
|
||||
] as CallMembership[];
|
||||
const { container, queryAllByTestId } = render(
|
||||
<MatrixAudioRenderer members={memberships} />,
|
||||
<MediaDevicesProvider value={mockMediaDevices({})}>
|
||||
<MatrixAudioRenderer members={memberships} />
|
||||
</MediaDevicesProvider>,
|
||||
);
|
||||
expect(container).toBeTruthy();
|
||||
expect(queryAllByTestId("audio")).toHaveLength(0);
|
||||
@@ -72,9 +78,11 @@ it("should not render without member", () => {
|
||||
|
||||
it("should not setup audioContext gain and pan if there is no need to.", () => {
|
||||
render(
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>,
|
||||
<MediaDevicesProvider value={mockMediaDevices({})}>
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>
|
||||
</MediaDevicesProvider>,
|
||||
);
|
||||
const audioTrack = tracks[0].publication.track! as RemoteAudioTrack;
|
||||
|
||||
@@ -93,9 +101,11 @@ it("should setup audioContext gain and pan", () => {
|
||||
volume: 0.1,
|
||||
});
|
||||
render(
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>,
|
||||
<MediaDevicesProvider value={mockMediaDevices({})}>
|
||||
<MatrixAudioRenderer
|
||||
members={[{ sender: "test", deviceId: "123" }] as CallMembership[]}
|
||||
/>
|
||||
</MediaDevicesProvider>,
|
||||
);
|
||||
|
||||
const audioTrack = tracks[0].publication.track! as RemoteAudioTrack;
|
||||
|
||||
@@ -16,7 +16,7 @@ import {
import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc";
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";

import { useEarpieceAudioConfig } from "./MediaDevicesContext";
import { useEarpieceAudioConfig } from "../MediaDevicesContext";
import { useReactiveState } from "../useReactiveState";
import * as controls from "../controls";
@@ -1,445 +0,0 @@
|
||||
/*
|
||||
Copyright 2023-2025 New Vector Ltd.
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
|
||||
Please see LICENSE in the repository root for full details.
|
||||
*/
|
||||
|
||||
import {
|
||||
type FC,
|
||||
createContext,
|
||||
useCallback,
|
||||
useContext,
|
||||
useEffect,
|
||||
useMemo,
|
||||
useRef,
|
||||
useState,
|
||||
type JSX,
|
||||
} from "react";
|
||||
import { createMediaDeviceObserver } from "@livekit/components-core";
|
||||
import { combineLatest, distinctUntilChanged, map, startWith } from "rxjs";
|
||||
import { useObservable, useObservableEagerState } from "observable-hooks";
|
||||
import { logger } from "matrix-js-sdk/lib/logger";
|
||||
import { deepCompare } from "matrix-js-sdk/lib/utils";
|
||||
|
||||
import {
|
||||
useSetting,
|
||||
audioInput as audioInputSetting,
|
||||
audioOutput as audioOutputSetting,
|
||||
videoInput as videoInputSetting,
|
||||
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
|
||||
type Setting,
|
||||
} from "../settings/settings";
|
||||
import { outputDevice$, availableOutputDevices$ } from "../controls";
|
||||
import { useUrlParams } from "../UrlParams";
|
||||
|
||||
// This hardcoded id is used in EX ios! It can only be changed in coordination with
|
||||
// the ios swift team.
|
||||
export const EARPIECE_CONFIG_ID = "earpiece-id";
|
||||
|
||||
export type DeviceLabel =
|
||||
| { type: "name"; name: string }
|
||||
| { type: "number"; number: number }
|
||||
| { type: "earpiece" }
|
||||
| { type: "default"; name: string | null };
|
||||
|
||||
export interface MediaDeviceHandle {
|
||||
/**
|
||||
* A map from available device IDs to labels.
|
||||
*/
|
||||
available: Map<string, DeviceLabel>;
|
||||
selectedId: string | undefined;
|
||||
/**
|
||||
* An additional device configuration that makes us use only one channel of the
|
||||
* output device and a reduced volume.
|
||||
*/
|
||||
useAsEarpiece: boolean | undefined;
|
||||
/**
|
||||
* The group ID of the selected device.
|
||||
*/
|
||||
// This is exposed sort of ad-hoc because it's only needed for knowing when to
|
||||
// restart the tracks of default input devices, and ideally this behavior
|
||||
// would be encapsulated somehow…
|
||||
selectedGroupId: string | undefined;
|
||||
select: (deviceId: string) => void;
|
||||
}
|
||||
|
||||
interface InputDevices {
|
||||
audioInput: MediaDeviceHandle;
|
||||
videoInput: MediaDeviceHandle;
|
||||
startUsingDeviceNames: () => void;
|
||||
stopUsingDeviceNames: () => void;
|
||||
usingNames: boolean;
|
||||
}
|
||||
|
||||
export interface MediaDevices extends Omit<InputDevices, "usingNames"> {
|
||||
audioOutput: MediaDeviceHandle;
|
||||
}
|
||||
|
||||
/**
|
||||
* An observable that represents if we should display the devices menu for iOS.
|
||||
* This implies the following
|
||||
* - hide any input devices (they do not work anyhow on ios)
|
||||
* - Show a button to show the native output picker instead.
|
||||
* - Only show the earpiece toggle option if the earpiece is available:
|
||||
* `availableOutputDevices$.includes((d)=>d.forEarpiece)`
|
||||
*/
|
||||
export const iosDeviceMenu$ = alwaysShowIphoneEarpieceSetting.value$.pipe(
|
||||
map((v) => v || navigator.userAgent.includes("iPhone")),
|
||||
);
|
||||
|
||||
function useSelectedId(
|
||||
available: Map<string, DeviceLabel>,
|
||||
preferredId: string | undefined,
|
||||
): string | undefined {
|
||||
return useMemo(() => {
|
||||
if (available.size) {
|
||||
// If the preferred device is available, use it. Or if every available
|
||||
// device ID is falsy, the browser is probably just being paranoid about
|
||||
// fingerprinting and we should still try using the preferred device.
|
||||
// Worst case it is not available and the browser will gracefully fall
|
||||
// back to some other device for us when requesting the media stream.
|
||||
// Otherwise, select the first available device.
|
||||
return (preferredId !== undefined && available.has(preferredId)) ||
|
||||
(available.size === 1 && available.has(""))
|
||||
? preferredId
|
||||
: available.keys().next().value;
|
||||
}
|
||||
return undefined;
|
||||
}, [available, preferredId]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Hook to get access to a mediaDevice handle for a kind. This allows to list
|
||||
* the available devices, read and set the selected device.
|
||||
* @param kind Audio input, output or video output.
|
||||
* @param setting The setting this handle's selection should be synced with.
|
||||
* @param usingNames If the hook should query device names for the associated
|
||||
* list.
|
||||
* @returns A handle for the chosen kind.
|
||||
*/
|
||||
function useMediaDeviceHandle(
|
||||
kind: MediaDeviceKind,
|
||||
setting: Setting<string | undefined>,
|
||||
usingNames: boolean,
|
||||
): MediaDeviceHandle {
|
||||
const hasRequestedPermissions = useRef(false);
|
||||
const requestPermissions = usingNames || hasRequestedPermissions.current;
|
||||
// Make sure we don't needlessly reset to a device observer without names,
|
||||
// once permissions are already given
|
||||
hasRequestedPermissions.current ||= usingNames;
|
||||
|
||||
// We use a bare device observer here rather than one of the fancy device
|
||||
// selection hooks from @livekit/components-react, because
|
||||
// useMediaDeviceSelect expects a room or track, which we don't have here, and
|
||||
// useMediaDevices provides no way to request device names.
|
||||
// Tragically, the only way to get device names out of LiveKit is to specify a
|
||||
// kind, which then results in multiple permissions requests.
|
||||
const deviceObserver$ = useMemo(
|
||||
() =>
|
||||
createMediaDeviceObserver(
|
||||
kind,
|
||||
() => logger.error("Error creating MediaDeviceObserver"),
|
||||
requestPermissions,
|
||||
).pipe(
|
||||
startWith([]),
|
||||
// This Observable emits new values whenever the browser fires a
|
||||
// MediaDevices 'devicechange' event. One would think, innocently, that
|
||||
// a 'devicechange' event means the devices have changed. But as of the
|
||||
// time of writing, we are seeing mobile Safari firing spurious
|
||||
// 'devicechange' events (where no change has actually occurred) when
|
||||
// we call MediaDevices.getUserMedia. So, filter by deep equality.
|
||||
distinctUntilChanged<MediaDeviceInfo[]>(deepCompare),
|
||||
),
|
||||
[kind, requestPermissions],
|
||||
);
|
||||
const available = useObservableEagerState(
|
||||
useMemo(
|
||||
() =>
|
||||
deviceObserver$.pipe(
|
||||
map((availableRaw) => {
|
||||
// Sometimes browsers (particularly Firefox) can return multiple device
|
||||
// entries for the exact same device ID; using a map deduplicates them
|
||||
let available = new Map<string, DeviceLabel>(
|
||||
availableRaw.map((d, i) => [
|
||||
d.deviceId,
|
||||
d.label
|
||||
? { type: "name", name: d.label }
|
||||
: { type: "number", number: i + 1 },
|
||||
]),
|
||||
);
|
||||
// Create a virtual default audio output for browsers that don't have one.
|
||||
// Its device ID must be the empty string because that's what setSinkId
|
||||
// recognizes.
|
||||
// We also create this if we do not have any available devices, so that
|
||||
// we can use the default or the earpiece.
|
||||
if (
|
||||
kind === "audiooutput" &&
|
||||
!available.has("") &&
|
||||
!available.has("default") &&
|
||||
available.size
|
||||
)
|
||||
available = new Map([
|
||||
["", { type: "default", name: availableRaw[0]?.label || null }],
|
||||
...available,
|
||||
]);
|
||||
// Note: creating virtual default input devices would be another problem
|
||||
// entirely, because requesting a media stream from deviceId "" won't
|
||||
// automatically track the default device.
|
||||
return available;
|
||||
}),
|
||||
),
|
||||
[deviceObserver$, kind],
|
||||
),
|
||||
);
|
||||
|
||||
const [preferredId, select] = useSetting(setting);
|
||||
const selectedId = useSelectedId(available, preferredId);
|
||||
|
||||
const selectedGroupId = useObservableEagerState(
|
||||
useMemo(
|
||||
() =>
|
||||
deviceObserver$.pipe(
|
||||
map(
|
||||
(availableRaw) =>
|
||||
availableRaw.find((d) => d.deviceId === selectedId)?.groupId,
|
||||
),
|
||||
),
|
||||
[deviceObserver$, selectedId],
|
||||
),
|
||||
);
|
||||
|
||||
return useMemo(
|
||||
() => ({
|
||||
available,
|
||||
selectedId,
|
||||
useAsEarpiece: false,
|
||||
selectedGroupId,
|
||||
select,
|
||||
}),
|
||||
[available, selectedId, selectedGroupId, select],
|
||||
);
|
||||
}
|
||||
|
||||
export const deviceStub: MediaDeviceHandle = {
|
||||
available: new Map(),
|
||||
selectedId: undefined,
|
||||
selectedGroupId: undefined,
|
||||
select: () => {},
|
||||
useAsEarpiece: false,
|
||||
};
|
||||
|
||||
export const devicesStub: MediaDevices = {
|
||||
audioInput: deviceStub,
|
||||
audioOutput: deviceStub,
|
||||
videoInput: deviceStub,
|
||||
startUsingDeviceNames: () => {},
|
||||
stopUsingDeviceNames: () => {},
|
||||
};
|
||||
|
||||
export const MediaDevicesContext = createContext<MediaDevices>(devicesStub);
|
||||
|
||||
function useInputDevices(): InputDevices {
|
||||
// Counts the number of callers currently using device names.
|
||||
const [numCallersUsingNames, setNumCallersUsingNames] = useState(0);
|
||||
const usingNames = numCallersUsingNames > 0;
|
||||
|
||||
const audioInput = useMediaDeviceHandle(
|
||||
"audioinput",
|
||||
audioInputSetting,
|
||||
usingNames,
|
||||
);
|
||||
const videoInput = useMediaDeviceHandle(
|
||||
"videoinput",
|
||||
videoInputSetting,
|
||||
usingNames,
|
||||
);
|
||||
|
||||
const startUsingDeviceNames = useCallback(
|
||||
() => setNumCallersUsingNames((n) => n + 1),
|
||||
[setNumCallersUsingNames],
|
||||
);
|
||||
const stopUsingDeviceNames = useCallback(
|
||||
() => setNumCallersUsingNames((n) => n - 1),
|
||||
[setNumCallersUsingNames],
|
||||
);
|
||||
|
||||
return {
|
||||
audioInput,
|
||||
videoInput,
|
||||
startUsingDeviceNames,
|
||||
stopUsingDeviceNames,
|
||||
usingNames,
|
||||
};
|
||||
}
|
||||
|
||||
interface Props {
|
||||
children: JSX.Element;
|
||||
}
|
||||
|
||||
export const MediaDevicesProvider: FC<Props> = ({ children }) => {
|
||||
const {
|
||||
audioInput,
|
||||
videoInput,
|
||||
startUsingDeviceNames,
|
||||
stopUsingDeviceNames,
|
||||
usingNames,
|
||||
} = useInputDevices();
|
||||
|
||||
const { controlledAudioDevices } = useUrlParams();
|
||||
|
||||
const webViewAudioOutput = useMediaDeviceHandle(
|
||||
"audiooutput",
|
||||
audioOutputSetting,
|
||||
usingNames,
|
||||
);
|
||||
const controlledAudioOutput = useControlledOutput();
|
||||
|
||||
const context: MediaDevices = useMemo(
|
||||
() => ({
|
||||
audioInput,
|
||||
audioOutput: controlledAudioDevices
|
||||
? controlledAudioOutput
|
||||
: webViewAudioOutput,
|
||||
videoInput,
|
||||
startUsingDeviceNames,
|
||||
stopUsingDeviceNames,
|
||||
}),
|
||||
[
|
||||
audioInput,
|
||||
controlledAudioDevices,
|
||||
controlledAudioOutput,
|
||||
webViewAudioOutput,
|
||||
videoInput,
|
||||
startUsingDeviceNames,
|
||||
stopUsingDeviceNames,
|
||||
],
|
||||
);
|
||||
|
||||
return (
|
||||
<MediaDevicesContext.Provider value={context}>
|
||||
{children}
|
||||
</MediaDevicesContext.Provider>
|
||||
);
|
||||
};
|
||||
|
||||
function useControlledOutput(): MediaDeviceHandle {
|
||||
const { available } = useObservableEagerState(
|
||||
useObservable(() => {
|
||||
const outputDeviceData$ = availableOutputDevices$.pipe(
|
||||
map((devices) => {
|
||||
const deviceForEarpiece = devices.find((d) => d.forEarpiece);
|
||||
const deviceMapTuple: [string, DeviceLabel][] = devices.map(
|
||||
({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => {
|
||||
let deviceLabel: DeviceLabel = { type: "name", name };
|
||||
// if (isExternalHeadset) // Do we want this?
|
||||
if (isEarpiece) deviceLabel = { type: "earpiece" };
|
||||
if (isSpeaker) deviceLabel = { type: "default", name };
|
||||
return [id, deviceLabel];
|
||||
},
|
||||
);
|
||||
return {
|
||||
devicesMap: new Map<string, DeviceLabel>(deviceMapTuple),
|
||||
deviceForEarpiece,
|
||||
};
|
||||
}),
|
||||
);
|
||||
|
||||
return combineLatest(
|
||||
[outputDeviceData$, iosDeviceMenu$],
|
||||
({ devicesMap, deviceForEarpiece }, iosShowEarpiece) => {
|
||||
let available = devicesMap;
|
||||
if (iosShowEarpiece && !!deviceForEarpiece) {
|
||||
available = new Map([
|
||||
...devicesMap.entries(),
|
||||
[EARPIECE_CONFIG_ID, { type: "earpiece" }],
|
||||
]);
|
||||
}
|
||||
return { available, deviceForEarpiece };
|
||||
},
|
||||
);
|
||||
}),
|
||||
);
|
||||
const [preferredId, setPreferredId] = useSetting(audioOutputSetting);
|
||||
useEffect(() => {
|
||||
const subscription = outputDevice$.subscribe((id) => {
|
||||
if (id) setPreferredId(id);
|
||||
});
|
||||
return (): void => subscription.unsubscribe();
|
||||
}, [setPreferredId]);
|
||||
|
||||
const selectedId = useSelectedId(available, preferredId);
|
||||
|
||||
const [asEarpiece, setAsEarpiece] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
// Let the hosting application know which output device has been selected.
|
||||
// This information is probably only of interest if the earpiece mode has been
|
||||
// selected - for example, Element X iOS listens to this to determine whether it
|
||||
// should enable the proximity sensor.
|
||||
if (selectedId) {
|
||||
window.controls.onAudioDeviceSelect?.(selectedId);
|
||||
// Call deprecated method for backwards compatibility.
|
||||
window.controls.onOutputDeviceSelect?.(selectedId);
|
||||
}
|
||||
setAsEarpiece(selectedId === EARPIECE_CONFIG_ID);
|
||||
}, [selectedId]);
|
||||
|
||||
return useMemo(
|
||||
() => ({
|
||||
available: available,
|
||||
selectedId,
|
||||
selectedGroupId: undefined,
|
||||
select: setPreferredId,
|
||||
useAsEarpiece: asEarpiece,
|
||||
}),
|
||||
[available, selectedId, setPreferredId, asEarpiece],
|
||||
);
|
||||
}
|
||||
|
||||
export const useMediaDevices = (): MediaDevices =>
|
||||
useContext(MediaDevicesContext);
|
||||
|
||||
/**
|
||||
* React hook that requests for the media devices context to be populated with
|
||||
* real device names while this component is mounted. This is not done by
|
||||
* default because it may involve requesting additional permissions from the
|
||||
* user.
|
||||
*/
|
||||
export const useMediaDeviceNames = (
|
||||
context: MediaDevices,
|
||||
enabled = true,
|
||||
): void =>
|
||||
useEffect(() => {
|
||||
if (enabled) {
|
||||
context.startUsingDeviceNames();
|
||||
return context.stopUsingDeviceNames;
|
||||
}
|
||||
}, [context, enabled]);
|
||||
|
||||
/**
|
||||
* A convenience hook to get the audio node configuration for the earpiece.
|
||||
* It will check the `useAsEarpiece` of the `audioOutput` device and return
|
||||
* the appropriate pan and volume values.
|
||||
*
|
||||
* @returns pan and volume values for the earpiece audio node configuration.
|
||||
*/
|
||||
export const useEarpieceAudioConfig = (): {
|
||||
pan: number;
|
||||
volume: number;
|
||||
} => {
|
||||
const { audioOutput } = useMediaDevices();
|
||||
// We use only the right speaker (pan = 1) for the earpiece.
|
||||
// This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone)
|
||||
const pan = useMemo(
|
||||
() => (audioOutput.useAsEarpiece ? 1 : 0),
|
||||
[audioOutput.useAsEarpiece],
|
||||
);
|
||||
// We also do lower the volume by a factor of 10 to optimize for the usecase where
|
||||
// a user is holding the phone to their ear.
|
||||
const volume = useMemo(
|
||||
() => (audioOutput.useAsEarpiece ? 0.1 : 1),
|
||||
[audioOutput.useAsEarpiece],
|
||||
);
|
||||
return { pan, volume };
|
||||
};
|
||||
@@ -19,12 +19,18 @@ import E2EEWorker from "livekit-client/e2ee-worker?worker";
|
||||
import { logger } from "matrix-js-sdk/lib/logger";
|
||||
import { type MatrixRTCSession } from "matrix-js-sdk/lib/matrixrtc";
|
||||
import { useObservable, useObservableEagerState } from "observable-hooks";
|
||||
import { map } from "rxjs";
|
||||
import {
|
||||
map,
|
||||
NEVER,
|
||||
type Observable,
|
||||
type Subscription,
|
||||
switchMap,
|
||||
} from "rxjs";
|
||||
|
||||
import { defaultLiveKitOptions } from "./options";
|
||||
import { type SFUConfig } from "./openIDSFU";
|
||||
import { type MuteStates } from "../room/MuteStates";
|
||||
import { type MediaDeviceHandle, useMediaDevices } from "./MediaDevicesContext";
|
||||
import { useMediaDevices } from "../MediaDevicesContext";
|
||||
import {
|
||||
type ECConnectionState,
|
||||
useECConnectionState,
|
||||
@@ -39,6 +45,8 @@ import {
|
||||
import { observeTrackReference$ } from "../state/MediaViewModel";
|
||||
import { useUrlParams } from "../UrlParams";
|
||||
import { useInitial } from "../useInitial";
|
||||
import { getValue } from "../utils/observable";
|
||||
import { type SelectedDevice } from "../state/MediaDevices";
|
||||
|
||||
interface UseLivekitResult {
|
||||
livekitRoom?: Room;
|
||||
@@ -56,7 +64,9 @@ export function useLivekit(
|
||||
const initialMuteStates = useInitial(() => muteStates);
|
||||
|
||||
const devices = useMediaDevices();
|
||||
const initialDevices = useInitial(() => devices);
|
||||
const initialAudioInputId = useInitial(
|
||||
() => getValue(devices.audioInput.selected$)?.id,
|
||||
);
|
||||
|
||||
// Store if audio/video are currently updating. If to prohibit unnecessary calls
|
||||
// to setMicrophoneEnabled/setCameraEnabled
|
||||
@@ -94,15 +104,20 @@ export function useLivekit(
|
||||
...defaultLiveKitOptions,
|
||||
videoCaptureDefaults: {
|
||||
...defaultLiveKitOptions.videoCaptureDefaults,
|
||||
deviceId: initialDevices.videoInput.selectedId,
|
||||
deviceId: getValue(devices.videoInput.selected$)?.id,
|
||||
processor,
|
||||
},
|
||||
audioCaptureDefaults: {
|
||||
...defaultLiveKitOptions.audioCaptureDefaults,
|
||||
deviceId: initialDevices.audioInput.selectedId,
|
||||
deviceId: initialAudioInputId,
|
||||
},
|
||||
audioOutput: {
|
||||
deviceId: initialDevices.audioOutput.selectedId,
|
||||
// When using controlled audio devices, we don't want to set the
|
||||
// deviceId here, because it will be set by the native app.
|
||||
// (also the id does not need to match a browser device id)
|
||||
deviceId: controlledAudioDevices
|
||||
? undefined
|
||||
: getValue(devices.audioOutput.selected$)?.id,
|
||||
},
|
||||
e2ee,
|
||||
};
|
||||
@@ -157,7 +172,7 @@ export function useLivekit(
|
||||
);
|
||||
|
||||
const connectionState = useECConnectionState(
|
||||
initialDevices.audioInput.selectedId,
|
||||
initialAudioInputId,
|
||||
initialMuteStates.audio.enabled,
|
||||
room,
|
||||
sfuConfig,
|
||||
@@ -312,62 +327,65 @@ export function useLivekit(
|
||||
) {
|
||||
const syncDevice = (
|
||||
kind: MediaDeviceKind,
|
||||
device: MediaDeviceHandle,
|
||||
): void => {
|
||||
const id = device.selectedId;
|
||||
|
||||
// Detect if we're trying to use chrome's default device, in which case
|
||||
// we need to to see if the default device has changed to a different device
|
||||
// by comparing the group ID of the device we're using against the group ID
|
||||
// of what the default device is *now*.
|
||||
// This is special-cased for only audio inputs because we need to dig around
|
||||
// in the LocalParticipant object for the track object and there's not a nice
|
||||
// way to do that generically. There is usually no OS-level default video capture
|
||||
// device anyway, and audio outputs work differently.
|
||||
if (
|
||||
id === "default" &&
|
||||
kind === "audioinput" &&
|
||||
room.options.audioCaptureDefaults?.deviceId === "default"
|
||||
) {
|
||||
const activeMicTrack = Array.from(
|
||||
room.localParticipant.audioTrackPublications.values(),
|
||||
).find((d) => d.source === Track.Source.Microphone)?.track;
|
||||
|
||||
selected$: Observable<SelectedDevice | undefined>,
|
||||
): Subscription =>
|
||||
selected$.subscribe((device) => {
|
||||
if (
|
||||
activeMicTrack &&
|
||||
// only restart if the stream is still running: LiveKit will detect
|
||||
// when a track stops & restart appropriately, so this is not our job.
|
||||
// Plus, we need to avoid restarting again if the track is already in
|
||||
// the process of being restarted.
|
||||
activeMicTrack.mediaStreamTrack.readyState !== "ended" &&
|
||||
device.selectedGroupId !==
|
||||
activeMicTrack.mediaStreamTrack.getSettings().groupId
|
||||
device !== undefined &&
|
||||
room.getActiveDevice(kind) !== device.id
|
||||
) {
|
||||
// It's different, so restart the track, ie. cause Livekit to do another
|
||||
// getUserMedia() call with deviceId: default to get the *new* default device.
|
||||
// Note that room.switchActiveDevice() won't work: Livekit will ignore it because
|
||||
// the deviceId hasn't changed (was & still is default).
|
||||
room.localParticipant
|
||||
.getTrackPublication(Track.Source.Microphone)
|
||||
?.audioTrack?.restartTrack()
|
||||
.catch((e) => {
|
||||
logger.error(`Failed to restart audio device track`, e);
|
||||
});
|
||||
}
|
||||
} else {
|
||||
if (id !== undefined && room.getActiveDevice(kind) !== id) {
|
||||
room
|
||||
.switchActiveDevice(kind, id)
|
||||
.switchActiveDevice(kind, device.id)
|
||||
.catch((e) =>
|
||||
logger.error(`Failed to sync ${kind} device with LiveKit`, e),
|
||||
);
|
||||
}
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
syncDevice("audioinput", devices.audioInput);
|
||||
syncDevice("audiooutput", devices.audioOutput);
|
||||
syncDevice("videoinput", devices.videoInput);
|
||||
const subscriptions = [
|
||||
syncDevice("audioinput", devices.audioInput.selected$),
|
||||
syncDevice("audiooutput", devices.audioOutput.selected$),
|
||||
syncDevice("videoinput", devices.videoInput.selected$),
|
||||
// Restart the audio input track whenever we detect that the active media
|
||||
// device has changed to refer to a different hardware device. We do this
|
||||
// for the sake of Chrome, which provides a "default" device that is meant
|
||||
// to match the system's default audio input, whatever that may be.
|
||||
// This is special-cased for only audio inputs because we need to dig around
|
||||
// in the LocalParticipant object for the track object and there's not a nice
|
||||
// way to do that generically. There is usually no OS-level default video capture
|
||||
// device anyway, and audio outputs work differently.
|
||||
devices.audioInput.selected$
|
||||
.pipe(switchMap((device) => device?.hardwareDeviceChange$ ?? NEVER))
|
||||
.subscribe(() => {
|
||||
const activeMicTrack = Array.from(
|
||||
room.localParticipant.audioTrackPublications.values(),
|
||||
).find((d) => d.source === Track.Source.Microphone)?.track;
|
||||
|
||||
if (
|
||||
activeMicTrack &&
|
||||
// only restart if the stream is still running: LiveKit will detect
|
||||
// when a track stops & restart appropriately, so this is not our job.
|
||||
// Plus, we need to avoid restarting again if the track is already in
|
||||
// the process of being restarted.
|
||||
activeMicTrack.mediaStreamTrack.readyState !== "ended"
|
||||
) {
|
||||
// Restart the track, which will cause Livekit to do another
|
||||
// getUserMedia() call with deviceId: default to get the *new* default device.
|
||||
// Note that room.switchActiveDevice() won't work: Livekit will ignore it because
|
||||
// the deviceId hasn't changed (was & still is default).
|
||||
room.localParticipant
|
||||
.getTrackPublication(Track.Source.Microphone)
|
||||
?.audioTrack?.restartTrack()
|
||||
.catch((e) => {
|
||||
logger.error(`Failed to restart audio device track`, e);
|
||||
});
|
||||
}
|
||||
}),
|
||||
];
|
||||
|
||||
return (): void => {
|
||||
for (const s of subscriptions) s.unsubscribe();
|
||||
};
|
||||
}
|
||||
}, [room, devices, connectionState, controlledAudioDevices]);
|
||||
|
||||
|
||||
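The hunk above replaces the old selectedId-based syncDevice helper with subscriptions to each device's selected$ observable. A condensed sketch of that subscription pattern follows, under the assumption that Room exposes getActiveDevice/switchActiveDevice as in livekit-client; the helper name and SelectedDevice shape are illustrative.

import { type Room } from "livekit-client";
import { type Observable, type Subscription } from "rxjs";

interface SelectedDevice {
  id: string;
}

// Subscribe each kind's selection to the LiveKit room and return a cleanup
// function that tears all of the subscriptions down together.
function syncDevices(
  room: Room,
  selections: [MediaDeviceKind, Observable<SelectedDevice | undefined>][],
): () => void {
  const subscriptions: Subscription[] = selections.map(([kind, selected$]) =>
    selected$.subscribe((device) => {
      if (device !== undefined && room.getActiveDevice(kind) !== device.id) {
        room
          .switchActiveDevice(kind, device.id)
          .catch((e) =>
            console.error(`Failed to sync ${kind} device with LiveKit`, e),
          );
      }
    }),
  );
  return (): void => subscriptions.forEach((s) => s.unsubscribe());
}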
@@ -23,6 +23,7 @@ import {
import { App } from "./App";
import { init as initRageshake } from "./settings/rageshake";
import { Initializer } from "./initializer";
import { AppViewModel } from "./state/AppViewModel";

window.setLKLogLevel = setLKLogLevel;

@@ -60,7 +61,7 @@ Initializer.initBeforeReact()
.then(() => {
root.render(
<StrictMode>
<App />
<App vm={new AppViewModel()} />
</StrictMode>,
);
})
@@ -32,6 +32,7 @@ import {
|
||||
mockEmitter,
|
||||
mockMatrixRoom,
|
||||
mockMatrixRoomMember,
|
||||
mockMediaDevices,
|
||||
mockRtcMembership,
|
||||
MockRTCSession,
|
||||
} from "../utils/test";
|
||||
@@ -40,6 +41,7 @@ import { type WidgetHelpers } from "../widget";
|
||||
import { LazyEventEmitter } from "../LazyEventEmitter";
|
||||
import { MatrixRTCFocusMissingError } from "../utils/errors";
|
||||
import { ProcessorProvider } from "../livekit/TrackProcessorContext";
|
||||
import { MediaDevicesContext } from "../MediaDevicesContext";
|
||||
|
||||
vi.mock("../soundUtils");
|
||||
vi.mock("../useAudioContext");
|
||||
@@ -147,20 +149,22 @@ function createGroupCallView(
|
||||
const { getByText } = render(
|
||||
<BrowserRouter>
|
||||
<TooltipProvider>
|
||||
<ProcessorProvider>
|
||||
<GroupCallView
|
||||
client={client}
|
||||
isPasswordlessUser={false}
|
||||
confineToRoom={false}
|
||||
preload={false}
|
||||
skipLobby={false}
|
||||
hideHeader={true}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
isJoined={joined}
|
||||
muteStates={muteState}
|
||||
widget={widget}
|
||||
/>
|
||||
</ProcessorProvider>
|
||||
<MediaDevicesContext.Provider value={mockMediaDevices({})}>
|
||||
<ProcessorProvider>
|
||||
<GroupCallView
|
||||
client={client}
|
||||
isPasswordlessUser={false}
|
||||
confineToRoom={false}
|
||||
preload={false}
|
||||
skipLobby={false}
|
||||
hideHeader={true}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
isJoined={joined}
|
||||
muteStates={muteState}
|
||||
widget={widget}
|
||||
/>
|
||||
</ProcessorProvider>
|
||||
</MediaDevicesContext.Provider>
|
||||
</TooltipProvider>
|
||||
</BrowserRouter>,
|
||||
);
|
||||
|
||||
@@ -40,7 +40,7 @@ import { useProfile } from "../profile/useProfile";
|
||||
import { findDeviceByName } from "../utils/media";
|
||||
import { ActiveCall } from "./InCallView";
|
||||
import { MUTE_PARTICIPANT_COUNT, type MuteStates } from "./MuteStates";
|
||||
import { useMediaDevices } from "../livekit/MediaDevicesContext";
|
||||
import { useMediaDevices } from "../MediaDevicesContext";
|
||||
import { useMatrixRTCSessionMemberships } from "../useMatrixRTCSessionMemberships";
|
||||
import { enterRTCSession, leaveRTCSession } from "../rtcSessionHelpers";
|
||||
import {
|
||||
@@ -197,8 +197,7 @@ export const GroupCallView: FC<Props> = ({
|
||||
[memberships],
|
||||
);
|
||||
|
||||
const deviceContext = useMediaDevices();
|
||||
const latestDevices = useLatest(deviceContext);
|
||||
const mediaDevices = useMediaDevices();
|
||||
const latestMuteStates = useLatest(muteStates);
|
||||
|
||||
const enterRTCSessionOrError = useCallback(
|
||||
@@ -250,7 +249,7 @@ export const GroupCallView: FC<Props> = ({
|
||||
logger.debug(
|
||||
`Found audio input ID ${deviceId} for name ${audioInput}`,
|
||||
);
|
||||
latestDevices.current!.audioInput.select(deviceId);
|
||||
mediaDevices.audioInput.select(deviceId);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -264,7 +263,7 @@ export const GroupCallView: FC<Props> = ({
|
||||
logger.debug(
|
||||
`Found video input ID ${deviceId} for name ${videoInput}`,
|
||||
);
|
||||
latestDevices.current!.videoInput.select(deviceId);
|
||||
mediaDevices.videoInput.select(deviceId);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -306,7 +305,7 @@ export const GroupCallView: FC<Props> = ({
|
||||
preload,
|
||||
skipLobby,
|
||||
perParticipantE2EE,
|
||||
latestDevices,
|
||||
mediaDevices,
|
||||
latestMuteStates,
|
||||
enterRTCSessionOrError,
|
||||
useNewMembershipManager,
|
||||
|
||||
@@ -31,6 +31,7 @@ import {
|
||||
mockLocalParticipant,
|
||||
mockMatrixRoom,
|
||||
mockMatrixRoomMember,
|
||||
mockMediaDevices,
|
||||
mockRemoteParticipant,
|
||||
mockRtcMembership,
|
||||
type MockRTCSession,
|
||||
@@ -45,6 +46,7 @@ import {
|
||||
import { ReactionsSenderProvider } from "../reactions/useReactionsSender";
|
||||
import { useRoomEncryptionSystem } from "../e2ee/sharedKeyManagement";
|
||||
import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer";
|
||||
import { MediaDevicesContext } from "../MediaDevicesContext";
|
||||
|
||||
// vi.hoisted(() => {
|
||||
// localStorage = {} as unknown as Storage;
|
||||
@@ -147,41 +149,43 @@ function createInCallView(): RenderResult & {
|
||||
rtcSession.joined = true;
|
||||
const renderResult = render(
|
||||
<BrowserRouter>
|
||||
<ReactionsSenderProvider
|
||||
vm={vm}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
>
|
||||
<TooltipProvider>
|
||||
<RoomContext.Provider value={livekitRoom}>
|
||||
<InCallView
|
||||
client={client}
|
||||
hideHeader={true}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
muteStates={muteState}
|
||||
vm={vm}
|
||||
matrixInfo={{
|
||||
userId: "",
|
||||
displayName: "",
|
||||
avatarUrl: "",
|
||||
roomId: "",
|
||||
roomName: "",
|
||||
roomAlias: null,
|
||||
roomAvatar: null,
|
||||
e2eeSystem: {
|
||||
kind: E2eeType.NONE,
|
||||
},
|
||||
}}
|
||||
livekitRoom={livekitRoom}
|
||||
participantCount={0}
|
||||
onLeave={function (): void {
|
||||
throw new Error("Function not implemented.");
|
||||
}}
|
||||
connState={ConnectionState.Connected}
|
||||
onShareClick={null}
|
||||
/>
|
||||
</RoomContext.Provider>
|
||||
</TooltipProvider>
|
||||
</ReactionsSenderProvider>
|
||||
<MediaDevicesContext.Provider value={mockMediaDevices({})}>
|
||||
<ReactionsSenderProvider
|
||||
vm={vm}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
>
|
||||
<TooltipProvider>
|
||||
<RoomContext.Provider value={livekitRoom}>
|
||||
<InCallView
|
||||
client={client}
|
||||
hideHeader={true}
|
||||
rtcSession={rtcSession as unknown as MatrixRTCSession}
|
||||
muteStates={muteState}
|
||||
vm={vm}
|
||||
matrixInfo={{
|
||||
userId: "",
|
||||
displayName: "",
|
||||
avatarUrl: "",
|
||||
roomId: "",
|
||||
roomName: "",
|
||||
roomAlias: null,
|
||||
roomAvatar: null,
|
||||
e2eeSystem: {
|
||||
kind: E2eeType.NONE,
|
||||
},
|
||||
}}
|
||||
livekitRoom={livekitRoom}
|
||||
participantCount={0}
|
||||
onLeave={function (): void {
|
||||
throw new Error("Function not implemented.");
|
||||
}}
|
||||
connState={ConnectionState.Connected}
|
||||
onShareClick={null}
|
||||
/>
|
||||
</RoomContext.Provider>
|
||||
</TooltipProvider>
|
||||
</ReactionsSenderProvider>
|
||||
</MediaDevicesContext.Provider>
|
||||
</BrowserRouter>,
|
||||
);
|
||||
return {
|
||||
|
||||
@@ -24,7 +24,7 @@ import {
|
||||
type LocalVideoTrack,
|
||||
Track,
|
||||
} from "livekit-client";
|
||||
import { useObservable } from "observable-hooks";
|
||||
import { useObservable, useObservableEagerState } from "observable-hooks";
|
||||
import { map } from "rxjs";
|
||||
import { useNavigate } from "react-router-dom";
|
||||
|
||||
@@ -45,7 +45,7 @@ import { SettingsModal, defaultSettingsTab } from "../settings/SettingsModal";
|
||||
import { useMediaQuery } from "../useMediaQuery";
|
||||
import { E2eeType } from "../e2ee/e2eeType";
|
||||
import { Link } from "../button/Link";
|
||||
import { useMediaDevices } from "../livekit/MediaDevicesContext";
|
||||
import { useMediaDevices } from "../MediaDevicesContext";
|
||||
import { useInitial } from "../useInitial";
|
||||
import { useSwitchCamera as useShowSwitchCamera } from "./useSwitchCamera";
|
||||
import {
|
||||
@@ -54,6 +54,7 @@ import {
|
||||
} from "../livekit/TrackProcessorContext";
|
||||
import { usePageTitle } from "../usePageTitle";
|
||||
import { useLatest } from "../useLatest";
|
||||
import { getValue } from "../utils/observable";
|
||||
|
||||
interface Props {
|
||||
client: MatrixClient;
|
||||
@@ -126,13 +127,18 @@ export const LobbyView: FC<Props> = ({
|
||||
);
|
||||
|
||||
const devices = useMediaDevices();
|
||||
const videoInputId = useObservableEagerState(
|
||||
devices.videoInput.selected$,
|
||||
)?.id;
|
||||
|
||||
// Capture the audio options as they were when we first mounted, because
|
||||
// we're not doing anything with the audio anyway so we don't need to
|
||||
// re-open the devices when they change (see below).
|
||||
const initialAudioOptions = useInitial(
|
||||
() =>
|
||||
muteStates.audio.enabled && { deviceId: devices.audioInput.selectedId },
|
||||
muteStates.audio.enabled && {
|
||||
deviceId: getValue(devices.audioInput.selected$)?.id,
|
||||
},
|
||||
);
|
||||
|
||||
const { processor } = useTrackProcessor();
|
||||
@@ -148,14 +154,14 @@ export const LobbyView: FC<Props> = ({
|
||||
// which would cause the devices to be re-opened on the next render.
|
||||
audio: Object.assign({}, initialAudioOptions),
|
||||
video: muteStates.video.enabled && {
|
||||
deviceId: devices.videoInput.selectedId,
|
||||
deviceId: videoInputId,
|
||||
processor: initialProcessor,
|
||||
},
|
||||
}),
|
||||
[
|
||||
initialAudioOptions,
|
||||
muteStates.video.enabled,
|
||||
devices.videoInput.selectedId,
|
||||
videoInputId,
|
||||
initialProcessor,
|
||||
],
|
||||
);
|
||||
|
||||
@@ -5,20 +5,29 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
|
||||
Please see LICENSE in the repository root for full details.
|
||||
*/
|
||||
|
||||
import { afterAll, afterEach, describe, expect, it, vi } from "vitest";
|
||||
import {
|
||||
afterAll,
|
||||
afterEach,
|
||||
describe,
|
||||
expect,
|
||||
it,
|
||||
onTestFinished,
|
||||
vi,
|
||||
} from "vitest";
|
||||
import { type FC, useCallback, useState } from "react";
|
||||
import { render, screen } from "@testing-library/react";
|
||||
import { MemoryRouter } from "react-router-dom";
|
||||
import userEvent from "@testing-library/user-event";
|
||||
import { createMediaDeviceObserver } from "@livekit/components-core";
|
||||
import { of } from "rxjs";
|
||||
|
||||
import { useMuteStates } from "./MuteStates";
|
||||
import {
|
||||
type DeviceLabel,
|
||||
type MediaDeviceHandle,
|
||||
type MediaDevices,
|
||||
MediaDevicesContext,
|
||||
} from "../livekit/MediaDevicesContext";
|
||||
import { MediaDevicesContext } from "../MediaDevicesContext";
|
||||
import { mockConfig } from "../utils/test";
|
||||
import { MediaDevices } from "../state/MediaDevices";
|
||||
import { ObservableScope } from "../state/ObservableScope";
|
||||
|
||||
vi.mock("@livekit/components-core");
|
||||
|
||||
interface TestComponentProps {
|
||||
isJoined?: boolean;
|
||||
@@ -73,16 +82,6 @@ const mockCamera: MediaDeviceInfo = {
|
||||
},
|
||||
};
|
||||
|
||||
function mockDevices(available: Map<string, DeviceLabel>): MediaDeviceHandle {
|
||||
return {
|
||||
available,
|
||||
selectedId: "",
|
||||
selectedGroupId: "",
|
||||
select: (): void => {},
|
||||
useAsEarpiece: false,
|
||||
};
|
||||
}
|
||||
|
||||
function mockMediaDevices(
|
||||
{
|
||||
microphone,
|
||||
@@ -94,21 +93,21 @@ function mockMediaDevices(
|
||||
camera?: boolean;
|
||||
} = { microphone: true, speaker: true, camera: true },
|
||||
): MediaDevices {
|
||||
return {
|
||||
audioInput: mockDevices(
|
||||
microphone
|
||||
? new Map([[mockMicrophone.deviceId, mockMicrophone]])
|
||||
: new Map(),
|
||||
),
|
||||
audioOutput: mockDevices(
|
||||
speaker ? new Map([[mockSpeaker.deviceId, mockSpeaker]]) : new Map(),
|
||||
),
|
||||
videoInput: mockDevices(
|
||||
camera ? new Map([[mockCamera.deviceId, mockCamera]]) : new Map(),
|
||||
),
|
||||
startUsingDeviceNames: (): void => {},
|
||||
stopUsingDeviceNames: (): void => {},
|
||||
};
|
||||
vi.mocked(createMediaDeviceObserver).mockImplementation((kind) => {
|
||||
switch (kind) {
|
||||
case "audioinput":
|
||||
return of(microphone ? [mockMicrophone] : []);
|
||||
case "audiooutput":
|
||||
return of(speaker ? [mockSpeaker] : []);
|
||||
case "videoinput":
|
||||
return of(camera ? [mockCamera] : []);
|
||||
case undefined:
|
||||
throw new Error("Unimplemented");
|
||||
}
|
||||
});
|
||||
const scope = new ObservableScope();
|
||||
onTestFinished(() => scope.end());
|
||||
return new MediaDevices(scope);
|
||||
}
|
||||
|
||||
describe("useMuteStates", () => {
|
||||
@@ -206,7 +205,12 @@ describe("useMuteStates", () => {
|
||||
const user = userEvent.setup();
|
||||
mockConfig();
|
||||
const noDevices = mockMediaDevices({ microphone: false, camera: false });
|
||||
// Warm up these Observables before making further changes to the
|
||||
// createMediaDevicesObserver mock
|
||||
noDevices.audioInput.available$.subscribe(() => {}).unsubscribe();
|
||||
noDevices.videoInput.available$.subscribe(() => {}).unsubscribe();
|
||||
const someDevices = mockMediaDevices();
|
||||
|
||||
const ReappearanceTest: FC = () => {
|
||||
const [devices, setDevices] = useState(someDevices);
|
||||
const onConnectDevicesClick = useCallback(
|
||||
|
||||
@@ -14,11 +14,14 @@ import {
|
||||
} from "react";
|
||||
import { type IWidgetApiRequest } from "matrix-widget-api";
|
||||
import { logger } from "matrix-js-sdk/lib/logger";
|
||||
import { useObservableEagerState } from "observable-hooks";
|
||||
|
||||
import {
|
||||
type MediaDeviceHandle,
|
||||
useMediaDevices,
|
||||
} from "../livekit/MediaDevicesContext";
|
||||
type DeviceLabel,
|
||||
type SelectedDevice,
|
||||
type MediaDevice,
|
||||
} from "../state/MediaDevices";
|
||||
import { useMediaDevices } from "../MediaDevicesContext";
|
||||
import { useReactiveState } from "../useReactiveState";
|
||||
import { ElementWidgetActions, widget } from "../widget";
|
||||
import { Config } from "../config/Config";
|
||||
@@ -53,24 +56,24 @@ export interface MuteStates {
|
||||
}
|
||||
|
||||
function useMuteState(
|
||||
device: MediaDeviceHandle,
|
||||
device: MediaDevice<DeviceLabel, SelectedDevice>,
|
||||
enabledByDefault: () => boolean,
|
||||
): MuteState {
|
||||
const available = useObservableEagerState(device.available$);
|
||||
const [enabled, setEnabled] = useReactiveState<boolean | undefined>(
|
||||
// Determine the default value once devices are actually connected
|
||||
(prev) =>
|
||||
prev ?? (device.available.size > 0 ? enabledByDefault() : undefined),
|
||||
[device.available.size],
|
||||
(prev) => prev ?? (available.size > 0 ? enabledByDefault() : undefined),
|
||||
[available.size],
|
||||
);
|
||||
return useMemo(
|
||||
() =>
|
||||
device.available.size === 0
|
||||
available.size === 0
|
||||
? deviceUnavailable
|
||||
: {
|
||||
enabled: enabled ?? false,
|
||||
setEnabled: setEnabled as Dispatch<SetStateAction<boolean>>,
|
||||
},
|
||||
[device.available.size, enabled, setEnabled],
|
||||
[available.size, enabled, setEnabled],
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ import {
import { useObservable, useObservableEagerState } from "observable-hooks";
import { logger } from "matrix-js-sdk/lib/logger";

import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useMediaDevices } from "../MediaDevicesContext";
import { platform } from "../Platform";
import { useLatest } from "../useLatest";
@@ -21,15 +21,18 @@ import {
|
||||
Separator,
|
||||
} from "@vector-im/compound-web";
|
||||
import { Trans, useTranslation } from "react-i18next";
|
||||
import { useObservableEagerState } from "observable-hooks";
|
||||
|
||||
import {
|
||||
EARPIECE_CONFIG_ID,
|
||||
type MediaDeviceHandle,
|
||||
} from "../livekit/MediaDevicesContext";
|
||||
type AudioOutputDeviceLabel,
|
||||
type DeviceLabel,
|
||||
type SelectedDevice,
|
||||
type MediaDevice,
|
||||
} from "../state/MediaDevices";
|
||||
import styles from "./DeviceSelection.module.css";
|
||||
|
||||
interface Props {
|
||||
device: MediaDeviceHandle;
|
||||
device: MediaDevice<DeviceLabel | AudioOutputDeviceLabel, SelectedDevice>;
|
||||
title: string;
|
||||
numberedLabel: (number: number) => string;
|
||||
}
|
||||
@@ -41,6 +44,8 @@ export const DeviceSelection: FC<Props> = ({
|
||||
}) => {
|
||||
const { t } = useTranslation();
|
||||
const groupId = useId();
|
||||
const available = useObservableEagerState(device.available$);
|
||||
const selectedId = useObservableEagerState(device.selected$)?.id;
|
||||
const onChange = useCallback(
|
||||
(e: ChangeEvent<HTMLInputElement>) => {
|
||||
device.select(e.target.value);
|
||||
@@ -49,7 +54,7 @@ export const DeviceSelection: FC<Props> = ({
|
||||
);
|
||||
|
||||
// There is no need to show the menu if there is no choice that can be made.
|
||||
if (device.available.size <= 1) return null;
|
||||
if (available.size <= 1) return null;
|
||||
|
||||
return (
|
||||
<div className={styles.selection}>
|
||||
@@ -64,7 +69,7 @@ export const DeviceSelection: FC<Props> = ({
|
||||
</Heading>
|
||||
<Separator className={styles.separator} />
|
||||
<div className={styles.options}>
|
||||
{[...device.available].map(([id, label]) => {
|
||||
{[...available].map(([id, label]) => {
|
||||
let labelText: ReactNode;
|
||||
switch (label.type) {
|
||||
case "name":
|
||||
@@ -94,20 +99,13 @@ export const DeviceSelection: FC<Props> = ({
|
||||
break;
|
||||
}
|
||||
|
||||
let isSelected = false;
|
||||
if (device.useAsEarpiece) {
|
||||
isSelected = id === EARPIECE_CONFIG_ID;
|
||||
} else {
|
||||
isSelected = id === device.selectedId;
|
||||
}
|
||||
|
||||
return (
|
||||
<InlineField
|
||||
key={id}
|
||||
name={groupId}
|
||||
control={
|
||||
<RadioControl
|
||||
checked={isSelected}
|
||||
checked={id === selectedId}
|
||||
onChange={onChange}
|
||||
value={id}
|
||||
/>
|
||||
|
||||
@@ -5,7 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
|
||||
Please see LICENSE in the repository root for full details.
|
||||
*/
|
||||
|
||||
import { type FC, type ReactNode, useState } from "react";
|
||||
import { type FC, type ReactNode, useEffect, useState } from "react";
|
||||
import { useTranslation } from "react-i18next";
|
||||
import { type MatrixClient } from "matrix-js-sdk";
|
||||
import { Button, Root as Form, Separator } from "@vector-im/compound-web";
|
||||
@@ -17,11 +17,8 @@ import styles from "./SettingsModal.module.css";
|
||||
import { type Tab, TabContainer } from "../tabs/Tabs";
|
||||
import { ProfileSettingsTab } from "./ProfileSettingsTab";
|
||||
import { FeedbackSettingsTab } from "./FeedbackSettingsTab";
|
||||
import {
|
||||
useMediaDevices,
|
||||
useMediaDeviceNames,
|
||||
iosDeviceMenu$,
|
||||
} from "../livekit/MediaDevicesContext";
|
||||
import { iosDeviceMenu$ } from "../state/MediaDevices";
|
||||
import { useMediaDevices } from "../MediaDevicesContext";
|
||||
import { widget } from "../widget";
|
||||
import {
|
||||
useSetting,
|
||||
@@ -98,7 +95,10 @@ export const SettingsModal: FC<Props> = ({
|
||||
};
|
||||
|
||||
const devices = useMediaDevices();
|
||||
useMediaDeviceNames(devices, open);
|
||||
useEffect(() => {
|
||||
if (open) devices.requestDeviceNames();
|
||||
}, [open, devices]);
|
||||
|
||||
const [soundVolume, setSoundVolume] = useSetting(soundEffectVolumeSetting);
|
||||
const [soundVolumeRaw, setSoundVolumeRaw] = useState(soundVolume);
|
||||
const [showDeveloperSettingsTab] = useSetting(developerMode);
|
||||
|
||||
src/state/AppViewModel.ts (new file, 19 lines)
@@ -0,0 +1,19 @@
/*
Copyright 2025 New Vector Ltd.

SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/

import { MediaDevices } from "./MediaDevices";
import { ViewModel } from "./ViewModel";

/**
* The top-level state holder for the application.
*/
export class AppViewModel extends ViewModel {
public readonly mediaDevices = new MediaDevices(this.scope);

// TODO: Move more application logic here. The CallViewModel, at the very
// least, ought to be accessible from this object.
}
src/state/MediaDevices.ts (new file, 366 lines)
@@ -0,0 +1,366 @@
|
||||
/*
|
||||
Copyright 2025 New Vector Ltd.
|
||||
|
||||
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
|
||||
Please see LICENSE in the repository root for full details.
|
||||
*/
|
||||
|
||||
import {
|
||||
combineLatest,
|
||||
filter,
|
||||
map,
|
||||
merge,
|
||||
of,
|
||||
pairwise,
|
||||
startWith,
|
||||
Subject,
|
||||
switchMap,
|
||||
type Observable,
|
||||
} from "rxjs";
|
||||
import { createMediaDeviceObserver } from "@livekit/components-core";
|
||||
import { logger } from "matrix-js-sdk/lib/logger";
|
||||
|
||||
import {
|
||||
audioInput as audioInputSetting,
|
||||
audioOutput as audioOutputSetting,
|
||||
videoInput as videoInputSetting,
|
||||
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
|
||||
} from "../settings/settings";
|
||||
import { type ObservableScope } from "./ObservableScope";
|
||||
import {
|
||||
outputDevice$ as controlledOutputSelection$,
|
||||
availableOutputDevices$ as controlledAvailableOutputDevices$,
|
||||
} from "../controls";
|
||||
import { getUrlParams } from "../UrlParams";
|
||||
|
||||
// This hardcoded id is used in EX ios! It can only be changed in coordination with
|
||||
// the ios swift team.
|
||||
const EARPIECE_CONFIG_ID = "earpiece-id";
|
||||
|
||||
export type DeviceLabel =
  | { type: "name"; name: string }
  | { type: "number"; number: number }
  | { type: "default"; name: string | null };

export type AudioOutputDeviceLabel = DeviceLabel | { type: "earpiece" };

export interface SelectedDevice {
  id: string;
}

export interface SelectedAudioInputDevice extends SelectedDevice {
  /**
   * Emits whenever we think that this audio input device has logically changed
   * to refer to a different hardware device.
   */
  hardwareDeviceChange$: Observable<void>;
}

export interface SelectedAudioOutputDevice extends SelectedDevice {
  /**
   * Whether this device is a "virtual earpiece" device. If so, we should output
   * on a single channel of the device at a reduced volume.
   */
  virtualEarpiece: boolean;
}

export interface MediaDevice<Label, Selected> {
  /**
   * A map from available device IDs to labels.
   */
  available$: Observable<Map<string, Label>>;
  /**
   * The selected device.
   */
  selected$: Observable<Selected | undefined>;
  /**
   * Selects a new device.
   */
  select(id: string): void;
}
/**
 * An observable that represents whether we should display the devices menu for
 * iOS. This implies the following:
 * - Hide any input devices (they do not work on iOS anyway).
 * - Show a button that opens the native output picker instead.
 * - Only show the earpiece toggle option if the earpiece is available:
 *   `availableOutputDevices$.includes((d)=>d.forEarpiece)`
 */
export const iosDeviceMenu$ = navigator.userAgent.includes("iPhone")
  ? of(true)
  : alwaysShowIphoneEarpieceSetting.value$;
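As a standalone illustration of how a consumer might branch on this flag (the import path mirrors the one used in SettingsModal above; the log messages are placeholders, not part of this commit):

import { iosDeviceMenu$ } from "../state/MediaDevices";

// Illustrative only: decide which device UI to render.
iosDeviceMenu$.subscribe((useIosMenu) => {
  if (useIosMenu) {
    console.log("Hide input pickers; offer the native output picker instead");
  } else {
    console.log("Show the regular input and output device pickers");
  }
});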
function availableRawDevices$(
  kind: MediaDeviceKind,
  usingNames$: Observable<boolean>,
  scope: ObservableScope,
): Observable<MediaDeviceInfo[]> {
  return usingNames$.pipe(
    switchMap((usingNames) =>
      createMediaDeviceObserver(
        kind,
        (e) => logger.error("Error creating MediaDeviceObserver", e),
        usingNames,
      ),
    ),
    startWith([]),
    scope.state(),
  );
}

function buildDeviceMap(
  availableRaw: MediaDeviceInfo[],
): Map<string, DeviceLabel> {
  return new Map<string, DeviceLabel>(
    availableRaw.map((d, i) => [
      d.deviceId,
      d.label
        ? { type: "name", name: d.label }
        : { type: "number", number: i + 1 },
    ]),
  );
}
function selectDevice$<Label>(
  available$: Observable<Map<string, Label>>,
  preferredId$: Observable<string | undefined>,
): Observable<string | undefined> {
  return combineLatest([available$, preferredId$], (available, preferredId) => {
    if (available.size) {
      // If the preferred device is available, use it. Or if every available
      // device ID is falsy, the browser is probably just being paranoid about
      // fingerprinting and we should still try using the preferred device.
      // Worst case it is not available and the browser will gracefully fall
      // back to some other device for us when requesting the media stream.
      // Otherwise, select the first available device.
      return (preferredId !== undefined && available.has(preferredId)) ||
        (available.size === 1 && available.has(""))
        ? preferredId
        : available.keys().next().value;
    }
    return undefined;
  });
}
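Restated on plain values, the selection rule above behaves as follows. This sketch deliberately ignores the falsy-ID fingerprinting special case, and the device IDs are made up for illustration:

// The selection rule, restated on plain values for clarity.
function pick(
  available: Map<string, string>,
  preferredId?: string,
): string | undefined {
  if (!available.size) return undefined;
  return preferredId !== undefined && available.has(preferredId)
    ? preferredId
    : available.keys().next().value;
}

const devices = new Map([
  ["mic-a", "Mic A"],
  ["mic-b", "Mic B"],
]);
console.log(pick(devices, "mic-b")); // "mic-b": the preferred device is available
console.log(pick(devices, "mic-z")); // "mic-a": fall back to the first device
console.log(pick(new Map()));        // undefined: nothing available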
class AudioInput implements MediaDevice<DeviceLabel, SelectedAudioInputDevice> {
  private readonly availableRaw$: Observable<MediaDeviceInfo[]> =
    availableRawDevices$("audioinput", this.usingNames$, this.scope);

  public readonly available$ = this.availableRaw$.pipe(
    map(buildDeviceMap),
    this.scope.state(),
  );

  public readonly selected$ = selectDevice$(
    this.available$,
    audioInputSetting.value$,
  ).pipe(
    map((id) =>
      id === undefined
        ? undefined
        : {
            id,
            // We can identify when the hardware device has changed by watching
            // for changes in the group ID
            hardwareDeviceChange$: this.availableRaw$.pipe(
              map((devices) => devices.find((d) => d.deviceId === id)?.groupId),
              pairwise(),
              filter(([before, after]) => before !== after),
              map(() => undefined),
            ),
          },
    ),
    this.scope.state(),
  );

  public select(id: string): void {
    audioInputSetting.setValue(id);
  }

  public constructor(
    private readonly usingNames$: Observable<boolean>,
    private readonly scope: ObservableScope,
  ) {}
}
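A hedged sketch of consuming `hardwareDeviceChange$`, for instance to re-acquire a microphone track when the same logical selection starts pointing at different hardware. The `restartMicrophone` function and the import path are invented for the example:

import { EMPTY, map, switchMap } from "rxjs";

import { type MediaDevices } from "./state/MediaDevices";

// Hypothetical helper, not part of this commit.
declare function restartMicrophone(deviceId: string): void;

export function watchMicrophoneHardware(devices: MediaDevices): void {
  devices.audioInput.selected$
    .pipe(
      // For each logical selection, watch for the hardware behind it changing.
      switchMap((selected) =>
        selected === undefined
          ? EMPTY
          : selected.hardwareDeviceChange$.pipe(map(() => selected.id)),
      ),
    )
    .subscribe((id) => restartMicrophone(id));
}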
class AudioOutput
  implements MediaDevice<AudioOutputDeviceLabel, SelectedAudioOutputDevice>
{
  public readonly available$ = availableRawDevices$(
    "audiooutput",
    this.usingNames$,
    this.scope,
  ).pipe(
    map((availableRaw) => {
      const available = buildDeviceMap(availableRaw);
      // Create a virtual default audio output for browsers that don't have one.
      // Its device ID must be the empty string because that's what setSinkId
      // recognizes.
      if (available.size && !available.has("") && !available.has("default"))
        available.set("", {
          type: "default",
          name: availableRaw[0]?.label || null,
        });
      // Note: creating virtual default input devices would be another problem
      // entirely, because requesting a media stream from deviceId "" won't
      // automatically track the default device.
      return available;
    }),
    this.scope.state(),
  );

  public readonly selected$ = selectDevice$(
    this.available$,
    audioOutputSetting.value$,
  ).pipe(
    map((id) =>
      id === undefined
        ? undefined
        : {
            id,
            virtualEarpiece: false,
          },
    ),
    this.scope.state(),
  );

  public select(id: string): void {
    audioOutputSetting.setValue(id);
  }

  public constructor(
    private readonly usingNames$: Observable<boolean>,
    private readonly scope: ObservableScope,
  ) {}
}
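A minimal sketch of routing an audio element to the selected output. It relies only on the standard `HTMLMediaElement.setSinkId` (which may require a recent DOM lib), and the virtual default's empty-string ID simply restores the browser default; the helper and import path are illustrative:

import { type MediaDevices } from "./state/MediaDevices";

export function bindAudioElementSink(
  audio: HTMLAudioElement,
  devices: MediaDevices,
): void {
  devices.audioOutput.selected$.subscribe((selected) => {
    if (selected !== undefined) {
      // "" (the virtual default) asks the browser to use its default output.
      audio.setSinkId(selected.id).catch((e) => {
        console.warn("Unable to switch audio output", e);
      });
    }
  });
}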
class ControlledAudioOutput
  implements MediaDevice<AudioOutputDeviceLabel, SelectedAudioOutputDevice>
{
  public readonly available$ = combineLatest(
    [controlledAvailableOutputDevices$.pipe(startWith([])), iosDeviceMenu$],
    (availableRaw, iosDeviceMenu) => {
      const available = new Map<string, AudioOutputDeviceLabel>(
        availableRaw.map(
          ({ id, name, isEarpiece, isSpeaker /*, isExternalHeadset*/ }) => {
            let deviceLabel: AudioOutputDeviceLabel;
            // if (isExternalHeadset) // Do we want this?
            if (isEarpiece) deviceLabel = { type: "earpiece" };
            else if (isSpeaker) deviceLabel = { type: "default", name };
            else deviceLabel = { type: "name", name };
            return [id, deviceLabel];
          },
        ),
      );

      // Create a virtual earpiece device in case a non-earpiece device is
      // designated for this purpose
      if (iosDeviceMenu && availableRaw.some((d) => d.forEarpiece))
        available.set(EARPIECE_CONFIG_ID, { type: "earpiece" });

      return available;
    },
  ).pipe(this.scope.state());

  private readonly deviceSelection$ = new Subject<string>();

  public select(id: string): void {
    this.deviceSelection$.next(id);
  }

  public readonly selected$ = merge(
    this.deviceSelection$,
    controlledOutputSelection$,
  ).pipe(
    startWith<string | undefined>(undefined),
    map((id) =>
      id === undefined
        ? undefined
        : { id, virtualEarpiece: id === EARPIECE_CONFIG_ID },
    ),
    this.scope.state(),
  );

  public constructor(private readonly scope: ObservableScope) {
    this.selected$.subscribe((device) => {
      // Let the hosting application know which output device has been selected.
      // This information is probably only of interest if the earpiece mode has
      // been selected - for example, Element X iOS listens to this to determine
      // whether it should enable the proximity sensor.
      if (device !== undefined) {
        logger.info("[controlled-output] setAudioDeviceSelect called:", device);
        window.controls.onAudioDeviceSelect?.(device.id);
        // Also invoke the deprecated callback for backward compatibility
        window.controls.onOutputDeviceSelect?.(device.id);
      }
    });
  }
}
class VideoInput implements MediaDevice<DeviceLabel, SelectedDevice> {
  public readonly available$ = availableRawDevices$(
    "videoinput",
    this.usingNames$,
    this.scope,
  ).pipe(map(buildDeviceMap));

  public readonly selected$ = selectDevice$(
    this.available$,
    videoInputSetting.value$,
  ).pipe(
    map((id) => (id === undefined ? undefined : { id })),
    this.scope.state(),
  );

  public select(id: string): void {
    videoInputSetting.setValue(id);
  }

  public constructor(
    private readonly usingNames$: Observable<boolean>,
    private readonly scope: ObservableScope,
  ) {}
}
export class MediaDevices {
  private readonly deviceNamesRequest$ = new Subject<void>();
  /**
   * Requests that the media devices be populated with the names of each
   * available device, rather than numbered identifiers. This may invoke a
   * permissions pop-up, so it should only be called when there is a clear user
   * intent to view the device list.
   */
  public requestDeviceNames(): void {
    this.deviceNamesRequest$.next();
  }

  // Start using device names as soon as requested. This will cause LiveKit to
  // briefly request device permissions and acquire media streams for each
  // device type while calling `enumerateDevices`, which is what browsers want
  // you to do to receive device names in lieu of a more explicit permissions
  // API. This flag never resets to false, because once permissions are granted
  // the first time, the user won't be prompted again until reload of the page.
  private readonly usingNames$ = this.deviceNamesRequest$.pipe(
    map(() => true),
    startWith(false),
    this.scope.state(),
  );

  public readonly audioInput: MediaDevice<
    DeviceLabel,
    SelectedAudioInputDevice
  > = new AudioInput(this.usingNames$, this.scope);

  public readonly audioOutput: MediaDevice<
    AudioOutputDeviceLabel,
    SelectedAudioOutputDevice
  > = getUrlParams().controlledAudioDevices
    ? new ControlledAudioOutput(this.scope)
    : new AudioOutput(this.usingNames$, this.scope);

  public readonly videoInput: MediaDevice<DeviceLabel, SelectedDevice> =
    new VideoInput(this.usingNames$, this.scope);

  public constructor(private readonly scope: ObservableScope) {}
}
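A brief usage sketch of this class, assuming a `MediaDevices` instance obtained from the app view model; it mirrors the settings-dialog flow of requesting device names only on explicit user intent, and the helper name and import path are illustrative:

import { type DeviceLabel, type MediaDevices } from "./state/MediaDevices";

export function showMicrophonePicker(devices: MediaDevices): void {
  // Only call this from a user gesture, since it may trigger a permissions prompt.
  devices.requestDeviceNames();

  devices.audioInput.available$.subscribe(
    (available: Map<string, DeviceLabel>) => {
      for (const [id, label] of available) {
        let text: string;
        if (label.type === "name") text = label.name;
        else if (label.type === "number") text = `Microphone ${label.number}`;
        else text = label.name ?? "Default";
        console.log(id, text);
      }
    },
  );
}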
@@ -430,8 +430,6 @@ abstract class BaseUserMediaViewModel extends BaseMediaViewModel {
}

      },
    },
/**
 * The local participant's user media.
 */
export class LocalUserMediaViewModel extends BaseUserMediaViewModel {

@@ -10,10 +10,12 @@ import { type FC } from "react";
import { render } from "@testing-library/react";
import userEvent, { type UserEvent } from "@testing-library/user-event";
import { BrowserRouter } from "react-router-dom";
import { of } from "rxjs";

import { deviceStub, MediaDevicesContext } from "./livekit/MediaDevicesContext";
import { MediaDevicesContext } from "./MediaDevicesContext";
import { useAudioContext } from "./useAudioContext";
import { soundEffectVolume as soundEffectVolumeSetting } from "./settings/settings";
import { mockMediaDevices } from "./utils/test";

const staticSounds = Promise.resolve({
  aSound: new ArrayBuffer(0),
@@ -102,13 +104,21 @@ afterEach(() => {
});

test("can play a single sound", async () => {
  const { findByText } = render(<TestComponentWrapper />);
  const { findByText } = render(
    <MediaDevicesContext.Provider value={mockMediaDevices({})}>
      <TestComponentWrapper />
    </MediaDevicesContext.Provider>,
  );
  await user.click(await findByText("Valid sound"));
  expect(testAudioContext.createBufferSource).toHaveBeenCalledOnce();
});

test("will ignore sounds that are not registered", async () => {
  const { findByText } = render(<TestComponentWrapper />);
  const { findByText } = render(
    <MediaDevicesContext.Provider value={mockMediaDevices({})}>
      <TestComponentWrapper />
    </MediaDevicesContext.Provider>,
  );
  await user.click(await findByText("Invalid sound"));
  expect(testAudioContext.createBufferSource).not.toHaveBeenCalled();
});
@@ -116,19 +126,13 @@ test("will ignore sounds that are not registered", async () => {
test("will use the correct device", () => {
  render(
    <MediaDevicesContext.Provider
      value={{
        audioInput: deviceStub,
      value={mockMediaDevices({
        audioOutput: {
          selectedId: "chosen-device",
          selectedGroupId: "",
          available: new Map(),
          available$: of(new Map<never, never>()),
          selected$: of({ id: "chosen-device", virtualEarpiece: false }),
          select: () => {},
          useAsEarpiece: false,
        },
        videoInput: deviceStub,
        startUsingDeviceNames: () => {},
        stopUsingDeviceNames: () => {},
      }}
      })}
    >
      <TestComponentWrapper />
    </MediaDevicesContext.Provider>,
@@ -139,7 +143,11 @@ test("will use the correct device", () => {

test("will use the correct volume level", async () => {
  soundEffectVolumeSetting.setValue(0.33);
  const { findByText } = render(<TestComponentWrapper />);
  const { findByText } = render(
    <MediaDevicesContext.Provider value={mockMediaDevices({})}>
      <TestComponentWrapper />
    </MediaDevicesContext.Provider>,
  );
  await user.click(await findByText("Valid sound"));
  expect(testAudioContext.gain.gain.setValueAtTime).toHaveBeenCalledWith(
    0.33,
@@ -151,19 +159,13 @@ test("will use the correct volume level", async () => {
test("will use the pan if earpiece is selected", async () => {
  const { findByText } = render(
    <MediaDevicesContext.Provider
      value={{
        audioInput: deviceStub,
      value={mockMediaDevices({
        audioOutput: {
          selectedId: "chosen-device",
          selectedGroupId: "",
          available: new Map(),
          available$: of(new Map<never, never>()),
          selected$: of({ id: "chosen-device", virtualEarpiece: true }),
          select: () => {},
          useAsEarpiece: true,
        },
        videoInput: deviceStub,
        startUsingDeviceNames: () => {},
        stopUsingDeviceNames: () => {},
      }}
      })}
    >
      <TestComponentWrapper />
    </MediaDevicesContext.Provider>,
@@ -7,15 +7,13 @@ Please see LICENSE in the repository root for full details.

import { logger } from "matrix-js-sdk/lib/logger";
import { useState, useEffect } from "react";
import { useObservableEagerState } from "observable-hooks";

import {
  soundEffectVolume as soundEffectVolumeSetting,
  useSetting,
} from "./settings/settings";
import {
  useEarpieceAudioConfig,
  useMediaDevices,
} from "./livekit/MediaDevicesContext";
import { useEarpieceAudioConfig, useMediaDevices } from "./MediaDevicesContext";
import { type PrefetchedSounds } from "./soundUtils";
import { useUrlParams } from "./UrlParams";
import * as controls from "./controls";
@@ -73,8 +71,6 @@ export function useAudioContext<S extends string>(
  props: Props<S>,
): UseAudioContext<S> | null {
  const [soundEffectVolume] = useSetting(soundEffectVolumeSetting);
  const { audioOutput } = useMediaDevices();
  const { controlledAudioDevices } = useUrlParams();
  const [audioContext, setAudioContext] = useState<AudioContext>();
  const [audioBuffers, setAudioBuffers] = useState<Record<S, AudioBuffer>>();

@@ -111,6 +107,11 @@ export function useAudioContext<S extends string>(
    };
  }, [props.sounds, props.latencyHint]);

  const audioOutputId = useObservableEagerState(
    useMediaDevices().audioOutput.selected$,
  )?.id;
  const { controlledAudioDevices } = useUrlParams();

  // Update the sink ID whenever we change devices.
  useEffect(() => {
    if (
@@ -120,11 +121,11 @@ export function useAudioContext<S extends string>(
    ) {
      // https://developer.mozilla.org/en-US/docs/Web/API/AudioContext/setSinkId
      // @ts-expect-error - setSinkId doesn't exist yet in types, maybe because it's not supported everywhere.
      audioContext.setSinkId(audioOutput.selectedId).catch((ex) => {
      audioContext.setSinkId(audioOutputId).catch((ex) => {
        logger.warn("Unable to change sink for audio context", ex);
      });
    }
  }, [audioContext, audioOutput.selectedId, controlledAudioDevices]);
  }, [audioContext, audioOutputId, controlledAudioDevices]);
  const { pan: earpiecePan, volume: earpieceVolume } = useEarpieceAudioConfig();

  // Don't return a function until we're ready.
@@ -38,3 +38,18 @@ export function accumulate<State, Event>(
  return (events$: Observable<Event>): Observable<State> =>
    events$.pipe(scan(update, initial), startWith(initial));
}

/**
 * Reads the current value of a state Observable without reacting to future
 * changes.
 *
 * This function exists to help with certain cases of bridging Observables into
 * React, where an initial value is needed. You should never use it to create an
 * Observable derived from another Observable; use reactive operators instead.
 */
export function getValue<T>(state$: Observable<T>): T {
  let value: T | typeof nothing = nothing;
  state$.subscribe((x) => (value = x)).unsubscribe();
  if (value === nothing) throw new Error("Not a state Observable");
  return value;
}
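As a usage illustration (assuming the helper lives in `src/utils/observable.ts`, as this hunk suggests): a `BehaviorSubject` qualifies as a state Observable because it replays its current value synchronously on subscription, whereas a plain `Subject` emits nothing on subscribe and would make `getValue` throw.

import { BehaviorSubject } from "rxjs";

import { getValue } from "./utils/observable";

const volume$ = new BehaviorSubject(0.5);
console.log(getValue(volume$)); // 0.5, read synchronously without keeping a subscription

volume$.next(0.8);
console.log(getValue(volume$)); // 0.8, each call samples the current value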
@@ -46,6 +46,7 @@ import {
  type ResolvedConfigOptions,
} from "../config/ConfigOptions";
import { Config } from "../config/Config";
import { type MediaDevices } from "../state/MediaDevices";

export function withFakeTimers(continuation: () => void): void {
  vi.useFakeTimers();
@@ -332,3 +333,18 @@ export const mockTrack = (identity: string): TrackReference =>
    track: {},
    source: {},
  }) as unknown as TrackReference;

export const deviceStub = {
  available$: of(new Map<never, never>()),
  selected$: of(undefined),
  select(): void {},
};

export function mockMediaDevices(data: Partial<MediaDevices>): MediaDevices {
  return {
    audioInput: deviceStub,
    audioOutput: deviceStub,
    videoInput: deviceStub,
    ...data,
  } as MediaDevices;
}
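In a test, a single device can then be overridden while the stubs cover the rest. A sketch in the spirit of the test changes above, with an invented camera ID, a trivial placeholder component, and import paths assumed relative to `src/utils`:

import { type FC } from "react";
import { render } from "@testing-library/react";
import { of } from "rxjs";

import { MediaDevicesContext } from "../MediaDevicesContext";
import { mockMediaDevices } from "./test";

const Probe: FC = () => null; // placeholder component for the sketch

render(
  <MediaDevicesContext.Provider
    value={mockMediaDevices({
      videoInput: {
        available$: of(new Map<never, never>()),
        selected$: of({ id: "fake-camera" }), // invented ID for illustration
        select: () => {},
      },
    })}
  >
    <Probe />
  </MediaDevicesContext.Provider>,
);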