From 5e2e94d79409cfecc5ca9cc6bf91885db12dd5f6 Mon Sep 17 00:00:00 2001
From: Robin
Date: Fri, 20 Jun 2025 12:37:25 -0400
Subject: [PATCH] Refactor media devices to live outside React as Observables (#3334)

* Refactor media devices to live outside React as Observables

This moves the media devices state out of React to further our transition to
an MVVM architecture in which we can more easily model and store complex
application state. I have created an AppViewModel to act as the overarching
state holder for any future non-React state we end up creating, and the
MediaDevices reside within this. We should move more application logic
(including the CallViewModel itself) there in the future.

* Address review feedback

* Fixes from iOS debugging session (#3342):

- Don't use the preferred vs. selected concept in controlled media. It's not
  needed, since we don't use the ID for actual browser media devices (the IDs
  are not even actual browser device IDs).
- Add more logging.
- Add more conditions to avoid accidentally setting a deviceId that is not a
  browser deviceId but one provided via controlled media.

---------

Co-authored-by: Timo <16718859+toger5@users.noreply.github.com>
---
 src/App.tsx                              |  13 +-
 src/MediaDevicesContext.ts               |  52 +++
 src/controls.ts                          |  19 +-
 src/livekit/MatrixAudioRenderer.test.tsx |  34 +-
 src/livekit/MatrixAudioRenderer.tsx      |   2 +-
 src/livekit/MediaDevicesContext.tsx      | 445 -----------------------
 src/livekit/useLivekit.ts                | 128 ++++---
 src/main.tsx                             |   3 +-
 src/room/GroupCallView.test.tsx          |  32 +-
 src/room/GroupCallView.tsx               |  11 +-
 src/room/InCallView.test.tsx             |  74 ++--
 src/room/LobbyView.tsx                   |  16 +-
 src/room/MuteStates.test.tsx             |  68 ++--
 src/room/MuteStates.ts                   |  21 +-
 src/room/useSwitchCamera.ts              |   2 +-
 src/settings/DeviceSelection.tsx         |  26 +-
 src/settings/SettingsModal.tsx           |  14 +-
 src/state/AppViewModel.ts                |  19 +
 src/state/MediaDevices.ts                | 366 +++++++++++++++++++
 src/state/MediaViewModel.ts              |   2 -
 src/useAudioContext.test.tsx             |  50 +--
 src/useAudioContext.tsx                  |  17 +-
 src/utils/observable.ts                  |  15 +
 src/utils/test.ts                        |  16 +
 24 files changed, 763 insertions(+), 682 deletions(-)
 create mode 100644 src/MediaDevicesContext.ts
 delete mode 100644 src/livekit/MediaDevicesContext.tsx
 create mode 100644 src/state/AppViewModel.ts
 create mode 100644 src/state/MediaDevices.ts

diff --git a/src/App.tsx b/src/App.tsx
index 5dc8d29c..cf403299 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -19,10 +19,11 @@ import { ClientProvider } from "./ClientContext";
 import { ErrorPage, LoadingPage } from "./FullScreenView";
 import { DisconnectedBanner } from "./DisconnectedBanner";
 import { Initializer } from "./initializer";
-import { MediaDevicesProvider } from "./livekit/MediaDevicesContext";
 import { widget } from "./widget";
 import { useTheme } from "./useTheme";
 import { ProcessorProvider } from "./livekit/TrackProcessorContext";
+import { type AppViewModel } from "./state/AppViewModel";
+import { MediaDevicesContext } from "./MediaDevicesContext";
 
 const SentryRoute = Sentry.withSentryReactRouterV7Routing(Route);
 
@@ -50,7 +51,11 @@ const ThemeProvider: FC = ({ children }) => {
   return children;
 };
 
-export const App: FC = () => {
+interface Props {
+  vm: AppViewModel;
+}
+
+export const App: FC = ({ vm }) => {
   const [loaded, setLoaded] = useState(false);
   useEffect(() => {
     Initializer.init()
@@ -72,7 +77,7 @@ export const App: FC = () => {
           {loaded ?
( - + ( @@ -91,7 +96,7 @@ export const App: FC = () => { - + ) : (

diff --git a/src/MediaDevicesContext.ts b/src/MediaDevicesContext.ts
new file mode 100644
index 00000000..404815ba
--- /dev/null
+++ b/src/MediaDevicesContext.ts
@@ -0,0 +1,52 @@
+/*
+Copyright 2025 New Vector Ltd.
+
+SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
+Please see LICENSE in the repository root for full details.
+*/
+
+import { createContext, useContext, useMemo } from "react";
+import { useObservableEagerState } from "observable-hooks";
+
+import { type MediaDevices } from "./state/MediaDevices";
+
+export const MediaDevicesContext = createContext(
+  undefined,
+);
+
+export function useMediaDevices(): MediaDevices {
+  const mediaDevices = useContext(MediaDevicesContext);
+  if (mediaDevices === undefined)
+    throw new Error(
+      "useMediaDevices must be used within a MediaDevices context provider",
+    );
+  return mediaDevices;
+}
+
+/**
+ * A convenience hook to get the audio node configuration for the earpiece.
+ * It will check the `virtualEarpiece` flag of the selected `audioOutput`
+ * device and return the appropriate pan and volume values.
+ *
+ * @returns pan and volume values for the earpiece audio node configuration.
+ */
+export const useEarpieceAudioConfig = (): {
+  pan: number;
+  volume: number;
+} => {
+  const devices = useMediaDevices();
+  const audioOutput = useObservableEagerState(devices.audioOutput.selected$);
+  // We use only the right speaker (pan = 1) for the earpiece.
+  // This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone).
+  const pan = useMemo(
+    () => (audioOutput?.virtualEarpiece ? 1 : 0),
+    [audioOutput?.virtualEarpiece],
+  );
+  // We also lower the volume by a factor of 10 to optimize for the use case
+  // where a user is holding the phone to their ear.
+  const volume = useMemo(
+    () => (audioOutput?.virtualEarpiece ? 0.1 : 1),
+    [audioOutput?.virtualEarpiece],
+  );
+  return { pan, volume };
+};

diff --git a/src/controls.ts b/src/controls.ts
index b5209ab0..ba816348 100644
--- a/src/controls.ts
+++ b/src/controls.ts
@@ -5,7 +5,10 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
 Please see LICENSE in the repository root for full details.
 */
 
-import { BehaviorSubject, Subject } from "rxjs";
+import { Subject } from "rxjs";
+import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
+
+const logger = rootLogger.getChild("[controlled-output]");
 
 export interface Controls {
   canEnterPip(): boolean;
@@ -42,12 +45,11 @@ export interface OutputDevice {
  * If pipMode is enabled, EC will render an adapted call view layout.
  */
 export const setPipEnabled$ = new Subject();
-// BehaviorSubject since the client might set this before we have subscribed (GroupCallView still in "loading" state)
-// We want the devices that have been set during loading to be available immediately once loaded.
-export const availableOutputDevices$ = new BehaviorSubject([]);
-// BehaviorSubject since the client might set this before we have subscribed (GroupCallView still in "loading" state)
-// We want the device that has been set during loading to be available immediately once loaded.
-export const outputDevice$ = new BehaviorSubject(undefined);
+
+export const availableOutputDevices$ = new Subject();
+
+export const outputDevice$ = new Subject();
+
 /**
  * This allows the OS to mute the call if the user
  * presses the volume down button when it is at the minimum volume.
@@ -75,12 +77,15 @@ window.controls = { setPipEnabled$.next(false); }, setAvailableAudioDevices(devices: OutputDevice[]): void { + logger.info("setAvailableAudioDevices called from native:", devices); availableOutputDevices$.next(devices); }, setAudioDevice(id: string): void { + logger.info("setAudioDevice called from native", id); outputDevice$.next(id); }, setAudioEnabled(enabled: boolean): void { + logger.info("setAudioEnabled called from native:", enabled); if (!setAudioEnabled$.observed) throw new Error( "Output controls are disabled. No setAudioEnabled$ observer", diff --git a/src/livekit/MatrixAudioRenderer.test.tsx b/src/livekit/MatrixAudioRenderer.test.tsx index e2fa4e87..24a9a2e3 100644 --- a/src/livekit/MatrixAudioRenderer.test.tsx +++ b/src/livekit/MatrixAudioRenderer.test.tsx @@ -17,12 +17,14 @@ import { type ReactNode } from "react"; import { useTracks } from "@livekit/components-react"; import { testAudioContext } from "../useAudioContext.test"; -import * as MediaDevicesContext from "./MediaDevicesContext"; +import * as MediaDevicesContext from "../MediaDevicesContext"; import { MatrixAudioRenderer } from "./MatrixAudioRenderer"; -import { mockTrack } from "../utils/test"; +import { mockMediaDevices, mockTrack } from "../utils/test"; export const TestAudioContextConstructor = vi.fn(() => testAudioContext); +const MediaDevicesProvider = MediaDevicesContext.MediaDevicesContext.Provider; + beforeEach(() => { vi.stubGlobal("AudioContext", TestAudioContextConstructor); }); @@ -51,9 +53,11 @@ vi.mocked(useTracks).mockReturnValue(tracks); it("should render for member", () => { const { container, queryAllByTestId } = render( - , + + + , ); expect(container).toBeTruthy(); expect(queryAllByTestId("audio")).toHaveLength(1); @@ -64,7 +68,9 @@ it("should not render without member", () => { { sender: "othermember", deviceId: "123" }, ] as CallMembership[]; const { container, queryAllByTestId } = render( - , + + + , ); expect(container).toBeTruthy(); expect(queryAllByTestId("audio")).toHaveLength(0); @@ -72,9 +78,11 @@ it("should not render without member", () => { it("should not setup audioContext gain and pan if there is no need to.", () => { render( - , + + + , ); const audioTrack = tracks[0].publication.track! as RemoteAudioTrack; @@ -93,9 +101,11 @@ it("should setup audioContext gain and pan", () => { volume: 0.1, }); render( - , + + + , ); const audioTrack = tracks[0].publication.track! as RemoteAudioTrack; diff --git a/src/livekit/MatrixAudioRenderer.tsx b/src/livekit/MatrixAudioRenderer.tsx index ee4062c4..1afa53e5 100644 --- a/src/livekit/MatrixAudioRenderer.tsx +++ b/src/livekit/MatrixAudioRenderer.tsx @@ -16,7 +16,7 @@ import { import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc"; import { logger as rootLogger } from "matrix-js-sdk/lib/logger"; -import { useEarpieceAudioConfig } from "./MediaDevicesContext"; +import { useEarpieceAudioConfig } from "../MediaDevicesContext"; import { useReactiveState } from "../useReactiveState"; import * as controls from "../controls"; diff --git a/src/livekit/MediaDevicesContext.tsx b/src/livekit/MediaDevicesContext.tsx deleted file mode 100644 index c836f82b..00000000 --- a/src/livekit/MediaDevicesContext.tsx +++ /dev/null @@ -1,445 +0,0 @@ -/* -Copyright 2023-2025 New Vector Ltd. - -SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial -Please see LICENSE in the repository root for full details. 
-*/ - -import { - type FC, - createContext, - useCallback, - useContext, - useEffect, - useMemo, - useRef, - useState, - type JSX, -} from "react"; -import { createMediaDeviceObserver } from "@livekit/components-core"; -import { combineLatest, distinctUntilChanged, map, startWith } from "rxjs"; -import { useObservable, useObservableEagerState } from "observable-hooks"; -import { logger } from "matrix-js-sdk/lib/logger"; -import { deepCompare } from "matrix-js-sdk/lib/utils"; - -import { - useSetting, - audioInput as audioInputSetting, - audioOutput as audioOutputSetting, - videoInput as videoInputSetting, - alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting, - type Setting, -} from "../settings/settings"; -import { outputDevice$, availableOutputDevices$ } from "../controls"; -import { useUrlParams } from "../UrlParams"; - -// This hardcoded id is used in EX ios! It can only be changed in coordination with -// the ios swift team. -export const EARPIECE_CONFIG_ID = "earpiece-id"; - -export type DeviceLabel = - | { type: "name"; name: string } - | { type: "number"; number: number } - | { type: "earpiece" } - | { type: "default"; name: string | null }; - -export interface MediaDeviceHandle { - /** - * A map from available device IDs to labels. - */ - available: Map; - selectedId: string | undefined; - /** - * An additional device configuration that makes us use only one channel of the - * output device and a reduced volume. - */ - useAsEarpiece: boolean | undefined; - /** - * The group ID of the selected device. - */ - // This is exposed sort of ad-hoc because it's only needed for knowing when to - // restart the tracks of default input devices, and ideally this behavior - // would be encapsulated somehow… - selectedGroupId: string | undefined; - select: (deviceId: string) => void; -} - -interface InputDevices { - audioInput: MediaDeviceHandle; - videoInput: MediaDeviceHandle; - startUsingDeviceNames: () => void; - stopUsingDeviceNames: () => void; - usingNames: boolean; -} - -export interface MediaDevices extends Omit { - audioOutput: MediaDeviceHandle; -} - -/** - * An observable that represents if we should display the devices menu for iOS. - * This implies the following - * - hide any input devices (they do not work anyhow on ios) - * - Show a button to show the native output picker instead. - * - Only show the earpiece toggle option if the earpiece is available: - * `availableOutputDevices$.includes((d)=>d.forEarpiece)` - */ -export const iosDeviceMenu$ = alwaysShowIphoneEarpieceSetting.value$.pipe( - map((v) => v || navigator.userAgent.includes("iPhone")), -); - -function useSelectedId( - available: Map, - preferredId: string | undefined, -): string | undefined { - return useMemo(() => { - if (available.size) { - // If the preferred device is available, use it. Or if every available - // device ID is falsy, the browser is probably just being paranoid about - // fingerprinting and we should still try using the preferred device. - // Worst case it is not available and the browser will gracefully fall - // back to some other device for us when requesting the media stream. - // Otherwise, select the first available device. - return (preferredId !== undefined && available.has(preferredId)) || - (available.size === 1 && available.has("")) - ? preferredId - : available.keys().next().value; - } - return undefined; - }, [available, preferredId]); -} - -/** - * Hook to get access to a mediaDevice handle for a kind. 
This allows to list - * the available devices, read and set the selected device. - * @param kind Audio input, output or video output. - * @param setting The setting this handle's selection should be synced with. - * @param usingNames If the hook should query device names for the associated - * list. - * @returns A handle for the chosen kind. - */ -function useMediaDeviceHandle( - kind: MediaDeviceKind, - setting: Setting, - usingNames: boolean, -): MediaDeviceHandle { - const hasRequestedPermissions = useRef(false); - const requestPermissions = usingNames || hasRequestedPermissions.current; - // Make sure we don't needlessly reset to a device observer without names, - // once permissions are already given - hasRequestedPermissions.current ||= usingNames; - - // We use a bare device observer here rather than one of the fancy device - // selection hooks from @livekit/components-react, because - // useMediaDeviceSelect expects a room or track, which we don't have here, and - // useMediaDevices provides no way to request device names. - // Tragically, the only way to get device names out of LiveKit is to specify a - // kind, which then results in multiple permissions requests. - const deviceObserver$ = useMemo( - () => - createMediaDeviceObserver( - kind, - () => logger.error("Error creating MediaDeviceObserver"), - requestPermissions, - ).pipe( - startWith([]), - // This Observable emits new values whenever the browser fires a - // MediaDevices 'devicechange' event. One would think, innocently, that - // a 'devicechange' event means the devices have changed. But as of the - // time of writing, we are seeing mobile Safari firing spurious - // 'devicechange' events (where no change has actually occurred) when - // we call MediaDevices.getUserMedia. So, filter by deep equality. - distinctUntilChanged(deepCompare), - ), - [kind, requestPermissions], - ); - const available = useObservableEagerState( - useMemo( - () => - deviceObserver$.pipe( - map((availableRaw) => { - // Sometimes browsers (particularly Firefox) can return multiple device - // entries for the exact same device ID; using a map deduplicates them - let available = new Map( - availableRaw.map((d, i) => [ - d.deviceId, - d.label - ? { type: "name", name: d.label } - : { type: "number", number: i + 1 }, - ]), - ); - // Create a virtual default audio output for browsers that don't have one. - // Its device ID must be the empty string because that's what setSinkId - // recognizes. - // We also create this if we do not have any available devices, so that - // we can use the default or the earpiece. - if ( - kind === "audiooutput" && - !available.has("") && - !available.has("default") && - available.size - ) - available = new Map([ - ["", { type: "default", name: availableRaw[0]?.label || null }], - ...available, - ]); - // Note: creating virtual default input devices would be another problem - // entirely, because requesting a media stream from deviceId "" won't - // automatically track the default device. 
- return available; - }), - ), - [deviceObserver$, kind], - ), - ); - - const [preferredId, select] = useSetting(setting); - const selectedId = useSelectedId(available, preferredId); - - const selectedGroupId = useObservableEagerState( - useMemo( - () => - deviceObserver$.pipe( - map( - (availableRaw) => - availableRaw.find((d) => d.deviceId === selectedId)?.groupId, - ), - ), - [deviceObserver$, selectedId], - ), - ); - - return useMemo( - () => ({ - available, - selectedId, - useAsEarpiece: false, - selectedGroupId, - select, - }), - [available, selectedId, selectedGroupId, select], - ); -} - -export const deviceStub: MediaDeviceHandle = { - available: new Map(), - selectedId: undefined, - selectedGroupId: undefined, - select: () => {}, - useAsEarpiece: false, -}; - -export const devicesStub: MediaDevices = { - audioInput: deviceStub, - audioOutput: deviceStub, - videoInput: deviceStub, - startUsingDeviceNames: () => {}, - stopUsingDeviceNames: () => {}, -}; - -export const MediaDevicesContext = createContext(devicesStub); - -function useInputDevices(): InputDevices { - // Counts the number of callers currently using device names. - const [numCallersUsingNames, setNumCallersUsingNames] = useState(0); - const usingNames = numCallersUsingNames > 0; - - const audioInput = useMediaDeviceHandle( - "audioinput", - audioInputSetting, - usingNames, - ); - const videoInput = useMediaDeviceHandle( - "videoinput", - videoInputSetting, - usingNames, - ); - - const startUsingDeviceNames = useCallback( - () => setNumCallersUsingNames((n) => n + 1), - [setNumCallersUsingNames], - ); - const stopUsingDeviceNames = useCallback( - () => setNumCallersUsingNames((n) => n - 1), - [setNumCallersUsingNames], - ); - - return { - audioInput, - videoInput, - startUsingDeviceNames, - stopUsingDeviceNames, - usingNames, - }; -} - -interface Props { - children: JSX.Element; -} - -export const MediaDevicesProvider: FC = ({ children }) => { - const { - audioInput, - videoInput, - startUsingDeviceNames, - stopUsingDeviceNames, - usingNames, - } = useInputDevices(); - - const { controlledAudioDevices } = useUrlParams(); - - const webViewAudioOutput = useMediaDeviceHandle( - "audiooutput", - audioOutputSetting, - usingNames, - ); - const controlledAudioOutput = useControlledOutput(); - - const context: MediaDevices = useMemo( - () => ({ - audioInput, - audioOutput: controlledAudioDevices - ? controlledAudioOutput - : webViewAudioOutput, - videoInput, - startUsingDeviceNames, - stopUsingDeviceNames, - }), - [ - audioInput, - controlledAudioDevices, - controlledAudioOutput, - webViewAudioOutput, - videoInput, - startUsingDeviceNames, - stopUsingDeviceNames, - ], - ); - - return ( - - {children} - - ); -}; - -function useControlledOutput(): MediaDeviceHandle { - const { available } = useObservableEagerState( - useObservable(() => { - const outputDeviceData$ = availableOutputDevices$.pipe( - map((devices) => { - const deviceForEarpiece = devices.find((d) => d.forEarpiece); - const deviceMapTuple: [string, DeviceLabel][] = devices.map( - ({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => { - let deviceLabel: DeviceLabel = { type: "name", name }; - // if (isExternalHeadset) // Do we want this? 
- if (isEarpiece) deviceLabel = { type: "earpiece" }; - if (isSpeaker) deviceLabel = { type: "default", name }; - return [id, deviceLabel]; - }, - ); - return { - devicesMap: new Map(deviceMapTuple), - deviceForEarpiece, - }; - }), - ); - - return combineLatest( - [outputDeviceData$, iosDeviceMenu$], - ({ devicesMap, deviceForEarpiece }, iosShowEarpiece) => { - let available = devicesMap; - if (iosShowEarpiece && !!deviceForEarpiece) { - available = new Map([ - ...devicesMap.entries(), - [EARPIECE_CONFIG_ID, { type: "earpiece" }], - ]); - } - return { available, deviceForEarpiece }; - }, - ); - }), - ); - const [preferredId, setPreferredId] = useSetting(audioOutputSetting); - useEffect(() => { - const subscription = outputDevice$.subscribe((id) => { - if (id) setPreferredId(id); - }); - return (): void => subscription.unsubscribe(); - }, [setPreferredId]); - - const selectedId = useSelectedId(available, preferredId); - - const [asEarpiece, setAsEarpiece] = useState(false); - - useEffect(() => { - // Let the hosting application know which output device has been selected. - // This information is probably only of interest if the earpiece mode has been - // selected - for example, Element X iOS listens to this to determine whether it - // should enable the proximity sensor. - if (selectedId) { - window.controls.onAudioDeviceSelect?.(selectedId); - // Call deprecated method for backwards compatibility. - window.controls.onOutputDeviceSelect?.(selectedId); - } - setAsEarpiece(selectedId === EARPIECE_CONFIG_ID); - }, [selectedId]); - - return useMemo( - () => ({ - available: available, - selectedId, - selectedGroupId: undefined, - select: setPreferredId, - useAsEarpiece: asEarpiece, - }), - [available, selectedId, setPreferredId, asEarpiece], - ); -} - -export const useMediaDevices = (): MediaDevices => - useContext(MediaDevicesContext); - -/** - * React hook that requests for the media devices context to be populated with - * real device names while this component is mounted. This is not done by - * default because it may involve requesting additional permissions from the - * user. - */ -export const useMediaDeviceNames = ( - context: MediaDevices, - enabled = true, -): void => - useEffect(() => { - if (enabled) { - context.startUsingDeviceNames(); - return context.stopUsingDeviceNames; - } - }, [context, enabled]); - -/** - * A convenience hook to get the audio node configuration for the earpiece. - * It will check the `useAsEarpiece` of the `audioOutput` device and return - * the appropriate pan and volume values. - * - * @returns pan and volume values for the earpiece audio node configuration. - */ -export const useEarpieceAudioConfig = (): { - pan: number; - volume: number; -} => { - const { audioOutput } = useMediaDevices(); - // We use only the right speaker (pan = 1) for the earpiece. - // This mimics the behavior of the native earpiece speaker (only the top speaker on an iPhone) - const pan = useMemo( - () => (audioOutput.useAsEarpiece ? 1 : 0), - [audioOutput.useAsEarpiece], - ); - // We also do lower the volume by a factor of 10 to optimize for the usecase where - // a user is holding the phone to their ear. - const volume = useMemo( - () => (audioOutput.useAsEarpiece ? 
0.1 : 1), - [audioOutput.useAsEarpiece], - ); - return { pan, volume }; -}; diff --git a/src/livekit/useLivekit.ts b/src/livekit/useLivekit.ts index 4589063e..58f088f6 100644 --- a/src/livekit/useLivekit.ts +++ b/src/livekit/useLivekit.ts @@ -19,12 +19,18 @@ import E2EEWorker from "livekit-client/e2ee-worker?worker"; import { logger } from "matrix-js-sdk/lib/logger"; import { type MatrixRTCSession } from "matrix-js-sdk/lib/matrixrtc"; import { useObservable, useObservableEagerState } from "observable-hooks"; -import { map } from "rxjs"; +import { + map, + NEVER, + type Observable, + type Subscription, + switchMap, +} from "rxjs"; import { defaultLiveKitOptions } from "./options"; import { type SFUConfig } from "./openIDSFU"; import { type MuteStates } from "../room/MuteStates"; -import { type MediaDeviceHandle, useMediaDevices } from "./MediaDevicesContext"; +import { useMediaDevices } from "../MediaDevicesContext"; import { type ECConnectionState, useECConnectionState, @@ -39,6 +45,8 @@ import { import { observeTrackReference$ } from "../state/MediaViewModel"; import { useUrlParams } from "../UrlParams"; import { useInitial } from "../useInitial"; +import { getValue } from "../utils/observable"; +import { type SelectedDevice } from "../state/MediaDevices"; interface UseLivekitResult { livekitRoom?: Room; @@ -56,7 +64,9 @@ export function useLivekit( const initialMuteStates = useInitial(() => muteStates); const devices = useMediaDevices(); - const initialDevices = useInitial(() => devices); + const initialAudioInputId = useInitial( + () => getValue(devices.audioInput.selected$)?.id, + ); // Store if audio/video are currently updating. If to prohibit unnecessary calls // to setMicrophoneEnabled/setCameraEnabled @@ -94,15 +104,20 @@ export function useLivekit( ...defaultLiveKitOptions, videoCaptureDefaults: { ...defaultLiveKitOptions.videoCaptureDefaults, - deviceId: initialDevices.videoInput.selectedId, + deviceId: getValue(devices.videoInput.selected$)?.id, processor, }, audioCaptureDefaults: { ...defaultLiveKitOptions.audioCaptureDefaults, - deviceId: initialDevices.audioInput.selectedId, + deviceId: initialAudioInputId, }, audioOutput: { - deviceId: initialDevices.audioOutput.selectedId, + // When using controlled audio devices, we don't want to set the + // deviceId here, because it will be set by the native app. + // (also the id does not need to match a browser device id) + deviceId: controlledAudioDevices + ? undefined + : getValue(devices.audioOutput.selected$)?.id, }, e2ee, }; @@ -157,7 +172,7 @@ export function useLivekit( ); const connectionState = useECConnectionState( - initialDevices.audioInput.selectedId, + initialAudioInputId, initialMuteStates.audio.enabled, room, sfuConfig, @@ -312,62 +327,65 @@ export function useLivekit( ) { const syncDevice = ( kind: MediaDeviceKind, - device: MediaDeviceHandle, - ): void => { - const id = device.selectedId; - - // Detect if we're trying to use chrome's default device, in which case - // we need to to see if the default device has changed to a different device - // by comparing the group ID of the device we're using against the group ID - // of what the default device is *now*. - // This is special-cased for only audio inputs because we need to dig around - // in the LocalParticipant object for the track object and there's not a nice - // way to do that generically. There is usually no OS-level default video capture - // device anyway, and audio outputs work differently. 
- if ( - id === "default" && - kind === "audioinput" && - room.options.audioCaptureDefaults?.deviceId === "default" - ) { - const activeMicTrack = Array.from( - room.localParticipant.audioTrackPublications.values(), - ).find((d) => d.source === Track.Source.Microphone)?.track; - + selected$: Observable, + ): Subscription => + selected$.subscribe((device) => { if ( - activeMicTrack && - // only restart if the stream is still running: LiveKit will detect - // when a track stops & restart appropriately, so this is not our job. - // Plus, we need to avoid restarting again if the track is already in - // the process of being restarted. - activeMicTrack.mediaStreamTrack.readyState !== "ended" && - device.selectedGroupId !== - activeMicTrack.mediaStreamTrack.getSettings().groupId + device !== undefined && + room.getActiveDevice(kind) !== device.id ) { - // It's different, so restart the track, ie. cause Livekit to do another - // getUserMedia() call with deviceId: default to get the *new* default device. - // Note that room.switchActiveDevice() won't work: Livekit will ignore it because - // the deviceId hasn't changed (was & still is default). - room.localParticipant - .getTrackPublication(Track.Source.Microphone) - ?.audioTrack?.restartTrack() - .catch((e) => { - logger.error(`Failed to restart audio device track`, e); - }); - } - } else { - if (id !== undefined && room.getActiveDevice(kind) !== id) { room - .switchActiveDevice(kind, id) + .switchActiveDevice(kind, device.id) .catch((e) => logger.error(`Failed to sync ${kind} device with LiveKit`, e), ); } - } - }; + }); - syncDevice("audioinput", devices.audioInput); - syncDevice("audiooutput", devices.audioOutput); - syncDevice("videoinput", devices.videoInput); + const subscriptions = [ + syncDevice("audioinput", devices.audioInput.selected$), + syncDevice("audiooutput", devices.audioOutput.selected$), + syncDevice("videoinput", devices.videoInput.selected$), + // Restart the audio input track whenever we detect that the active media + // device has changed to refer to a different hardware device. We do this + // for the sake of Chrome, which provides a "default" device that is meant + // to match the system's default audio input, whatever that may be. + // This is special-cased for only audio inputs because we need to dig around + // in the LocalParticipant object for the track object and there's not a nice + // way to do that generically. There is usually no OS-level default video capture + // device anyway, and audio outputs work differently. + devices.audioInput.selected$ + .pipe(switchMap((device) => device?.hardwareDeviceChange$ ?? NEVER)) + .subscribe(() => { + const activeMicTrack = Array.from( + room.localParticipant.audioTrackPublications.values(), + ).find((d) => d.source === Track.Source.Microphone)?.track; + + if ( + activeMicTrack && + // only restart if the stream is still running: LiveKit will detect + // when a track stops & restart appropriately, so this is not our job. + // Plus, we need to avoid restarting again if the track is already in + // the process of being restarted. + activeMicTrack.mediaStreamTrack.readyState !== "ended" + ) { + // Restart the track, which will cause Livekit to do another + // getUserMedia() call with deviceId: default to get the *new* default device. + // Note that room.switchActiveDevice() won't work: Livekit will ignore it because + // the deviceId hasn't changed (was & still is default). 
+ room.localParticipant + .getTrackPublication(Track.Source.Microphone) + ?.audioTrack?.restartTrack() + .catch((e) => { + logger.error(`Failed to restart audio device track`, e); + }); + } + }), + ]; + + return (): void => { + for (const s of subscriptions) s.unsubscribe(); + }; } }, [room, devices, connectionState, controlledAudioDevices]); diff --git a/src/main.tsx b/src/main.tsx index 654bd93c..06275f59 100644 --- a/src/main.tsx +++ b/src/main.tsx @@ -23,6 +23,7 @@ import { import { App } from "./App"; import { init as initRageshake } from "./settings/rageshake"; import { Initializer } from "./initializer"; +import { AppViewModel } from "./state/AppViewModel"; window.setLKLogLevel = setLKLogLevel; @@ -60,7 +61,7 @@ Initializer.initBeforeReact() .then(() => { root.render( - + , ); }) diff --git a/src/room/GroupCallView.test.tsx b/src/room/GroupCallView.test.tsx index 04b1b62a..a7c8de56 100644 --- a/src/room/GroupCallView.test.tsx +++ b/src/room/GroupCallView.test.tsx @@ -32,6 +32,7 @@ import { mockEmitter, mockMatrixRoom, mockMatrixRoomMember, + mockMediaDevices, mockRtcMembership, MockRTCSession, } from "../utils/test"; @@ -40,6 +41,7 @@ import { type WidgetHelpers } from "../widget"; import { LazyEventEmitter } from "../LazyEventEmitter"; import { MatrixRTCFocusMissingError } from "../utils/errors"; import { ProcessorProvider } from "../livekit/TrackProcessorContext"; +import { MediaDevicesContext } from "../MediaDevicesContext"; vi.mock("../soundUtils"); vi.mock("../useAudioContext"); @@ -147,20 +149,22 @@ function createGroupCallView( const { getByText } = render( - - - + + + + + , ); diff --git a/src/room/GroupCallView.tsx b/src/room/GroupCallView.tsx index 831cf84f..b981bdd6 100644 --- a/src/room/GroupCallView.tsx +++ b/src/room/GroupCallView.tsx @@ -40,7 +40,7 @@ import { useProfile } from "../profile/useProfile"; import { findDeviceByName } from "../utils/media"; import { ActiveCall } from "./InCallView"; import { MUTE_PARTICIPANT_COUNT, type MuteStates } from "./MuteStates"; -import { useMediaDevices } from "../livekit/MediaDevicesContext"; +import { useMediaDevices } from "../MediaDevicesContext"; import { useMatrixRTCSessionMemberships } from "../useMatrixRTCSessionMemberships"; import { enterRTCSession, leaveRTCSession } from "../rtcSessionHelpers"; import { @@ -197,8 +197,7 @@ export const GroupCallView: FC = ({ [memberships], ); - const deviceContext = useMediaDevices(); - const latestDevices = useLatest(deviceContext); + const mediaDevices = useMediaDevices(); const latestMuteStates = useLatest(muteStates); const enterRTCSessionOrError = useCallback( @@ -250,7 +249,7 @@ export const GroupCallView: FC = ({ logger.debug( `Found audio input ID ${deviceId} for name ${audioInput}`, ); - latestDevices.current!.audioInput.select(deviceId); + mediaDevices.audioInput.select(deviceId); } } @@ -264,7 +263,7 @@ export const GroupCallView: FC = ({ logger.debug( `Found video input ID ${deviceId} for name ${videoInput}`, ); - latestDevices.current!.videoInput.select(deviceId); + mediaDevices.videoInput.select(deviceId); } } }; @@ -306,7 +305,7 @@ export const GroupCallView: FC = ({ preload, skipLobby, perParticipantE2EE, - latestDevices, + mediaDevices, latestMuteStates, enterRTCSessionOrError, useNewMembershipManager, diff --git a/src/room/InCallView.test.tsx b/src/room/InCallView.test.tsx index 59ca7696..d681a584 100644 --- a/src/room/InCallView.test.tsx +++ b/src/room/InCallView.test.tsx @@ -31,6 +31,7 @@ import { mockLocalParticipant, mockMatrixRoom, mockMatrixRoomMember, + 
mockMediaDevices, mockRemoteParticipant, mockRtcMembership, type MockRTCSession, @@ -45,6 +46,7 @@ import { import { ReactionsSenderProvider } from "../reactions/useReactionsSender"; import { useRoomEncryptionSystem } from "../e2ee/sharedKeyManagement"; import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer"; +import { MediaDevicesContext } from "../MediaDevicesContext"; // vi.hoisted(() => { // localStorage = {} as unknown as Storage; @@ -147,41 +149,43 @@ function createInCallView(): RenderResult & { rtcSession.joined = true; const renderResult = render( - - - - - - - + + + + + + + + + , ); return { diff --git a/src/room/LobbyView.tsx b/src/room/LobbyView.tsx index f5b47cdd..eab29262 100644 --- a/src/room/LobbyView.tsx +++ b/src/room/LobbyView.tsx @@ -24,7 +24,7 @@ import { type LocalVideoTrack, Track, } from "livekit-client"; -import { useObservable } from "observable-hooks"; +import { useObservable, useObservableEagerState } from "observable-hooks"; import { map } from "rxjs"; import { useNavigate } from "react-router-dom"; @@ -45,7 +45,7 @@ import { SettingsModal, defaultSettingsTab } from "../settings/SettingsModal"; import { useMediaQuery } from "../useMediaQuery"; import { E2eeType } from "../e2ee/e2eeType"; import { Link } from "../button/Link"; -import { useMediaDevices } from "../livekit/MediaDevicesContext"; +import { useMediaDevices } from "../MediaDevicesContext"; import { useInitial } from "../useInitial"; import { useSwitchCamera as useShowSwitchCamera } from "./useSwitchCamera"; import { @@ -54,6 +54,7 @@ import { } from "../livekit/TrackProcessorContext"; import { usePageTitle } from "../usePageTitle"; import { useLatest } from "../useLatest"; +import { getValue } from "../utils/observable"; interface Props { client: MatrixClient; @@ -126,13 +127,18 @@ export const LobbyView: FC = ({ ); const devices = useMediaDevices(); + const videoInputId = useObservableEagerState( + devices.videoInput.selected$, + )?.id; // Capture the audio options as they were when we first mounted, because // we're not doing anything with the audio anyway so we don't need to // re-open the devices when they change (see below). const initialAudioOptions = useInitial( () => - muteStates.audio.enabled && { deviceId: devices.audioInput.selectedId }, + muteStates.audio.enabled && { + deviceId: getValue(devices.audioInput.selected$)?.id, + }, ); const { processor } = useTrackProcessor(); @@ -148,14 +154,14 @@ export const LobbyView: FC = ({ // which would cause the devices to be re-opened on the next render. audio: Object.assign({}, initialAudioOptions), video: muteStates.video.enabled && { - deviceId: devices.videoInput.selectedId, + deviceId: videoInputId, processor: initialProcessor, }, }), [ initialAudioOptions, muteStates.video.enabled, - devices.videoInput.selectedId, + videoInputId, initialProcessor, ], ); diff --git a/src/room/MuteStates.test.tsx b/src/room/MuteStates.test.tsx index 65e7d333..9d4a63e7 100644 --- a/src/room/MuteStates.test.tsx +++ b/src/room/MuteStates.test.tsx @@ -5,20 +5,29 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial Please see LICENSE in the repository root for full details. 
*/ -import { afterAll, afterEach, describe, expect, it, vi } from "vitest"; +import { + afterAll, + afterEach, + describe, + expect, + it, + onTestFinished, + vi, +} from "vitest"; import { type FC, useCallback, useState } from "react"; import { render, screen } from "@testing-library/react"; import { MemoryRouter } from "react-router-dom"; import userEvent from "@testing-library/user-event"; +import { createMediaDeviceObserver } from "@livekit/components-core"; +import { of } from "rxjs"; import { useMuteStates } from "./MuteStates"; -import { - type DeviceLabel, - type MediaDeviceHandle, - type MediaDevices, - MediaDevicesContext, -} from "../livekit/MediaDevicesContext"; +import { MediaDevicesContext } from "../MediaDevicesContext"; import { mockConfig } from "../utils/test"; +import { MediaDevices } from "../state/MediaDevices"; +import { ObservableScope } from "../state/ObservableScope"; + +vi.mock("@livekit/components-core"); interface TestComponentProps { isJoined?: boolean; @@ -73,16 +82,6 @@ const mockCamera: MediaDeviceInfo = { }, }; -function mockDevices(available: Map): MediaDeviceHandle { - return { - available, - selectedId: "", - selectedGroupId: "", - select: (): void => {}, - useAsEarpiece: false, - }; -} - function mockMediaDevices( { microphone, @@ -94,21 +93,21 @@ function mockMediaDevices( camera?: boolean; } = { microphone: true, speaker: true, camera: true }, ): MediaDevices { - return { - audioInput: mockDevices( - microphone - ? new Map([[mockMicrophone.deviceId, mockMicrophone]]) - : new Map(), - ), - audioOutput: mockDevices( - speaker ? new Map([[mockSpeaker.deviceId, mockSpeaker]]) : new Map(), - ), - videoInput: mockDevices( - camera ? new Map([[mockCamera.deviceId, mockCamera]]) : new Map(), - ), - startUsingDeviceNames: (): void => {}, - stopUsingDeviceNames: (): void => {}, - }; + vi.mocked(createMediaDeviceObserver).mockImplementation((kind) => { + switch (kind) { + case "audioinput": + return of(microphone ? [mockMicrophone] : []); + case "audiooutput": + return of(speaker ? [mockSpeaker] : []); + case "videoinput": + return of(camera ? 
[mockCamera] : []); + case undefined: + throw new Error("Unimplemented"); + } + }); + const scope = new ObservableScope(); + onTestFinished(() => scope.end()); + return new MediaDevices(scope); } describe("useMuteStates", () => { @@ -206,7 +205,12 @@ describe("useMuteStates", () => { const user = userEvent.setup(); mockConfig(); const noDevices = mockMediaDevices({ microphone: false, camera: false }); + // Warm up these Observables before making further changes to the + // createMediaDevicesObserver mock + noDevices.audioInput.available$.subscribe(() => {}).unsubscribe(); + noDevices.videoInput.available$.subscribe(() => {}).unsubscribe(); const someDevices = mockMediaDevices(); + const ReappearanceTest: FC = () => { const [devices, setDevices] = useState(someDevices); const onConnectDevicesClick = useCallback( diff --git a/src/room/MuteStates.ts b/src/room/MuteStates.ts index 5e0f6f6d..b9b32c35 100644 --- a/src/room/MuteStates.ts +++ b/src/room/MuteStates.ts @@ -14,11 +14,14 @@ import { } from "react"; import { type IWidgetApiRequest } from "matrix-widget-api"; import { logger } from "matrix-js-sdk/lib/logger"; +import { useObservableEagerState } from "observable-hooks"; import { - type MediaDeviceHandle, - useMediaDevices, -} from "../livekit/MediaDevicesContext"; + type DeviceLabel, + type SelectedDevice, + type MediaDevice, +} from "../state/MediaDevices"; +import { useMediaDevices } from "../MediaDevicesContext"; import { useReactiveState } from "../useReactiveState"; import { ElementWidgetActions, widget } from "../widget"; import { Config } from "../config/Config"; @@ -53,24 +56,24 @@ export interface MuteStates { } function useMuteState( - device: MediaDeviceHandle, + device: MediaDevice, enabledByDefault: () => boolean, ): MuteState { + const available = useObservableEagerState(device.available$); const [enabled, setEnabled] = useReactiveState( // Determine the default value once devices are actually connected - (prev) => - prev ?? (device.available.size > 0 ? enabledByDefault() : undefined), - [device.available.size], + (prev) => prev ?? (available.size > 0 ? enabledByDefault() : undefined), + [available.size], ); return useMemo( () => - device.available.size === 0 + available.size === 0 ? deviceUnavailable : { enabled: enabled ?? 
false, setEnabled: setEnabled as Dispatch>, }, - [device.available.size, enabled, setEnabled], + [available.size, enabled, setEnabled], ); } diff --git a/src/room/useSwitchCamera.ts b/src/room/useSwitchCamera.ts index 121ffb10..975776ae 100644 --- a/src/room/useSwitchCamera.ts +++ b/src/room/useSwitchCamera.ts @@ -22,7 +22,7 @@ import { import { useObservable, useObservableEagerState } from "observable-hooks"; import { logger } from "matrix-js-sdk/lib/logger"; -import { useMediaDevices } from "../livekit/MediaDevicesContext"; +import { useMediaDevices } from "../MediaDevicesContext"; import { platform } from "../Platform"; import { useLatest } from "../useLatest"; diff --git a/src/settings/DeviceSelection.tsx b/src/settings/DeviceSelection.tsx index aee043c6..50972326 100644 --- a/src/settings/DeviceSelection.tsx +++ b/src/settings/DeviceSelection.tsx @@ -21,15 +21,18 @@ import { Separator, } from "@vector-im/compound-web"; import { Trans, useTranslation } from "react-i18next"; +import { useObservableEagerState } from "observable-hooks"; import { - EARPIECE_CONFIG_ID, - type MediaDeviceHandle, -} from "../livekit/MediaDevicesContext"; + type AudioOutputDeviceLabel, + type DeviceLabel, + type SelectedDevice, + type MediaDevice, +} from "../state/MediaDevices"; import styles from "./DeviceSelection.module.css"; interface Props { - device: MediaDeviceHandle; + device: MediaDevice; title: string; numberedLabel: (number: number) => string; } @@ -41,6 +44,8 @@ export const DeviceSelection: FC = ({ }) => { const { t } = useTranslation(); const groupId = useId(); + const available = useObservableEagerState(device.available$); + const selectedId = useObservableEagerState(device.selected$)?.id; const onChange = useCallback( (e: ChangeEvent) => { device.select(e.target.value); @@ -49,7 +54,7 @@ export const DeviceSelection: FC = ({ ); // There is no need to show the menu if there is no choice that can be made. - if (device.available.size <= 1) return null; + if (available.size <= 1) return null; return (
@@ -64,7 +69,7 @@ export const DeviceSelection: FC = ({
- {[...device.available].map(([id, label]) => { + {[...available].map(([id, label]) => { let labelText: ReactNode; switch (label.type) { case "name": @@ -94,20 +99,13 @@ export const DeviceSelection: FC = ({ break; } - let isSelected = false; - if (device.useAsEarpiece) { - isSelected = id === EARPIECE_CONFIG_ID; - } else { - isSelected = id === device.selectedId; - } - return ( diff --git a/src/settings/SettingsModal.tsx b/src/settings/SettingsModal.tsx index dc4fa8a5..376fdd64 100644 --- a/src/settings/SettingsModal.tsx +++ b/src/settings/SettingsModal.tsx @@ -5,7 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial Please see LICENSE in the repository root for full details. */ -import { type FC, type ReactNode, useState } from "react"; +import { type FC, type ReactNode, useEffect, useState } from "react"; import { useTranslation } from "react-i18next"; import { type MatrixClient } from "matrix-js-sdk"; import { Button, Root as Form, Separator } from "@vector-im/compound-web"; @@ -17,11 +17,8 @@ import styles from "./SettingsModal.module.css"; import { type Tab, TabContainer } from "../tabs/Tabs"; import { ProfileSettingsTab } from "./ProfileSettingsTab"; import { FeedbackSettingsTab } from "./FeedbackSettingsTab"; -import { - useMediaDevices, - useMediaDeviceNames, - iosDeviceMenu$, -} from "../livekit/MediaDevicesContext"; +import { iosDeviceMenu$ } from "../state/MediaDevices"; +import { useMediaDevices } from "../MediaDevicesContext"; import { widget } from "../widget"; import { useSetting, @@ -98,7 +95,10 @@ export const SettingsModal: FC = ({ }; const devices = useMediaDevices(); - useMediaDeviceNames(devices, open); + useEffect(() => { + if (open) devices.requestDeviceNames(); + }, [open, devices]); + const [soundVolume, setSoundVolume] = useSetting(soundEffectVolumeSetting); const [soundVolumeRaw, setSoundVolumeRaw] = useState(soundVolume); const [showDeveloperSettingsTab] = useSetting(developerMode); diff --git a/src/state/AppViewModel.ts b/src/state/AppViewModel.ts new file mode 100644 index 00000000..5f65c226 --- /dev/null +++ b/src/state/AppViewModel.ts @@ -0,0 +1,19 @@ +/* +Copyright 2025 New Vector Ltd. + +SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial +Please see LICENSE in the repository root for full details. +*/ + +import { MediaDevices } from "./MediaDevices"; +import { ViewModel } from "./ViewModel"; + +/** + * The top-level state holder for the application. + */ +export class AppViewModel extends ViewModel { + public readonly mediaDevices = new MediaDevices(this.scope); + + // TODO: Move more application logic here. The CallViewModel, at the very + // least, ought to be accessible from this object. +} diff --git a/src/state/MediaDevices.ts b/src/state/MediaDevices.ts new file mode 100644 index 00000000..873cc1fc --- /dev/null +++ b/src/state/MediaDevices.ts @@ -0,0 +1,366 @@ +/* +Copyright 2025 New Vector Ltd. + +SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial +Please see LICENSE in the repository root for full details. 
+*/ + +import { + combineLatest, + filter, + map, + merge, + of, + pairwise, + startWith, + Subject, + switchMap, + type Observable, +} from "rxjs"; +import { createMediaDeviceObserver } from "@livekit/components-core"; +import { logger } from "matrix-js-sdk/lib/logger"; + +import { + audioInput as audioInputSetting, + audioOutput as audioOutputSetting, + videoInput as videoInputSetting, + alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting, +} from "../settings/settings"; +import { type ObservableScope } from "./ObservableScope"; +import { + outputDevice$ as controlledOutputSelection$, + availableOutputDevices$ as controlledAvailableOutputDevices$, +} from "../controls"; +import { getUrlParams } from "../UrlParams"; + +// This hardcoded id is used in EX ios! It can only be changed in coordination with +// the ios swift team. +const EARPIECE_CONFIG_ID = "earpiece-id"; + +export type DeviceLabel = + | { type: "name"; name: string } + | { type: "number"; number: number } + | { type: "default"; name: string | null }; + +export type AudioOutputDeviceLabel = DeviceLabel | { type: "earpiece" }; + +export interface SelectedDevice { + id: string; +} + +export interface SelectedAudioInputDevice extends SelectedDevice { + /** + * Emits whenever we think that this audio input device has logically changed + * to refer to a different hardware device. + */ + hardwareDeviceChange$: Observable; +} + +export interface SelectedAudioOutputDevice extends SelectedDevice { + /** + * Whether this device is a "virtual earpiece" device. If so, we should output + * on a single channel of the device at a reduced volume. + */ + virtualEarpiece: boolean; +} + +export interface MediaDevice { + /** + * A map from available device IDs to labels. + */ + available$: Observable>; + /** + * The selected device. + */ + selected$: Observable; + /** + * Selects a new device. + */ + select(id: string): void; +} + +/** + * An observable that represents if we should display the devices menu for iOS. + * This implies the following + * - hide any input devices (they do not work anyhow on ios) + * - Show a button to show the native output picker instead. + * - Only show the earpiece toggle option if the earpiece is available: + * `availableOutputDevices$.includes((d)=>d.forEarpiece)` + */ +export const iosDeviceMenu$ = navigator.userAgent.includes("iPhone") + ? of(true) + : alwaysShowIphoneEarpieceSetting.value$; + +function availableRawDevices$( + kind: MediaDeviceKind, + usingNames$: Observable, + scope: ObservableScope, +): Observable { + return usingNames$.pipe( + switchMap((usingNames) => + createMediaDeviceObserver( + kind, + (e) => logger.error("Error creating MediaDeviceObserver", e), + usingNames, + ), + ), + startWith([]), + scope.state(), + ); +} + +function buildDeviceMap( + availableRaw: MediaDeviceInfo[], +): Map { + return new Map( + availableRaw.map((d, i) => [ + d.deviceId, + d.label + ? { type: "name", name: d.label } + : { type: "number", number: i + 1 }, + ]), + ); +} + +function selectDevice$