Refactor media devices to live outside React as Observables (#3334)

* Refactor media devices to live outside React as Observables

This moves the media devices state out of React to further our transition to an MVVM architecture in which we can more easily model and store complex application state. I have created an AppViewModel to act as the overarching state holder for any future non-React state we end up creating, and the MediaDevices reside within this. We should move more application logic (including the CallViewModel itself) there in the future.

* Address review feedback

* Fixes from ios debugging session: (#3342)

- don't use the preferred vs. selected concept in controlled media. It's not needed since we don't use the ID for actual browser media devices (the IDs are not even actual browser media device IDs)
  - add more logging
  - add more conditions to avoid accidentally setting a deviceId that is not a browser deviceId but one provided via controlled media.

---------

Co-authored-by: Timo <16718859+toger5@users.noreply.github.com>
This commit is contained in:
Robin
2025-06-20 12:37:25 -04:00
committed by GitHub
parent 5bf7361d01
commit 5e2e94d794
24 changed files with 763 additions and 682 deletions

View File

@@ -32,6 +32,7 @@ import {
mockEmitter,
mockMatrixRoom,
mockMatrixRoomMember,
mockMediaDevices,
mockRtcMembership,
MockRTCSession,
} from "../utils/test";
@@ -40,6 +41,7 @@ import { type WidgetHelpers } from "../widget";
import { LazyEventEmitter } from "../LazyEventEmitter";
import { MatrixRTCFocusMissingError } from "../utils/errors";
import { ProcessorProvider } from "../livekit/TrackProcessorContext";
import { MediaDevicesContext } from "../MediaDevicesContext";
vi.mock("../soundUtils");
vi.mock("../useAudioContext");
@@ -147,20 +149,22 @@ function createGroupCallView(
const { getByText } = render(
<BrowserRouter>
<TooltipProvider>
<ProcessorProvider>
<GroupCallView
client={client}
isPasswordlessUser={false}
confineToRoom={false}
preload={false}
skipLobby={false}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
isJoined={joined}
muteStates={muteState}
widget={widget}
/>
</ProcessorProvider>
<MediaDevicesContext.Provider value={mockMediaDevices({})}>
<ProcessorProvider>
<GroupCallView
client={client}
isPasswordlessUser={false}
confineToRoom={false}
preload={false}
skipLobby={false}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
isJoined={joined}
muteStates={muteState}
widget={widget}
/>
</ProcessorProvider>
</MediaDevicesContext.Provider>
</TooltipProvider>
</BrowserRouter>,
);

View File

@@ -40,7 +40,7 @@ import { useProfile } from "../profile/useProfile";
import { findDeviceByName } from "../utils/media";
import { ActiveCall } from "./InCallView";
import { MUTE_PARTICIPANT_COUNT, type MuteStates } from "./MuteStates";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useMediaDevices } from "../MediaDevicesContext";
import { useMatrixRTCSessionMemberships } from "../useMatrixRTCSessionMemberships";
import { enterRTCSession, leaveRTCSession } from "../rtcSessionHelpers";
import {
@@ -197,8 +197,7 @@ export const GroupCallView: FC<Props> = ({
[memberships],
);
const deviceContext = useMediaDevices();
const latestDevices = useLatest(deviceContext);
const mediaDevices = useMediaDevices();
const latestMuteStates = useLatest(muteStates);
const enterRTCSessionOrError = useCallback(
@@ -250,7 +249,7 @@ export const GroupCallView: FC<Props> = ({
logger.debug(
`Found audio input ID ${deviceId} for name ${audioInput}`,
);
latestDevices.current!.audioInput.select(deviceId);
mediaDevices.audioInput.select(deviceId);
}
}
@@ -264,7 +263,7 @@ export const GroupCallView: FC<Props> = ({
logger.debug(
`Found video input ID ${deviceId} for name ${videoInput}`,
);
latestDevices.current!.videoInput.select(deviceId);
mediaDevices.videoInput.select(deviceId);
}
}
};
@@ -306,7 +305,7 @@ export const GroupCallView: FC<Props> = ({
preload,
skipLobby,
perParticipantE2EE,
latestDevices,
mediaDevices,
latestMuteStates,
enterRTCSessionOrError,
useNewMembershipManager,

View File

@@ -31,6 +31,7 @@ import {
mockLocalParticipant,
mockMatrixRoom,
mockMatrixRoomMember,
mockMediaDevices,
mockRemoteParticipant,
mockRtcMembership,
type MockRTCSession,
@@ -45,6 +46,7 @@ import {
import { ReactionsSenderProvider } from "../reactions/useReactionsSender";
import { useRoomEncryptionSystem } from "../e2ee/sharedKeyManagement";
import { MatrixAudioRenderer } from "../livekit/MatrixAudioRenderer";
import { MediaDevicesContext } from "../MediaDevicesContext";
// vi.hoisted(() => {
// localStorage = {} as unknown as Storage;
@@ -147,41 +149,43 @@ function createInCallView(): RenderResult & {
rtcSession.joined = true;
const renderResult = render(
<BrowserRouter>
<ReactionsSenderProvider
vm={vm}
rtcSession={rtcSession as unknown as MatrixRTCSession}
>
<TooltipProvider>
<RoomContext.Provider value={livekitRoom}>
<InCallView
client={client}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
muteStates={muteState}
vm={vm}
matrixInfo={{
userId: "",
displayName: "",
avatarUrl: "",
roomId: "",
roomName: "",
roomAlias: null,
roomAvatar: null,
e2eeSystem: {
kind: E2eeType.NONE,
},
}}
livekitRoom={livekitRoom}
participantCount={0}
onLeave={function (): void {
throw new Error("Function not implemented.");
}}
connState={ConnectionState.Connected}
onShareClick={null}
/>
</RoomContext.Provider>
</TooltipProvider>
</ReactionsSenderProvider>
<MediaDevicesContext.Provider value={mockMediaDevices({})}>
<ReactionsSenderProvider
vm={vm}
rtcSession={rtcSession as unknown as MatrixRTCSession}
>
<TooltipProvider>
<RoomContext.Provider value={livekitRoom}>
<InCallView
client={client}
hideHeader={true}
rtcSession={rtcSession as unknown as MatrixRTCSession}
muteStates={muteState}
vm={vm}
matrixInfo={{
userId: "",
displayName: "",
avatarUrl: "",
roomId: "",
roomName: "",
roomAlias: null,
roomAvatar: null,
e2eeSystem: {
kind: E2eeType.NONE,
},
}}
livekitRoom={livekitRoom}
participantCount={0}
onLeave={function (): void {
throw new Error("Function not implemented.");
}}
connState={ConnectionState.Connected}
onShareClick={null}
/>
</RoomContext.Provider>
</TooltipProvider>
</ReactionsSenderProvider>
</MediaDevicesContext.Provider>
</BrowserRouter>,
);
return {

View File

@@ -24,7 +24,7 @@ import {
type LocalVideoTrack,
Track,
} from "livekit-client";
import { useObservable } from "observable-hooks";
import { useObservable, useObservableEagerState } from "observable-hooks";
import { map } from "rxjs";
import { useNavigate } from "react-router-dom";
@@ -45,7 +45,7 @@ import { SettingsModal, defaultSettingsTab } from "../settings/SettingsModal";
import { useMediaQuery } from "../useMediaQuery";
import { E2eeType } from "../e2ee/e2eeType";
import { Link } from "../button/Link";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useMediaDevices } from "../MediaDevicesContext";
import { useInitial } from "../useInitial";
import { useSwitchCamera as useShowSwitchCamera } from "./useSwitchCamera";
import {
@@ -54,6 +54,7 @@ import {
} from "../livekit/TrackProcessorContext";
import { usePageTitle } from "../usePageTitle";
import { useLatest } from "../useLatest";
import { getValue } from "../utils/observable";
interface Props {
client: MatrixClient;
@@ -126,13 +127,18 @@ export const LobbyView: FC<Props> = ({
);
const devices = useMediaDevices();
const videoInputId = useObservableEagerState(
devices.videoInput.selected$,
)?.id;
// Capture the audio options as they were when we first mounted, because
// we're not doing anything with the audio anyway so we don't need to
// re-open the devices when they change (see below).
const initialAudioOptions = useInitial(
() =>
muteStates.audio.enabled && { deviceId: devices.audioInput.selectedId },
muteStates.audio.enabled && {
deviceId: getValue(devices.audioInput.selected$)?.id,
},
);
const { processor } = useTrackProcessor();
@@ -148,14 +154,14 @@ export const LobbyView: FC<Props> = ({
// which would cause the devices to be re-opened on the next render.
audio: Object.assign({}, initialAudioOptions),
video: muteStates.video.enabled && {
deviceId: devices.videoInput.selectedId,
deviceId: videoInputId,
processor: initialProcessor,
},
}),
[
initialAudioOptions,
muteStates.video.enabled,
devices.videoInput.selectedId,
videoInputId,
initialProcessor,
],
);

View File

@@ -5,20 +5,29 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { afterAll, afterEach, describe, expect, it, vi } from "vitest";
import {
afterAll,
afterEach,
describe,
expect,
it,
onTestFinished,
vi,
} from "vitest";
import { type FC, useCallback, useState } from "react";
import { render, screen } from "@testing-library/react";
import { MemoryRouter } from "react-router-dom";
import userEvent from "@testing-library/user-event";
import { createMediaDeviceObserver } from "@livekit/components-core";
import { of } from "rxjs";
import { useMuteStates } from "./MuteStates";
import {
type DeviceLabel,
type MediaDeviceHandle,
type MediaDevices,
MediaDevicesContext,
} from "../livekit/MediaDevicesContext";
import { MediaDevicesContext } from "../MediaDevicesContext";
import { mockConfig } from "../utils/test";
import { MediaDevices } from "../state/MediaDevices";
import { ObservableScope } from "../state/ObservableScope";
vi.mock("@livekit/components-core");
interface TestComponentProps {
isJoined?: boolean;
@@ -73,16 +82,6 @@ const mockCamera: MediaDeviceInfo = {
},
};
function mockDevices(available: Map<string, DeviceLabel>): MediaDeviceHandle {
return {
available,
selectedId: "",
selectedGroupId: "",
select: (): void => {},
useAsEarpiece: false,
};
}
function mockMediaDevices(
{
microphone,
@@ -94,21 +93,21 @@ function mockMediaDevices(
camera?: boolean;
} = { microphone: true, speaker: true, camera: true },
): MediaDevices {
return {
audioInput: mockDevices(
microphone
? new Map([[mockMicrophone.deviceId, mockMicrophone]])
: new Map(),
),
audioOutput: mockDevices(
speaker ? new Map([[mockSpeaker.deviceId, mockSpeaker]]) : new Map(),
),
videoInput: mockDevices(
camera ? new Map([[mockCamera.deviceId, mockCamera]]) : new Map(),
),
startUsingDeviceNames: (): void => {},
stopUsingDeviceNames: (): void => {},
};
vi.mocked(createMediaDeviceObserver).mockImplementation((kind) => {
switch (kind) {
case "audioinput":
return of(microphone ? [mockMicrophone] : []);
case "audiooutput":
return of(speaker ? [mockSpeaker] : []);
case "videoinput":
return of(camera ? [mockCamera] : []);
case undefined:
throw new Error("Unimplemented");
}
});
const scope = new ObservableScope();
onTestFinished(() => scope.end());
return new MediaDevices(scope);
}
describe("useMuteStates", () => {
@@ -206,7 +205,12 @@ describe("useMuteStates", () => {
const user = userEvent.setup();
mockConfig();
const noDevices = mockMediaDevices({ microphone: false, camera: false });
// Warm up these Observables before making further changes to the
// createMediaDevicesObserver mock
noDevices.audioInput.available$.subscribe(() => {}).unsubscribe();
noDevices.videoInput.available$.subscribe(() => {}).unsubscribe();
const someDevices = mockMediaDevices();
const ReappearanceTest: FC = () => {
const [devices, setDevices] = useState(someDevices);
const onConnectDevicesClick = useCallback(

View File

@@ -14,11 +14,14 @@ import {
} from "react";
import { type IWidgetApiRequest } from "matrix-widget-api";
import { logger } from "matrix-js-sdk/lib/logger";
import { useObservableEagerState } from "observable-hooks";
import {
type MediaDeviceHandle,
useMediaDevices,
} from "../livekit/MediaDevicesContext";
type DeviceLabel,
type SelectedDevice,
type MediaDevice,
} from "../state/MediaDevices";
import { useMediaDevices } from "../MediaDevicesContext";
import { useReactiveState } from "../useReactiveState";
import { ElementWidgetActions, widget } from "../widget";
import { Config } from "../config/Config";
@@ -53,24 +56,24 @@ export interface MuteStates {
}
function useMuteState(
device: MediaDeviceHandle,
device: MediaDevice<DeviceLabel, SelectedDevice>,
enabledByDefault: () => boolean,
): MuteState {
const available = useObservableEagerState(device.available$);
const [enabled, setEnabled] = useReactiveState<boolean | undefined>(
// Determine the default value once devices are actually connected
(prev) =>
prev ?? (device.available.size > 0 ? enabledByDefault() : undefined),
[device.available.size],
(prev) => prev ?? (available.size > 0 ? enabledByDefault() : undefined),
[available.size],
);
return useMemo(
() =>
device.available.size === 0
available.size === 0
? deviceUnavailable
: {
enabled: enabled ?? false,
setEnabled: setEnabled as Dispatch<SetStateAction<boolean>>,
},
[device.available.size, enabled, setEnabled],
[available.size, enabled, setEnabled],
);
}

View File

@@ -22,7 +22,7 @@ import {
import { useObservable, useObservableEagerState } from "observable-hooks";
import { logger } from "matrix-js-sdk/lib/logger";
import { useMediaDevices } from "../livekit/MediaDevicesContext";
import { useMediaDevices } from "../MediaDevicesContext";
import { platform } from "../Platform";
import { useLatest } from "../useLatest";