Merge branch 'livekit' into fkwp/delegation_of_delayed_events

This commit is contained in:
fkwp
2026-03-27 15:29:16 +01:00
118 changed files with 7230 additions and 4014 deletions

39
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@@ -0,0 +1,39 @@
<!-- Thanks for submitting a PR! Please ensure the following requirements are met in order for us to review your PR -->
## Content
<!-- Describe shortly what has been changed -->
## Motivation and context
<!-- Provide link to the corresponding issue if applicable or explain the context -->
## Screenshots / GIFs
<!--
You can use a table like this to show screenshots comparison.
Uncomment this markdown table below and edit the last line `|||`:
|copy screenshot of before here|copy screenshot of after here|
|Before|After|
|-|-|
|||
-->
## Tests
<!-- Explain how you tested your development -->
- Step 1
- Step 2
- Step ...
-
## Checklist
- [ ] I have read through [CONTRIBUTING.md](https://github.com/element-hq/element-call/blob/livekit/CONTRIBUTING.md).
- [ ] Pull request includes screenshots or videos if containing UI changes
- [ ] Tests written for new code (and old code if feasible).
- [ ] Linter and other CI checks pass.
- [ ] I have licensed the changes to Element by completing the [Contributor License Agreement (CLA)](https://cla-assistant.io/element-hq/element-call)

View File

@@ -1,7 +1,16 @@
name: Prevent blocked
on:
# zizmor: ignore[dangerous-triggers]
# Reason: This workflow does not checkout code or use secrets.
# It only reads labels to set a failure status on the PR.
pull_request_target:
types: [opened, labeled, unlabeled, synchronize]
permissions:
pull-requests: read
# Required to fail the check on the PR
statuses: write
jobs:
prevent-blocked:
name: Prevent blocked

View File

@@ -20,10 +20,13 @@ jobs:
runs-on: ubuntu-latest
permissions:
contents: write # required to upload release asset
packages: write
packages: write # needed for publishing packages to GHCR
id-token: write # needed for login into tailscale with GitHub OIDC Token
steps:
- name: Check it out
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: 📥 Download artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
@@ -34,26 +37,64 @@ jobs:
path: dist
- name: Log in to container registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Connect to Tailscale
uses: tailscale/github-action@306e68a486fd2350f2bfc3b19fcd143891a4a2d8 # v4
if: github.event_name != 'pull_request'
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
audience: ${{ secrets.TS_AUDIENCE }}
tags: tag:github-actions
- name: Compute vault jwt role name
id: vault-jwt-role
if: github.event_name != 'pull_request'
run: |
echo "role_name=github_service_management_$( echo "${{ github.repository }}" | sed -r 's|[/-]|_|g')" | tee -a "$GITHUB_OUTPUT"
- name: Get team registry token
id: import-secrets
uses: hashicorp/vault-action@4c06c5ccf5c0761b6029f56cfb1dcf5565918a3b # v3
if: github.event_name != 'pull_request'
with:
url: https://vault.infra.ci.i.element.dev
role: ${{ steps.vault-jwt-role.outputs.role_name }}
path: service-management/github-actions
jwtGithubAudience: https://vault.infra.ci.i.element.dev
method: jwt
secrets: |
services/voip-repositories/secret/data/oci.element.io username | OCI_USERNAME ;
services/voip-repositories/secret/data/oci.element.io password | OCI_PASSWORD ;
- name: Login to oci.element.io Registry
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
if: github.event_name != 'pull_request'
with:
registry: oci-push.vpn.infra.element.io
username: ${{ steps.import-secrets.outputs.OCI_USERNAME }}
password: ${{ steps.import-secrets.outputs.OCI_PASSWORD }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: ${{ inputs.docker_tags}}
images: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
oci-push.vpn.infra.element.io/element-call
tags: ${{ inputs.docker_tags }}
labels: |
org.opencontainers.image.licenses=AGPL-3.0-only OR LicenseRef-Element-Commercial
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- name: Build and push Docker image
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
with:
context: .
platforms: linux/amd64,linux/arm64

View File

@@ -7,7 +7,7 @@ on:
type: string
package:
type: string # This would ideally be a `choice` type, but that isn't supported yet
description: The package type to be built. Must be one of 'full' or 'embedded'
description: The package type to be built. Must be one of 'full', 'embedded', or 'sdk'
required: true
build_mode:
type: string # This would ideally be a `choice` type, but that isn't supported yet
@@ -33,6 +33,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Enable Corepack
run: corepack enable
- name: Yarn cache
@@ -43,7 +45,7 @@ jobs:
- name: Install dependencies
run: "yarn install --immutable"
- name: Build Element Call
run: ${{ format('yarn run build:{0}:{1}', inputs.package, inputs.build_mode) }}
run: yarn run build:"$PACKAGE":"$BUILD_MODE"
env:
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
@@ -52,6 +54,8 @@ jobs:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
VITE_APP_VERSION: ${{ inputs.vite_app_version }}
NODE_OPTIONS: "--max-old-space-size=4096"
PACKAGE: ${{ inputs.package }}
BUILD_MODE: ${{ inputs.build_mode }}
- name: Upload Artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
with:

View File

@@ -49,7 +49,9 @@ jobs:
permissions:
contents: write
packages: write
id-token: write
uses: ./.github/workflows/build-and-publish-docker.yaml
secrets: inherit
with:
artifact_run_id: ${{ github.run_id }}
docker_tags: |
@@ -69,3 +71,17 @@ jobs:
SENTRY_URL: ${{ secrets.SENTRY_URL }}
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build_sdk_element_call:
# Use the embedded package vite build
uses: ./.github/workflows/build-element-call.yaml
with:
package: sdk
vite_app_version: ${{ github.event.release.tag_name || github.sha }}
build_mode: ${{ github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'development build') && 'development' || 'production' }}
secrets:
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
SENTRY_URL: ${{ secrets.SENTRY_URL }}
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -1,8 +1,16 @@
name: PR changelog label
on:
# zizmor: ignore[dangerous-triggers]
# This is safe because we do not use actions/checkout or execute untrusted code.
# Using pull_request_target is necessary to allow status writes for PRs from forks.
pull_request_target:
types: [labeled, unlabeled, opened]
permissions:
pull-requests: read
statuses: write
jobs:
pr-changelog-label:
runs-on: ubuntu-latest

View File

@@ -14,6 +14,10 @@ on:
deployment_ref:
required: true
type: string
package:
required: true
type: string
description: Which package to deploy - 'full', 'embedded', or 'sdk'
artifact_run_id:
required: false
type: string
@@ -50,7 +54,7 @@ jobs:
with:
github-token: ${{ secrets.ELEMENT_BOT_TOKEN }}
run-id: ${{ inputs.artifact_run_id }}
name: build-output-full
name: build-output-${{ inputs.package }}
path: webapp
- name: Add redirects file
@@ -58,15 +62,23 @@ jobs:
run: curl -s https://raw.githubusercontent.com/element-hq/element-call/main/config/netlify_redirects > webapp/_redirects
- name: Add config file
run: curl -s "https://raw.githubusercontent.com/${{ inputs.pr_head_full_name }}/${{ inputs.pr_head_ref }}/config/config_netlify_preview.json" > webapp/config.json
run: |
if [ "${INPUTS_PACKAGE}" = "full" ]; then
curl -s "https://raw.githubusercontent.com/${INPUTS_PR_HEAD_FULL_NAME}/${INPUTS_PR_HEAD_REF}/config/config_netlify_preview.json" > webapp/config.json
else
curl -s "https://raw.githubusercontent.com/${INPUTS_PR_HEAD_FULL_NAME}/${INPUTS_PR_HEAD_REF}/config/config_netlify_preview_sdk.json" > webapp/config.json
fi
env:
INPUTS_PACKAGE: ${{ inputs.package }}
INPUTS_PR_HEAD_FULL_NAME: ${{ inputs.pr_head_full_name }}
INPUTS_PR_HEAD_REF: ${{ inputs.pr_head_ref }}
- name: ☁️ Deploy to Netlify
id: netlify
uses: nwtgck/actions-netlify@4cbaf4c08f1a7bfa537d6113472ef4424e4eb654 # v3.0
with:
publish-dir: webapp
deploy-message: "Deploy from GitHub Actions"
alias: pr${{ inputs.pr_number }}
alias: ${{ inputs.package == 'sdk' && format('pr{0}-sdk', inputs.pr_number) || format('pr{0}', inputs.pr_number) }}
env:
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}

View File

@@ -8,6 +8,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Enable Corepack
run: corepack enable
- name: Yarn cache

View File

@@ -1,5 +1,7 @@
name: Deploy previews for PRs
on:
# zizmor: ignore[dangerous-triggers]
# Reason: This is now restricted to internal PRs only using the 'if' condition below.
workflow_run:
workflows: ["Build"]
types:
@@ -7,7 +9,14 @@ on:
jobs:
prdetails:
if: ${{ github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request' }}
# Logic:
# 1. Build must be successful
# 2. Event must be a pull_request
# 3. Head repository must be the SAME as the base repository (No Forks!)
if: >
github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.event == 'pull_request' &&
github.event.workflow_run.head_repository.full_name == github.repository
runs-on: ubuntu-latest
outputs:
pr_number: ${{ steps.prdetails.outputs.pr_id }}
@@ -20,7 +29,7 @@ jobs:
owner: ${{ github.event.workflow_run.head_repository.owner.login }}
branch: ${{ github.event.workflow_run.head_branch }}
netlify:
netlify-full:
needs: prdetails
permissions:
deployments: write
@@ -31,6 +40,24 @@ jobs:
pr_head_full_name: ${{ github.event.workflow_run.head_repository.full_name }}
pr_head_ref: ${{ needs.prdetails.outputs.pr_data_json && fromJSON(needs.prdetails.outputs.pr_data_json).head.ref }}
deployment_ref: ${{ needs.prdetails.outputs.pr_data_json && fromJSON(needs.prdetails.outputs.pr_data_json).head.sha || github.ref || github.head_ref }}
package: full
secrets:
ELEMENT_BOT_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }}
netlify-sdk:
needs: prdetails
permissions:
deployments: write
uses: ./.github/workflows/deploy-to-netlify.yaml
with:
artifact_run_id: ${{ github.event.workflow_run.id || github.run_id }}
pr_number: ${{ needs.prdetails.outputs.pr_number }}
pr_head_full_name: ${{ github.event.workflow_run.head_repository.full_name }}
pr_head_ref: ${{ needs.prdetails.outputs.pr_data_json && fromJSON(needs.prdetails.outputs.pr_data_json).head.ref }}
deployment_ref: ${{ needs.prdetails.outputs.pr_data_json && fromJSON(needs.prdetails.outputs.pr_data_json).head.sha || github.ref || github.head_ref }}
package: sdk
secrets:
ELEMENT_BOT_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }}
@@ -42,7 +69,9 @@ jobs:
permissions:
contents: write
packages: write
id-token: write
uses: ./.github/workflows/build-and-publish-docker.yaml
secrets: inherit
with:
artifact_run_id: ${{ github.event.workflow_run.id || github.run_id }}
docker_tags: |

View File

@@ -22,8 +22,18 @@ jobs:
TAG: ${{ steps.tag.outputs.TAG }}
steps:
- name: Calculate VERSION
# We should only use the hard coded test value for a dry run
run: echo "VERSION=${{ github.event_name == 'release' && github.event.release.tag_name || 'v0.0.0-pre.0' }}" >> "$GITHUB_ENV"
# Safely store dynamic values in environment variables
# to prevent shell injection (template-injection)
run: |
# The logic is executed within the shell using the env variables
if [ "$EVENT_NAME" = "release" ]; then
echo "VERSION=$RELEASE_TAG" >> "$GITHUB_ENV"
else
echo "VERSION=v0.0.0-pre.0" >> "$GITHUB_ENV"
fi
env:
RELEASE_TAG: ${{ github.event.release.tag_name }}
EVENT_NAME: ${{ github.event_name }}
- id: dry_run
name: Set DRY_RUN
# We perform a dry run for all events except releases.
@@ -71,7 +81,9 @@ jobs:
contents: write # required to upload release asset
steps:
- name: Determine filename
run: echo "FILENAME_PREFIX=element-call-embedded-${{ needs.versioning.outputs.UNPREFIXED_VERSION }}" >> "$GITHUB_ENV"
run: echo "FILENAME_PREFIX=element-call-embedded-${NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION}" >> "$GITHUB_ENV"
env:
NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION: ${{ needs.versioning.outputs.UNPREFIXED_VERSION }}
- name: 📥 Download built element-call artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
with:
@@ -80,9 +92,9 @@ jobs:
name: build-output-embedded
path: ${{ env.FILENAME_PREFIX}}
- name: Create Tarball
run: tar --numeric-owner -cvzf ${{ env.FILENAME_PREFIX }}.tar.gz ${{ env.FILENAME_PREFIX }}
run: tar --numeric-owner -cvzf ${FILENAME_PREFIX}.tar.gz ${FILENAME_PREFIX}
- name: Create Checksum
run: find ${{ env.FILENAME_PREFIX }} -type f -print0 | sort -z | xargs -0 sha256sum | tee ${{ env.FILENAME_PREFIX }}.sha256
run: find ${FILENAME_PREFIX} -type f -print0 | sort -z | xargs -0 sha256sum | tee ${FILENAME_PREFIX}.sha256
- name: Upload
if: ${{ needs.versioning.outputs.DRY_RUN == 'false' }}
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
@@ -104,6 +116,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: 📥 Download built element-call artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
@@ -123,13 +137,16 @@ jobs:
- name: Publish npm
working-directory: embedded/web
run: |
npm version ${{ needs.versioning.outputs.PREFIXED_VERSION }} --no-git-tag-version
npm version ${NEEDS_VERSIONING_OUTPUTS_PREFIXED_VERSION} --no-git-tag-version
echo "ARTIFACT_VERSION=$(jq '.version' --raw-output package.json)" >> "$GITHUB_ENV"
npm publish --provenance --access public --tag ${{ needs.versioning.outputs.TAG }} ${{ needs.versioning.outputs.DRY_RUN == 'true' && '--dry-run' || '' }}
npm publish --provenance --access public --tag ${NEEDS_VERSIONING_OUTPUTS_TAG} ${{ needs.versioning.outputs.DRY_RUN == 'true' && '--dry-run' || '' }}
env:
NEEDS_VERSIONING_OUTPUTS_PREFIXED_VERSION: ${{ needs.versioning.outputs.PREFIXED_VERSION }}
NEEDS_VERSIONING_OUTPUTS_TAG: ${{ needs.versioning.outputs.TAG }}
- id: artifact_version
name: Output artifact version
run: echo "ARTIFACT_VERSION=${{env.ARTIFACT_VERSION}}" >> "$GITHUB_OUTPUT"
run: echo "ARTIFACT_VERSION=${ARTIFACT_VERSION}" >> "$GITHUB_OUTPUT"
publish_android:
needs: [build_element_call, versioning]
@@ -143,6 +160,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: 📥 Download built element-call artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
@@ -153,7 +172,7 @@ jobs:
path: embedded/android/lib/src/main/assets/element-call
- name: ☕️ Setup Java
uses: actions/setup-java@c5195efecf7bdfc987ee8bae7a71cb8b11521c00 # v4
uses: actions/setup-java@c1e323688fd81a25caa38c78aa6df2d33d3e20d9 # v4
with:
distribution: "temurin"
java-version: "17"
@@ -161,16 +180,19 @@ jobs:
- name: Get artifact version
# Anything that is not a final release will be tagged as a snapshot
run: |
if [[ "${{ needs.versioning.outputs.TAG }}" == "latest" ]]; then
echo "ARTIFACT_VERSION=${{ needs.versioning.outputs.UNPREFIXED_VERSION }}" >> "$GITHUB_ENV"
elif [[ "${{ needs.versioning.outputs.TAG }}" == "rc" ]]; then
echo "ARTIFACT_VERSION=${{ needs.versioning.outputs.UNPREFIXED_VERSION }}" >> "$GITHUB_ENV"
if [[ "${NEEDS_VERSIONING_OUTPUTS_TAG}" == "latest" ]]; then
echo "ARTIFACT_VERSION=${NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION}" >> "$GITHUB_ENV"
elif [[ "${NEEDS_VERSIONING_OUTPUTS_TAG}" == "rc" ]]; then
echo "ARTIFACT_VERSION=${NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION}" >> "$GITHUB_ENV"
else
echo "ARTIFACT_VERSION=${{ needs.versioning.outputs.UNPREFIXED_VERSION }}-SNAPSHOT" >> "$GITHUB_ENV"
echo "ARTIFACT_VERSION=${NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION}-SNAPSHOT" >> "$GITHUB_ENV"
fi
env:
NEEDS_VERSIONING_OUTPUTS_TAG: ${{ needs.versioning.outputs.TAG }}
NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION: ${{ needs.versioning.outputs.UNPREFIXED_VERSION }}
- name: Set version string
run: sed -i "s/0.0.0/${{ env.ARTIFACT_VERSION }}/g" embedded/android/lib/src/main/kotlin/io/element/android/call/embedded/Version.kt
run: sed -i "s/0.0.0/${ARTIFACT_VERSION}/g" embedded/android/lib/src/main/kotlin/io/element/android/call/embedded/Version.kt
- name: Publish AAR
working-directory: embedded/android
@@ -184,7 +206,7 @@ jobs:
- id: artifact_version
name: Output artifact version
run: echo "ARTIFACT_VERSION=${{env.ARTIFACT_VERSION}}" >> "$GITHUB_OUTPUT"
run: echo "ARTIFACT_VERSION=${ARTIFACT_VERSION}" >> "$GITHUB_OUTPUT"
publish_ios:
needs: [build_element_call, versioning]
@@ -200,6 +222,7 @@ jobs:
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
path: element-call
persist-credentials: false
- name: 📥 Download built element-call artifact
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
@@ -215,15 +238,18 @@ jobs:
repository: element-hq/element-call-swift
path: element-call-swift
token: ${{ secrets.SWIFT_RELEASE_TOKEN }}
persist-credentials: false
- name: Copy files
run: rsync -a --delete --exclude .git element-call/embedded/ios/ element-call-swift
- name: Get artifact version
run: echo "ARTIFACT_VERSION=${{ needs.versioning.outputs.UNPREFIXED_VERSION }}" >> "$GITHUB_ENV"
run: echo "ARTIFACT_VERSION=${NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION}" >> "$GITHUB_ENV"
env:
NEEDS_VERSIONING_OUTPUTS_UNPREFIXED_VERSION: ${{ needs.versioning.outputs.UNPREFIXED_VERSION }}
- name: Set version string
run: sed -i "s/0.0.0/${{ env.ARTIFACT_VERSION }}/g" element-call-swift/Sources/EmbeddedElementCall/EmbeddedElementCall.swift
run: sed -i "s/0.0.0/${ARTIFACT_VERSION}/g" element-call-swift/Sources/EmbeddedElementCall/EmbeddedElementCall.swift
- name: Test build
working-directory: element-call-swift
@@ -235,17 +261,22 @@ jobs:
git config --global user.email "ci@element.io"
git config --global user.name "Element CI"
git add -A
git commit -am "Release ${{ needs.versioning.outputs.PREFIXED_VERSION }}"
git tag -a ${{ env.ARTIFACT_VERSION }} -m "${{ github.event.release.html_url }}"
git commit -am "Release ${NEEDS_VERSIONING_OUTPUTS_PREFIXED_VERSION}"
git tag -a ${ARTIFACT_VERSION} -m "${GITHUB_EVENT_RELEASE_HTML_URL}"
env:
NEEDS_VERSIONING_OUTPUTS_PREFIXED_VERSION: ${{ needs.versioning.outputs.PREFIXED_VERSION }}
GITHUB_EVENT_RELEASE_HTML_URL: ${{ github.event.release.html_url }}
- name: Push
working-directory: element-call-swift
run: |
git push --tags ${{ needs.versioning.outputs.DRY_RUN == 'true' && '--dry-run' || '' }}
git push "https://x-access-token:${SWIFT_RELEASE_TOKEN}@github.com/element-hq/element-call-swift.git" --tags ${{ needs.versioning.outputs.DRY_RUN == 'true' && '--dry-run' || '' }}
env:
SWIFT_RELEASE_TOKEN: ${{ secrets.SWIFT_RELEASE_TOKEN }}
- id: artifact_version
name: Output artifact version
run: echo "ARTIFACT_VERSION=${{env.ARTIFACT_VERSION}}" >> "$GITHUB_OUTPUT"
run: echo "ARTIFACT_VERSION=${ARTIFACT_VERSION}" >> "$GITHUB_OUTPUT"
release_notes:
needs: [versioning, publish_npm, publish_android, publish_ios]
@@ -257,9 +288,13 @@ jobs:
steps:
- name: Log versions
run: |
echo "NPM: ${{ needs.publish_npm.outputs.ARTIFACT_VERSION }}"
echo "Android: ${{ needs.publish_android.outputs.ARTIFACT_VERSION }}"
echo "iOS: ${{ needs.publish_ios.outputs.ARTIFACT_VERSION }}"
echo "NPM: ${NEEDS_PUBLISH_NPM_OUTPUTS_ARTIFACT_VERSION}"
echo "Android: ${NEEDS_PUBLISH_ANDROID_OUTPUTS_ARTIFACT_VERSION}"
echo "iOS: ${NEEDS_PUBLISH_IOS_OUTPUTS_ARTIFACT_VERSION}"
env:
NEEDS_PUBLISH_NPM_OUTPUTS_ARTIFACT_VERSION: ${{ needs.publish_npm.outputs.ARTIFACT_VERSION }}
NEEDS_PUBLISH_ANDROID_OUTPUTS_ARTIFACT_VERSION: ${{ needs.publish_android.outputs.ARTIFACT_VERSION }}
NEEDS_PUBLISH_IOS_OUTPUTS_ARTIFACT_VERSION: ${{ needs.publish_ios.outputs.ARTIFACT_VERSION }}
- name: Add release notes
if: ${{ needs.versioning.outputs.DRY_RUN == 'false' }}
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2

View File

@@ -38,9 +38,9 @@ jobs:
name: build-output-full
path: ${{ env.FILENAME_PREFIX }}
- name: Create Tarball
run: tar --numeric-owner --transform "s/dist/${{ env.FILENAME_PREFIX }}/" -cvzf ${{ env.FILENAME_PREFIX }}.tar.gz ${{ env.FILENAME_PREFIX }}
run: tar --numeric-owner --transform "s/dist/${FILENAME_PREFIX}/" -cvzf ${FILENAME_PREFIX}.tar.gz ${FILENAME_PREFIX}
- name: Create Checksum
run: find ${{ env.FILENAME_PREFIX }} -type f -print0 | sort -z | xargs -0 sha256sum | tee ${{ env.FILENAME_PREFIX }}.sha256
run: find ${FILENAME_PREFIX} -type f -print0 | sort -z | xargs -0 sha256sum | tee ${FILENAME_PREFIX}.sha256
- name: Upload
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
with:
@@ -55,7 +55,9 @@ jobs:
permissions:
contents: write
packages: write
id-token: write
uses: ./.github/workflows/build-and-publish-docker.yaml
secrets: inherit
with:
artifact_run_id: ${{ github.event.workflow_run.id || github.run_id }}
docker_tags: |

View File

@@ -10,6 +10,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Enable Corepack
run: corepack enable
- name: Yarn cache
@@ -22,7 +24,7 @@ jobs:
- name: Vitest
run: "yarn run test:coverage"
- name: Upload to codecov
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
@@ -34,6 +36,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Enable Corepack
run: corepack enable
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4

View File

@@ -14,6 +14,8 @@ jobs:
steps:
- name: Checkout the code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Enable Corepack
run: corepack enable
@@ -42,7 +44,7 @@ jobs:
- name: Create Pull Request
id: cpr
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
uses: peter-evans/create-pull-request@22a9089034f40e5a961c8808d113e2c98fb63676 # v7.0.11
with:
token: ${{ secrets.ELEMENT_BOT_TOKEN }}
branch: actions/localazy-download

View File

@@ -15,6 +15,8 @@ jobs:
steps:
- name: Checkout the code
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
with:
persist-credentials: false
- name: Upload
uses: localazy/upload@27e6b5c0fddf4551596b42226b1c24124335d24a # v1

23
.github/workflows/zizmor.yml vendored Normal file
View File

@@ -0,0 +1,23 @@
name: GitHub Actions Security Analysis with zizmor 🌈
on:
push:
branches: ["livekit", "full-mesh"]
pull_request: {}
permissions: {}
jobs:
zizmor:
name: Run zizmor 🌈
runs-on: ubuntu-latest
permissions:
security-events: write # Required for upload-sarif (used by zizmor-action) to upload SARIF files.
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Run zizmor 🌈
uses: zizmorcore/zizmor-action@71321a20a9ded102f6e9ce5718a2fcec2c4f70d8 # v0.5.2

View File

@@ -0,0 +1,16 @@
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://call-unstable.ems.host",
"server_name": "call-unstable.ems.host"
}
},
"ssla": "https://static.element.io/legal/element-software-and-services-license-agreement-uk-1.pdf",
"matrix_rtc_session": {
"wait_for_key_rotation_ms": 3000,
"membership_event_expiry_ms": 180000000,
"delayed_leave_event_delay_ms": 18000,
"delayed_leave_event_restart_ms": 4000,
"network_error_retry_ms": 100
}
}

View File

@@ -47,7 +47,7 @@ services:
- ecbackend
livekit:
image: livekit/livekit-server:v1.9.4
image: livekit/livekit-server:v1.9.11
pull_policy: always
hostname: livekit-sfu
command: --dev --config /etc/livekit.yaml
@@ -70,7 +70,7 @@ services:
- ecbackend
livekit-1:
image: livekit/livekit-server:v1.9.4
image: livekit/livekit-server:v1.9.11
pull_policy: always
hostname: livekit-sfu-1
command: --dev --config /etc/livekit.yaml
@@ -94,7 +94,7 @@ services:
synapse:
hostname: homeserver
image: ghcr.io/element-hq/synapse:pr-18968-dcb7678281bc02d4551043a6338fe5b7e6aa47ce
image: ghcr.io/element-hq/synapse:latest
pull_policy: always
environment:
- SYNAPSE_CONFIG_PATH=/data/cfg/homeserver.yaml
@@ -112,7 +112,7 @@ services:
synapse-1:
hostname: homeserver-1
image: ghcr.io/element-hq/synapse:pr-18968-dcb7678281bc02d4551043a6338fe5b7e6aa47ce
image: ghcr.io/element-hq/synapse:latest
pull_policy: always
environment:
- SYNAPSE_CONFIG_PATH=/data/cfg/homeserver.yaml

View File

@@ -46,32 +46,32 @@ possible to support encryption.
These parameters are relevant to both [widget](./embedded-standalone.md) and [standalone](./embedded-standalone.md) modes:
| Name | Values | Required for widget | Required for SPA | Description |
| ------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `intent` | `start_call`, `join_existing`, `start_call_dm`, `join_existing_dm` | No, defaults to `start_call` | No, defaults to `start_call` | The intent is a special URL parameter that defines the defaults for all the other parameters. In most cases it should be enough to only set the intent to set up element-call. |
| `allowIceFallback` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Allows use of fallback STUN servers for ICE if the user's homeserver doesn't provide any. |
| `analyticsID` (deprecated: use `posthogUserId` instead) | Posthog analytics ID | No | No | Available only with user's consent for sharing telemetry in Element Web. |
| `appPrompt` | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Prompts the user to launch the native mobile app upon entering a room, applicable only on Android and iOS, and must be enabled in config. |
| `confineToRoom` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Keeps the user confined to the current call/room. |
| `displayName` | | No | No | Display name used for auto-registration. |
| `enableE2EE` (deprecated) | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Legacy flag to enable end-to-end encryption, not used in the `livekit` branch. |
| `fontScale` | A decimal number such as `0.9` | No, defaults to `1.0` | No, defaults to `1.0` | Factor by which to scale the interface's font size. |
| `fonts` | | No | No | Defines the font(s) used by the interface. Multiple font parameters can be specified: `?font=font-one&font=font-two...`. |
| `header` | `none`, `standard` or `app_bar` | No, defaults to `standard` | No, defaults to `standard` | The style of headers to show. `standard` is the default arrangement, `none` hides the header entirely, and `app_bar` produces a header with a back button like you might see in mobile apps. The callback for the back button is `window.controls.onBackButtonPressed`. |
| `hideScreensharing` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Hides the screen-sharing button. |
| `homeserver` | | Not applicable | No | Homeserver for registering a new (guest) user, configures non-default guest user server when creating a spa link. |
| `lang` | [BCP 47](https://www.rfc-editor.org/info/bcp47) code | No | No | The language the app should use. |
| `password` | | No | No | E2EE password when using a shared secret. (For individual sender keys in embedded mode this is not required.) |
| `perParticipantE2EE` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Enables per participant encryption with Keys exchanged over encrypted matrix room messages. |
| `controlledAudioDevices` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Whether the [global JS controls for audio devices](./controls.md#audio-devices) should be enabled, allowing the list of audio devices to be controlled by the app hosting Element Call. |
| `roomId` | [Matrix Room ID](https://spec.matrix.org/v1.12/appendices/#room-ids) | Yes | No | Anything about what room we're pointed to should be from useRoomIdentifier which parses the path and resolves alias with respect to the default server name, however roomId is an exception as we need the room ID in embedded widget mode, and not the room alias (or even the via params because we are not trying to join it). This is also not validated, where it is in `useRoomIdentifier()`. |
| `showControls` | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Displays controls like mute, screen-share, invite, and hangup buttons during a call. |
| `skipLobby` (deprecated: use `intent` instead) | `true` or `false` | No. If `intent` is explicitly `start_call` then defaults to `true`. Otherwise defaults to `false` | No, defaults to `false` | Skips the lobby to join a call directly, can be combined with preload in widget. When `true` the audio and video inputs will be muted by default. (This means there currently is no way to start without muted video if one wants to skip the lobby. Also not in widget mode.) |
| `theme` | One of: `light`, `dark`, `light-high-contrast`, `dark-high-contrast` | No, defaults to `dark` | No, defaults to `dark` | UI theme to use. |
| `viaServers` | Comma separated list of [Matrix Server Names](https://spec.matrix.org/v1.12/appendices/#server-name) | Not applicable | No | Homeserver for joining a room, non-empty value required for rooms not on the users default homeserver. |
| `sendNotificationType` | `ring` or `notification` | No | No | Will send a "ring" or "notification" `m.rtc.notification` event if the user is the first one in the call. |
| `autoLeaveWhenOthersLeft` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Whether the app should automatically leave the call when there is no one left in the call. |
| `waitForCallPickup` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | When sending a notification, show UI that the app is awaiting an answer, play a dial tone, and (in widget mode) auto-close the widget once the notification expires. |
| Name | Values | Required for widget | Required for SPA | Description |
| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `intent` | `start_call`, `join_existing`, `start_call_dm`, `join_existing_dm` | No, defaults to `start_call` | No, defaults to `start_call` | The intent is a special url parameter that defines the defaults for all the other parameters. In most cases it should be enough to only set the intent to set up element-call. |
| `allowIceFallback` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Allows use of fallback STUN servers for ICE if the user's homeserver doesn't provide any. |
| `posthogUserId` | Posthog analytics ID | No | No | Available only with user's consent for sharing telemetry in Element Web. |
| `appPrompt` | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Prompts the user to launch the native mobile app upon entering a room, applicable only on Android and iOS, and must be enabled in config. |
| `confineToRoom` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Keeps the user confined to the current call/room. |
| `displayName` | | No | No | Display name used for auto-registration. |
| `enableE2EE` (deprecated) | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Legacy flag to enable end-to-end encryption, not used in the `livekit` branch. |
| `fontScale` | A decimal number such as `0.9` | No, defaults to `1.0` | No, defaults to `1.0` | Factor by which to scale the interface's font size. |
| `fonts` | | No | No | Defines the font(s) used by the interface. Multiple font parameters can be specified: `?font=font-one&font=font-two...`. |
| `header` | `none`, `standard` or `app_bar` | No, defaults to `standard` | No, defaults to `standard` | The style of headers to show. `standard` is the default arrangement, `none` hides the header entirely, and `app_bar` produces a header with a back button like you might see in mobile apps. The callback for the back button is `window.controls.onBackButtonPressed`. |
| `hideScreensharing` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Hides the screen-sharing button. |
| `homeserver` | | Not applicable | No | Homeserver for registering a new (guest) user, configures non-default guest user server when creating a spa link. |
| `lang` | [BCP 47](https://www.rfc-editor.org/info/bcp47) code | No | No | The language the app should use. |
| `password` | | No | No | E2EE password when using a shared secret. (For individual sender keys in embedded mode this is not required.) |
| `perParticipantE2EE` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Enables per participant encryption with Keys exchanged over encrypted matrix room messages. |
| `controlledAudioDevices` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Whether the [global JS controls for audio devices](./controls.md#audio-devices) should be enabled, allowing the list of audio devices to be controlled by the app hosting Element Call. |
| `roomId` | [Matrix Room ID](https://spec.matrix.org/v1.12/appendices/#room-ids) | Yes | No | Anything about what room we're pointed to should be from useRoomIdentifier which parses the path and resolves alias with respect to the default server name, however roomId is an exception as we need the room ID in embedded widget mode, and not the room alias (or even the via params because we are not trying to join it). This is also not validated, where it is in `useRoomIdentifier()`. |
| `showControls` | `true` or `false` | No, defaults to `true` | No, defaults to `true` | Displays controls like mute, screen-share, invite, and hangup buttons during a call. |
| `skipLobby` (deprecated: use `intent` instead) | `true` or `false` | No. If `intent` is explicitly `start_call` then defaults to `true`. Otherwise defaults to `false` | No, defaults to `false` | Skips the lobby to join a call directly, can be combined with preload in widget. When `true` the audio and video inputs will be muted by default. (This means there currently is no way to start without muted video if one wants to skip the lobby. Also not in widget mode.) |
| `theme` | One of: `light`, `dark`, `light-high-contrast`, `dark-high-contrast` | No, defaults to `dark` | No, defaults to `dark` | UI theme to use. |
| `viaServers` | Comma-separated list of [Matrix Server Names](https://spec.matrix.org/v1.12/appendices/#server-name) | Not applicable | No | Homeserver for joining a room, non-empty value required for rooms not on the user's default homeserver. |
| `sendNotificationType` | `ring` or `notification` | No | No | Will send a "ring" or "notification" `m.rtc.notification` event if the user is the first one in the call. |
| `autoLeaveWhenOthersLeft` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | Whether the app should automatically leave the call when there is no one left in the call. |
| `waitForCallPickup` | `true` or `false` | No, defaults to `false` | No, defaults to `false` | When sending a notification, show UI that the app is awaiting an answer, play a dial tone, and (in widget mode) auto-close the widget once the notification expires. |
### Widget-only parameters

View File

@@ -2,11 +2,11 @@
# https://docs.gradle.org/current/userguide/platforms.html#sub::toml-dependencies-format
[versions]
android_gradle_plugin = "8.13.1"
android_gradle_plugin = "8.13.2"
[libraries]
android_gradle_plugin = { module = "com.android.tools.build:gradle", version.ref = "android_gradle_plugin" }
[plugins]
android_library = { id = "com.android.library", version.ref = "android_gradle_plugin" }
maven_publish = { id = "com.vanniktech.maven.publish", version = "0.35.0" }
maven_publish = { id = "com.vanniktech.maven.publish", version = "0.36.0" }

View File

@@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.3-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-8.14.4-all.zip
networkTimeout=10000
validateDistributionUrl=true
zipStoreBase=GRADLE_USER_HOME

View File

@@ -34,6 +34,12 @@ export default {
// then Knip will flag it as a false positive
// https://github.com/webpro-nl/knip/issues/766
"@vector-im/compound-web",
// Yarn plugins are allowed to depend on packages provided by the Yarn
// runtime. These shouldn't be listed in package.json, because plugins
// should work before Yarn even installs dependencies for the first time.
// https://yarnpkg.com/advanced/plugin-tutorial#what-does-a-plugin-look-like
"@yarnpkg/core",
"@yarnpkg/parsers",
"matrix-widget-api",
],
ignoreExportsUsedInFile: true,

View File

@@ -249,12 +249,14 @@
"version": "{{productName}} version: {{version}}",
"video_tile": {
"always_show": "Always show",
"call_ended": "Call ended",
"calling": "Calling…",
"camera_starting": "Video loading...",
"change_fit_contain": "Fit to frame",
"collapse": "Collapse",
"expand": "Expand",
"mute_for_me": "Mute for me",
"muted_for_me": "Muted for me",
"screen_share_volume": "Screen share volume",
"volume": "Volume",
"waiting_for_media": "Waiting for media..."
}

View File

@@ -13,8 +13,9 @@
"build:embedded": "yarn build:full --config vite-embedded.config.js",
"build:embedded:production": "yarn build:embedded",
"build:embedded:development": "yarn build:embedded --mode development",
"build:sdk": "yarn build:full --config vite-sdk.config.js",
"build:sdk:development": "yarn build:sdk --mode development",
"build:sdk": "yarn build:full --config vite-sdk.config.js",
"build:sdk:production": "yarn build:sdk",
"serve": "vite preview",
"prettier:check": "prettier -c .",
"prettier:format": "prettier -w .",
@@ -42,12 +43,12 @@
"@codecov/vite-plugin": "^1.3.0",
"@fontsource/inconsolata": "^5.1.0",
"@fontsource/inter": "^5.1.0",
"@formatjs/intl-durationformat": "^0.9.0",
"@formatjs/intl-durationformat": "^0.10.0",
"@formatjs/intl-segmenter": "^11.7.3",
"@livekit/components-core": "^0.12.0",
"@livekit/components-react": "^2.0.0",
"@livekit/protocol": "^1.42.2",
"@livekit/track-processors": "^0.6.0 || ^0.7.1",
"@livekit/track-processors": "^0.7.1",
"@mediapipe/tasks-vision": "^0.10.18",
"@playwright/test": "^1.57.0",
"@radix-ui/react-dialog": "^1.0.4",
@@ -78,7 +79,7 @@
"@vector-im/compound-design-tokens": "^6.0.0",
"@vector-im/compound-web": "^8.0.0",
"@vitejs/plugin-react": "^4.0.1",
"@vitest/coverage-v8": "^3.0.0",
"@vitest/coverage-v8": "^4.0.18",
"babel-plugin-transform-vite-meta-env": "^1.0.3",
"classnames": "^2.3.1",
"copy-to-clipboard": "^3.3.3",
@@ -100,11 +101,11 @@
"i18next-browser-languagedetector": "^8.0.0",
"i18next-parser": "^9.1.0",
"jsdom": "^26.0.0",
"knip": "^5.27.2",
"knip": "^5.86.0",
"livekit-client": "^2.13.0",
"lodash-es": "^4.17.21",
"loglevel": "^1.9.1",
"matrix-js-sdk": "matrix-org/matrix-js-sdk#develop",
"matrix-js-sdk": "matrix-org/matrix-js-sdk#6e3efef0c5f660df47cf00874927dec1c75cc3cf",
"matrix-widget-api": "^1.16.1",
"node-stdlib-browser": "^1.3.1",
"normalize.css": "^8.0.1",
@@ -117,7 +118,7 @@
"qrcode": "^1.5.4",
"react": "19",
"react-dom": "19",
"react-i18next": "^16.0.0 <16.1.0",
"react-i18next": "^16.0.0 <16.6.0",
"react-router-dom": "^7.0.0",
"react-use-measure": "^2.1.1",
"rxjs": "^7.8.1",
@@ -127,17 +128,22 @@
"unique-names-generator": "^4.6.0",
"uuid": "^13.0.0",
"vaul": "^1.0.0",
"vite": "^7.0.0",
"vite": "^7.3.0",
"vite-plugin-generate-file": "^0.3.0",
"vite-plugin-html": "^3.2.2",
"vite-plugin-node-stdlib-browser": "^0.2.1",
"vite-plugin-svgr": "^4.0.0",
"vitest": "^3.0.0",
"vitest": "^4.0.18",
"vitest-axe": "^1.0.0-pre.3"
},
"resolutions": {
"@livekit/components-core/rxjs": "^7.8.1",
"@livekit/track-processors/@mediapipe/tasks-vision": "^0.10.18"
"@livekit/track-processors/@mediapipe/tasks-vision": "^0.10.18",
"minimatch": "^10.2.3",
"tar": "^7.5.11",
"glob": "^10.5.0",
"qs": "^6.14.1",
"js-yaml": "^4.1.1"
},
"packageManager": "yarn@4.7.0"
}

View File

@@ -22,8 +22,8 @@ test("Start a new call then leave and show the feedback screen", async ({
await expect(page.getByTestId("lobby_joinCall")).toBeVisible();
// Check the button toolbar
// await expect(page.getByRole('button', { name: 'Mute microphone' })).toBeVisible();
// await expect(page.getByRole('button', { name: 'Stop video' })).toBeVisible();
// await expect(page.getByRole('switch', { name: 'Mute microphone' })).toBeVisible();
// await expect(page.getByRole('switch', { name: 'Stop video' })).toBeVisible();
await expect(page.getByRole("button", { name: "Settings" })).toBeVisible();
await expect(page.getByRole("button", { name: "End call" })).toBeVisible();

View File

@@ -100,8 +100,16 @@ mobileTest(
{ id: "earpiece", name: "Handset", isEarpiece: true },
{ id: "headphones", name: "Headphones" },
]);
window.controls.setAudioDevice("earpiece");
});
// Open settings to select earpiece
await guestPage.getByRole("button", { name: "Settings" }).click();
await guestPage.getByText("Handset", { exact: true }).click();
// dismiss settings
await guestPage.locator("#root").getByLabel("Settings").press("Escape");
await guestPage.pause();
await expect(
guestPage.getByRole("heading", { name: "Handset Mode" }),
).toBeVisible();

View File

@@ -49,12 +49,12 @@ test("can only interact with header and footer while reconnecting", async ({
).toBeVisible();
// Tab order should jump directly from header to footer, skipping media tiles
await page.getByRole("button", { name: "Mute microphone" }).focus();
await page.getByRole("switch", { name: "Mute microphone" }).focus();
await expect(
page.getByRole("button", { name: "Mute microphone" }),
page.getByRole("switch", { name: "Mute microphone" }),
).toBeFocused();
await page.keyboard.press("Tab");
await expect(page.getByRole("button", { name: "Stop video" })).toBeFocused();
await expect(page.getByRole("switch", { name: "Stop video" })).toBeFocused();
// Most critically, we should be able to press the hangup button
await page.getByRole("button", { name: "End call" }).click();
});

View File

@@ -55,13 +55,10 @@ widgetTest("Create and join a group call", async ({ addUser, browserName }) => {
const frame = user.page
.locator('iframe[title="Element Call"]')
.contentFrame();
// No lobby, should start with video on
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = frame.getByTestId("incall_videomute");
await expect(videoButton).toBeVisible();
// video should be off by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
await expect(
frame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
}
// We should see 5 video tiles everywhere now
@@ -101,13 +98,15 @@ widgetTest("Create and join a group call", async ({ addUser, browserName }) => {
const florianFrame = florian.page
.locator('iframe[title="Element Call"]')
.contentFrame();
const florianMuteButton = florianFrame.getByTestId("incall_videomute");
await florianMuteButton.click();
const florianVideoButton = florianFrame.getByRole("switch", {
name: /video/,
});
await expect(florianVideoButton).toHaveAccessibleName("Stop video");
await expect(florianVideoButton).toBeChecked();
await florianVideoButton.click();
// Now the button should indicate we can start video
await expect(florianMuteButton).toHaveAttribute(
"aria-label",
/^Start video$/,
);
await expect(florianVideoButton).toHaveAccessibleName("Start video");
await expect(florianVideoButton).not.toBeChecked();
// wait a bit for the state to propagate
await valere.page.waitForTimeout(3000);

View File

@@ -0,0 +1,73 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { expect, test } from "@playwright/test";
import { widgetTest } from "../fixtures/widget-user.ts";
import { HOST1, TestHelpers } from "./test-helpers.ts";
widgetTest("Footer interaction in PiP", async ({ addUser, browserName }) => {
  // Firefox on the CI environment exposes no mic/audio input devices, so the
  // cam/mic controls this test exercises are disabled there.
  test.skip(
    browserName === "firefox",
    "This test is not working on firefox CI environment. No mic/audio device inputs so cam/mic are disabled",
  );
  test.slow();
  const valere = await addUser("Valere", HOST1);
  const callRoom = "CallRoom";
  await TestHelpers.createRoom("CallRoom", valere.page);
  await TestHelpers.createRoom("OtherRoom", valere.page);
  await TestHelpers.switchToRoomNamed(valere.page, callRoom);
  // Start the call as Valere
  await TestHelpers.startCallInCurrentRoom(valere.page, false);
  await expect(
    valere.page.locator('iframe[title="Element Call"]'),
  ).toBeVisible();
  await TestHelpers.joinCallFromLobby(valere.page);
  // wait a bit so that the PIP has rendered
  await valere.page.waitForTimeout(600);
  // Switch to the other room, the call should go to PIP
  await TestHelpers.switchToRoomNamed(valere.page, "OtherRoom");
  // We should see the PIP overlay
  const iFrame = valere.page
    .locator('iframe[title="Element Call"]')
    .contentFrame();
  {
    // The footer controls must stay interactive while in PiP: toggling the
    // mic/camera switches should flip their accessible name and checked state.
    const audioBtn = iFrame.getByRole("switch", { name: /microphone/ });
    const videoBtn = iFrame.getByRole("switch", { name: /video/ });
    await expect(
      iFrame.getByRole("button", { name: "End call" }),
    ).toBeVisible();
    await expect(audioBtn).toBeVisible();
    await expect(videoBtn).toBeVisible();
    await expect(audioBtn).toHaveAccessibleName("Mute microphone");
    await expect(audioBtn).toBeChecked();
    await expect(videoBtn).toHaveAccessibleName("Stop video");
    await expect(videoBtn).toBeChecked();
    await videoBtn.click();
    await audioBtn.click();
    // stop hovering on any of the buttons
    await iFrame.getByTestId("videoTile").hover();
    await expect(audioBtn).toHaveAccessibleName("Unmute microphone");
    await expect(audioBtn).not.toBeChecked();
    await expect(videoBtn).toHaveAccessibleName("Start video");
    await expect(videoBtn).not.toBeChecked();
  }
});

View File

@@ -0,0 +1,72 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { expect, test } from "@playwright/test";
import { widgetTest } from "../fixtures/widget-user.ts";
import { HOST1, TestHelpers } from "./test-helpers.ts";
widgetTest("Put call in PIP", async ({ addUser, browserName }) => {
  // Firefox on the CI environment exposes no mic/audio input devices, so the
  // cam/mic features this test relies on are disabled there.
  test.skip(
    browserName === "firefox",
    "This test is not working on firefox CI environment. No mic/audio device inputs so cam/mic are disabled",
  );
  test.slow();
  const valere = await addUser("Valere", HOST1);
  const timo = await addUser("Timo", HOST1);
  const callRoom = "TeamRoom";
  await TestHelpers.createRoom(callRoom, valere.page, [timo.mxId]);
  await TestHelpers.createRoom("DoubleTask", valere.page);
  await TestHelpers.acceptRoomInvite(callRoom, timo.page);
  await TestHelpers.switchToRoomNamed(valere.page, callRoom);
  // Start the call as Valere
  await TestHelpers.startCallInCurrentRoom(valere.page, false);
  await expect(
    valere.page.locator('iframe[title="Element Call"]'),
  ).toBeVisible();
  await TestHelpers.joinCallFromLobby(valere.page);
  await TestHelpers.joinCallInCurrentRoom(timo.page);
  const frame = timo.page
    .locator('iframe[title="Element Call"]')
    .contentFrame();
  // check that the video is on
  await expect(
    frame.getByRole("switch", { name: "Stop video", checked: true }),
  ).toBeVisible();
  // Switch to the other room, the call should go to PIP
  await TestHelpers.switchToRoomNamed(valere.page, "DoubleTask");
  // We should see the PIP overlay
  await expect(valere.page.getByTestId("widget-pip-container")).toBeVisible();
  {
    // wait a bit so that the PIP has rendered the video
    await valere.page.waitForTimeout(600);
    // Check for a bug where the video had the wrong fit in PIP.
    // (Named distinctly to avoid shadowing Timo's `frame` above.)
    const valereFrame = valere.page
      .locator('iframe[title="Element Call"]')
      .contentFrame();
    const videoElements = await valereFrame.locator("video").all();
    expect(videoElements.length).toBe(1);
    const pipVideo = videoElements[0];
    await expect(pipVideo).toHaveCSS("object-fit", "cover");
  }
});

View File

@@ -152,6 +152,22 @@ export class TestHelpers {
}
}
/**
 * Dismisses the Element Web "Back up your chats" key-backup toast if it
 * appears, so it cannot cover UI elements that later test steps click.
 * Safe to call when the toast never shows: the short visibility wait simply
 * times out and the helper returns without failing the test.
 * @param page - The Element Web page
 */
public static async maybeDismissKeyBackupToast(page: Page): Promise<void> {
  const toast = page
    .locator(".mx_Toast_toast")
    .getByText("Back up your chats");
  try {
    // Short timeout on purpose: the toast either appears promptly after
    // room creation/join, or not at all.
    await expect(toast).toBeVisible({ timeout: 700 });
    await page
      .locator(".mx_Toast_toast")
      .getByRole("button", { name: "Dismiss" })
      .click();
  } catch {
    // toast not visible, continue as normal
  }
}
public static async createRoom(
name: string,
page: Page,
@@ -167,6 +183,7 @@ export class TestHelpers {
await page.getByRole("button", { name: "Create room" }).click();
await expect(page.getByText("You created this room.")).toBeVisible();
await expect(page.getByText("Encryption enabled")).toBeVisible();
await TestHelpers.maybeDismissKeyBackupToast(page);
// Invite users if any
if (andInvite.length > 0) {
@@ -201,6 +218,7 @@ export class TestHelpers {
await expect(
page.getByRole("main").getByRole("heading", { name: roomName }),
).toBeVisible();
await TestHelpers.maybeDismissKeyBackupToast(page);
}
/**
@@ -276,4 +294,16 @@ export class TestHelpers {
});
}
}
/**
* Switches to a room in the room list by its name.
* @param page - The EW page
* @param roomName - The name of the room to switch to
*/
public static async switchToRoomNamed(
page: Page,
roomName: string,
): Promise<void> {
await page.getByRole("option", { name: `Open room ${roomName}` }).click();
}
}

View File

@@ -34,9 +34,12 @@ widgetTest(
.locator('iframe[title="Element Call"]')
.contentFrame();
// We should show a ringing overlay, let's check for that
// We should show a ringing tile, let's check for that
await expect(
brooksFrame.getByText(`Waiting for ${whistler.displayName} to join…`),
brooksFrame
.getByTestId("videoTile")
.filter({ has: brooksFrame.getByText(whistler.displayName) })
.filter({ has: brooksFrame.getByText("Calling…") }),
).toBeVisible();
await expect(whistler.page.getByText("Incoming voice call")).toBeVisible();
@@ -51,34 +54,36 @@ widgetTest(
.contentFrame();
// ASSERT the button states for whistler (the callee)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = whistlerFrame.getByTestId("incall_videomute");
// video should be off by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Start video$/);
const audioButton = whistlerFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a voice call
await expect(
whistlerFrame.getByRole("switch", {
name: "Start video",
checked: false,
}),
).toBeVisible();
// audio should be on for the voice call
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// ASSERT the button states for brooks (the caller)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = brooksFrame.getByTestId("incall_videomute");
// video should be off by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Start video$/);
const audioButton = brooksFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a voice call
await expect(
whistlerFrame.getByRole("switch", {
name: "Start video",
checked: false,
}),
).toBeVisible();
// audio should be on for the voice call
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// In order to confirm that the call is disconnected we will check that the message composer is shown again.
// So first we need to confirm that it is hidden when in the call.
@@ -90,10 +95,7 @@ widgetTest(
).not.toBeVisible();
// ASSERT hanging up on one side ends the call for both
{
const hangupButton = brooksFrame.getByTestId("incall_leave");
await hangupButton.click();
}
await brooksFrame.getByRole("button", { name: "End call" }).click();
// The widget should be closed on both sides and the timeline should be back on screen
await expect(
@@ -125,9 +127,12 @@ widgetTest(
.locator('iframe[title="Element Call"]')
.contentFrame();
// We should show a ringing overlay, let's check for that
// We should show a ringing tile, let's check for that
await expect(
brooksFrame.getByText(`Waiting for ${whistler.displayName} to join…`),
brooksFrame
.getByTestId("videoTile")
.filter({ has: brooksFrame.getByText(whistler.displayName) })
.filter({ has: brooksFrame.getByText("Calling…") }),
).toBeVisible();
await expect(whistler.page.getByText("Incoming video call")).toBeVisible();
@@ -142,34 +147,30 @@ widgetTest(
.contentFrame();
// ASSERT the button states for whistler (the callee)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = whistlerFrame.getByTestId("incall_videomute");
// video should be on by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
const audioButton = whistlerFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a video call
await expect(
whistlerFrame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
// audio should be on too
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// ASSERT the button states for brooks (the caller)
{
// The only way to know if it is muted or not is to look at the data-kind attribute..
const videoButton = brooksFrame.getByTestId("incall_videomute");
// video should be on by default in a voice call
await expect(videoButton).toHaveAttribute("aria-label", /^Stop video$/);
const audioButton = brooksFrame.getByTestId("incall_mute");
// audio should be on for the voice call
await expect(audioButton).toHaveAttribute(
"aria-label",
/^Mute microphone$/,
);
}
// video should be off by default in a video call
await expect(
whistlerFrame.getByRole("switch", { name: "Stop video", checked: true }),
).toBeVisible();
// audio should be on too
await expect(
whistlerFrame.getByRole("switch", {
name: "Mute microphone",
checked: true,
}),
).toBeVisible();
// In order to confirm that the call is disconnected we will check that the message composer is shown again.
// So first we need to confirm that it is hidden when in the call.
@@ -181,10 +182,7 @@ widgetTest(
).not.toBeVisible();
// ASSERT hanging up on one side ends the call for both
{
const hangupButton = brooksFrame.getByTestId("incall_leave");
await hangupButton.click();
}
await brooksFrame.getByRole("button", { name: "End call" }).click();
// The widget should be closed on both sides and the timeline should be back on screen
await expect(
@@ -216,9 +214,12 @@ widgetTest(
.locator('iframe[title="Element Call"]')
.contentFrame();
// We should show a ringing overlay, let's check for that
// We should show a ringing tile, let's check for that
await expect(
brooksFrame.getByText(`Waiting for ${whistler.displayName} to join…`),
brooksFrame
.getByTestId("videoTile")
.filter({ has: brooksFrame.getByText(whistler.displayName) })
.filter({ has: brooksFrame.getByText("Calling…") }),
).toBeVisible();
await expect(whistler.page.getByText("Incoming video call")).toBeVisible();

View File

@@ -1,4 +1,4 @@
# SDK mode
# SDK mode (EXPERIMENTAL)
EC can be built in sdk mode. This will result in a compiled js file that can be imported in very simple webapps.

View File

@@ -12,15 +12,12 @@ Please see LICENSE in the repository root for full details.
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import { scan } from "rxjs";
import { widget as _widget } from "../src/widget";
import { type WidgetHelpers } from "../src/widget";
import { type LivekitRoomItem } from "../src/state/CallViewModel/CallViewModel";
export const logger = rootLogger.getChild("[MatrixRTCSdk]");
if (!_widget) throw Error("No widget. This webapp can only start as a widget");
export const widget = _widget;
export const tryMakeSticky = (): void => {
export const tryMakeSticky = (widget: WidgetHelpers): void => {
logger.info("try making sticky MatrixRTCSdk");
void widget.api
.setAlwaysOnScreen(true)

View File

@@ -6,6 +6,8 @@ Please see LICENSE in the repository root for full details.
*/
/**
* EXPERIMENTAL
*
* This file is the entrypoint for the sdk build of element call: `yarn build:sdk`
* use in widgets.
* It exposes the `createMatrixRTCSdk` which creates the `MatrixRTCSdk` interface (see below) that
@@ -30,8 +32,8 @@ import {
} from "rxjs";
import {
type CallMembership,
MatrixRTCSession,
MatrixRTCSessionEvent,
MatrixRTCSessionManager,
} from "matrix-js-sdk/lib/matrixrtc";
import {
type Room as LivekitRoom,
@@ -50,14 +52,12 @@ import { getUrlParams } from "../src/UrlParams";
import { MuteStates } from "../src/state/MuteStates";
import { MediaDevices } from "../src/state/MediaDevices";
import { E2eeType } from "../src/e2ee/e2eeType";
import { currentAndPrev, logger, TEXT_LK_TOPIC, tryMakeSticky } from "./helper";
import {
currentAndPrev,
logger,
TEXT_LK_TOPIC,
tryMakeSticky,
widget,
} from "./helper";
import { ElementWidgetActions, initializeWidget } from "../src/widget";
ElementWidgetActions,
widget as _widget,
initializeWidget,
} from "../src/widget";
import { type Connection } from "../src/state/CallViewModel/remoteMembers/Connection";
interface MatrixRTCSdk {
@@ -68,7 +68,13 @@ interface MatrixRTCSdk {
join: () => void;
/** @throws on leave errors */
leave: () => void;
data$: Observable<{ sender: string; data: string }>;
/**
* Ends the rtc sdk. This will unsubscribe any event listeners. And end the associated scope.
* No updates can be received from the rtc sdk. The sdk cannot be restarted after.
* A new sdk needs to be created via createMatrixRTCSdk.
*/
stop: () => void;
data$: Observable<{ rtcBackendIdentity: string; data: string }>;
/**
* flattened list of members
*/
@@ -79,32 +85,54 @@ interface MatrixRTCSdk {
participant: LocalParticipant | RemoteParticipant | null;
}[]
>;
/**
* flattened local members
*/
localMember$: Behavior<{
connection: Connection | null;
membership: CallMembership;
participant: LocalParticipant | null;
} | null>;
/** Use the LocalMemberConnectionState returned from `join` for a more detailed connection state */
connected$: Behavior<boolean>;
sendData?: (data: unknown) => Promise<void>;
sendRoomMessage?: (message: string) => Promise<void>;
}
export async function createMatrixRTCSdk(
application: string = "m.call",
id: string = "",
sticky: boolean = false,
): Promise<MatrixRTCSdk> {
initializeWidget();
const scope = new ObservableScope();
// widget client
initializeWidget(application, true);
const widget = _widget;
if (!widget) throw Error("No widget. This webapp can only start as a widget");
const client = await widget.client;
logger.info("client created");
const scope = new ObservableScope();
// url params
const { roomId } = getUrlParams();
if (roomId === null) throw Error("could not get roomId from url params");
const room = client.getRoom(roomId);
if (room === null) throw Error("could not get room from client");
// rtc session
const slot = { application, id };
const rtcSessionManager = new MatrixRTCSessionManager(logger, client, slot);
rtcSessionManager.start();
const rtcSession = rtcSessionManager.getRoomSession(room);
// media devices
const mediaDevices = new MediaDevices(scope);
const muteStates = new MuteStates(scope, mediaDevices, {
audioEnabled: true,
videoEnabled: true,
audioEnabled: false,
videoEnabled: false,
});
const slot = { application, id };
const rtcSession = new MatrixRTCSession(client, room, slot);
// call view model
const callViewModel = createCallViewModel$(
scope,
rtcSession,
@@ -117,8 +145,9 @@ export async function createMatrixRTCSdk(
constant({ supported: false, processor: undefined }),
);
logger.info("CallViewModelCreated");
// create data listener
const data$ = new Subject<{ sender: string; data: string }>();
const data$ = new Subject<{ rtcBackendIdentity: string; data: string }>();
const lkTextStreamHandlerFunction = async (
reader: TextStreamReader,
@@ -140,7 +169,7 @@ export async function createMatrixRTCSdk(
if (participants && participants.includes(participantInfo.identity)) {
const text = await reader.readAll();
logger.info(`Received text: ${text}`);
data$.next({ sender: participantInfo.identity, data: text });
data$.next({ rtcBackendIdentity: participantInfo.identity, data: text });
} else {
logger.warn(
"Received text from unknown participant",
@@ -230,6 +259,16 @@ export async function createMatrixRTCSdk(
}
};
const sendRoomMessage = async (message: string): Promise<void> => {
const messageString = JSON.stringify(message);
logger.info("try sending to room: ", messageString);
try {
await client.sendTextMessage(room.roomId, message);
} catch (e) {
logger.error("failed sending to room: ", messageString, e);
}
};
// after hangup gets called
const leaveSubs = callViewModel.leave$.subscribe(() => {
const scheduleWidgetCloseOnLeave = async (): Promise<void> => {
@@ -257,9 +296,6 @@ export async function createMatrixRTCSdk(
// schedule close first and then leave (scope.end)
void scheduleWidgetCloseOnLeave();
// actual hangup (ending scope will send the leave event.. its kinda odd. since you might end up closing the widget too fast)
scope.end();
});
logger.info("createMatrixRTCSdk done");
@@ -267,15 +303,40 @@ export async function createMatrixRTCSdk(
return {
join: (): void => {
// first lets try making the widget sticky
tryMakeSticky();
if (sticky) tryMakeSticky(widget);
callViewModel.join();
},
leave: (): void => {
callViewModel.hangup();
callViewModel.leave();
},
stop: (): void => {
leaveSubs.unsubscribe();
livekitRoomItemsSub.unsubscribe();
scope.end();
},
data$,
localMember$: scope.behavior(
callViewModel.localMatrixLivekitMember$.pipe(
tap((member) =>
logger.info("localMatrixLivekitMember$ next: ", member),
),
switchMap((member) => {
if (member === null) return of(null);
return combineLatest([
member.connection$,
member.membership$,
member.participant.value$,
]).pipe(
map(([connection, membership, participant]) => ({
connection,
membership,
participant,
})),
);
}),
tap((member) => logger.info("localMember$ next: ", member)),
),
),
connected$: callViewModel.connected$,
members$: scope.behavior(
callViewModel.matrixLivekitMembers$.pipe(
@@ -302,5 +363,6 @@ export async function createMatrixRTCSdk(
[],
),
sendData,
sendRoomMessage,
};
}

View File

@@ -9,14 +9,18 @@ import { afterEach, expect, test, vi } from "vitest";
import { render, screen } from "@testing-library/react";
import { type MatrixClient } from "matrix-js-sdk";
import { type FC, type PropsWithChildren } from "react";
import { type WidgetApi } from "matrix-widget-api";
import { ClientContextProvider } from "./ClientContext";
import { Avatar } from "./Avatar";
import { mockMatrixRoomMember, mockRtcMembership } from "./utils/test";
import { widget } from "./widget";
const TestComponent: FC<
PropsWithChildren<{ client: MatrixClient; supportsThumbnails?: boolean }>
> = ({ client, children, supportsThumbnails }) => {
PropsWithChildren<{
client: MatrixClient;
}>
> = ({ client, children }) => {
return (
<ClientContextProvider
value={{
@@ -24,7 +28,6 @@ const TestComponent: FC<
disconnected: false,
supportedFeatures: {
reactions: true,
thumbnails: supportsThumbnails ?? true,
},
setClient: vi.fn(),
authenticated: {
@@ -40,6 +43,12 @@ const TestComponent: FC<
);
};
vi.mock("./widget", () => ({
widget: {
api: null, // Ideally we'd only mock this in the as a widget test so the whole module is otherwise null, but just nulling `api` by default works well enough
},
}));
afterEach(() => {
vi.unstubAllGlobals();
});
@@ -73,36 +82,7 @@ test("should just render a placeholder when the user has no avatar", () => {
expect(client.mxcUrlToHttp).toBeCalledTimes(0);
});
test("should just render a placeholder when thumbnails are not supported", () => {
const client = vi.mocked<MatrixClient>({
getAccessToken: () => "my-access-token",
mxcUrlToHttp: () => vi.fn(),
} as unknown as MatrixClient);
vi.spyOn(client, "mxcUrlToHttp");
const member = mockMatrixRoomMember(
mockRtcMembership("@alice:example.org", "AAAA"),
{
getMxcAvatarUrl: () => "mxc://example.org/alice-avatar",
},
);
const displayName = "Alice";
render(
<TestComponent client={client} supportsThumbnails={false}>
<Avatar
id={member.userId}
name={displayName}
size={96}
src={member.getMxcAvatarUrl()}
/>
</TestComponent>,
);
const element = screen.getByRole("img", { name: "@alice:example.org" });
expect(element.tagName).toEqual("SPAN");
expect(client.mxcUrlToHttp).toBeCalledTimes(0);
});
test("should attempt to fetch authenticated media", async () => {
test("should attempt to fetch authenticated media from the server", async () => {
const expectedAuthUrl = "http://example.org/media/alice-avatar";
const expectedObjectURL = "my-object-url";
const accessToken = "my-access-token";
@@ -154,3 +134,47 @@ test("should attempt to fetch authenticated media", async () => {
headers: { Authorization: `Bearer ${accessToken}` },
});
});
// When running embedded as a widget, the Avatar must fetch media through the
// widget API's downloadFile (MSC4039) rather than hitting the homeserver directly.
test("should attempt to use widget API if running as a widget", async () => {
const expectedMXCUrl = "mxc://example.org/alice-avatar";
const expectedObjectURL = "my-object-url";
const theBlob = new Blob([]);
// vitest doesn't have a implementation of create/revokeObjectURL, so we need
// to delete the property. It's a bit odd, but it works.
Reflect.deleteProperty(global.window.URL, "createObjectURL");
globalThis.URL.createObjectURL = vi.fn().mockReturnValue(expectedObjectURL);
Reflect.deleteProperty(global.window.URL, "revokeObjectURL");
globalThis.URL.revokeObjectURL = vi.fn();
// No access token: proves the widget path doesn't depend on client auth.
const client = vi.mocked<MatrixClient>({
getAccessToken: () => undefined,
} as unknown as MatrixClient);
// Give the mocked widget module a downloadFile that resolves with our Blob.
widget!.api = { downloadFile: vi.fn() } as unknown as WidgetApi;
vi.spyOn(widget!.api, "downloadFile").mockResolvedValue({ file: theBlob });
const member = mockMatrixRoomMember(
mockRtcMembership("@alice:example.org", "AAAA"),
{
getMxcAvatarUrl: () => expectedMXCUrl,
},
);
const displayName = "Alice";
render(
<TestComponent client={client}>
<Avatar
id={member.userId}
name={displayName}
size={96}
src={member.getMxcAvatarUrl()}
/>
</TestComponent>,
);
// Fetch is asynchronous, so wait for this to resolve.
await vi.waitUntil(() =>
document.querySelector(`img[src='${expectedObjectURL}']`),
);
// The raw mxc:// URI (not an HTTP URL) must be what's passed to the widget API.
expect(widget!.api.downloadFile).toBeCalledWith(expectedMXCUrl);
});

View File

@@ -14,8 +14,10 @@ import {
} from "react";
import { Avatar as CompoundAvatar } from "@vector-im/compound-web";
import { type MatrixClient } from "matrix-js-sdk";
import { type WidgetApi } from "matrix-widget-api";
import { useClientState } from "./ClientContext";
import { widget } from "./widget";
export enum Size {
XS = "xs",
@@ -78,50 +80,54 @@ export const Avatar: FC<Props> = ({
const sizePx = useMemo(
() =>
Object.values(Size).includes(size as Size)
? sizes.get(size as Size)
? sizes.get(size as Size)!
: (size as number),
[size],
);
const [avatarUrl, setAvatarUrl] = useState<string | undefined>(undefined);
// In theory, a change in `clientState` or `sizePx` could run extra getAvatarFromWidgetAPI calls, but in practice they should be stable long before this code runs.
useEffect(() => {
if (clientState?.state !== "valid") {
return;
}
const { authenticated, supportedFeatures } = clientState;
const client = authenticated?.client;
if (!client || !src || !sizePx || !supportedFeatures.thumbnails) {
if (!src) {
setAvatarUrl(undefined);
return;
}
const token = client.getAccessToken();
if (!token) {
return;
}
const resolveSrc = getAvatarUrl(client, src, sizePx);
if (!resolveSrc) {
let blob: Promise<Blob>;
if (widget?.api) {
blob = getAvatarFromWidgetAPI(widget.api, src);
} else if (
clientState?.state === "valid" &&
clientState.authenticated?.client &&
sizePx
) {
blob = getAvatarFromServer(clientState.authenticated.client, src, sizePx);
} else {
setAvatarUrl(undefined);
return;
}
let objectUrl: string | undefined;
fetch(resolveSrc, {
headers: {
Authorization: `Bearer ${token}`,
},
})
.then(async (req) => req.blob())
let stale = false;
blob
.then((blob) => {
if (stale) {
return;
}
objectUrl = URL.createObjectURL(blob);
setAvatarUrl(objectUrl);
})
.catch((ex) => {
if (stale) {
return;
}
setAvatarUrl(undefined);
});
return (): void => {
stale = true;
if (objectUrl) {
URL.revokeObjectURL(objectUrl);
}
@@ -140,3 +146,44 @@ export const Avatar: FC<Props> = ({
/>
);
};
/**
 * Fetches an avatar thumbnail directly from the homeserver's authenticated
 * media endpoint and returns the raw image data as a Blob.
 *
 * @param client - Matrix client used to resolve the media URL and access token.
 * @param src - The mxc:// URI of the avatar.
 * @param sizePx - Desired thumbnail size in CSS pixels.
 * @returns The downloaded avatar contents.
 * @throws Error if the URL or access token cannot be resolved, or if the
 *   server responds with a non-2xx status.
 */
async function getAvatarFromServer(
  client: MatrixClient,
  src: string,
  sizePx: number,
): Promise<Blob> {
  // Resolve the mxc:// URI to an HTTP thumbnail URL at the requested size.
  const httpSrc = getAvatarUrl(client, src, sizePx);
  if (!httpSrc) {
    throw new Error("Failed to get http avatar URL");
  }
  const token = client.getAccessToken();
  if (!token) {
    throw new Error("Failed to get access token");
  }
  const request = await fetch(httpSrc, {
    headers: {
      Authorization: `Bearer ${token}`,
    },
  });
  // Fix: previously a 404/403 error page body would be returned as the
  // "avatar" Blob and rendered as a broken image. Reject non-2xx responses
  // so the caller's catch path can fall back to the placeholder.
  if (!request.ok) {
    throw new Error(`Failed to fetch avatar: HTTP ${request.status}`);
  }
  const blob = await request.blob();
  return blob;
}
/**
 * Fetches an avatar through the embedding client via the widget API
 * (MSC4039 file download) and returns the raw image data as a Blob.
 *
 * @param api - The widget API handle used to request the file.
 * @param src - The mxc:// URI of the avatar to download.
 * @returns The downloaded avatar contents.
 * @throws Error if the embedding client hands back anything other than a Blob.
 */
async function getAvatarFromWidgetAPI(
  api: WidgetApi,
  src: string,
): Promise<Blob> {
  const { file } = await api.downloadFile(src);
  // element-web sends a Blob, and the MSC4039 is considering changing the spec to strictly Blob, so only handling that
  if (file instanceof Blob) {
    return file;
  }
  throw new Error("Downloaded file is not a Blob");
}

View File

@@ -48,7 +48,6 @@ export type ValidClientState = {
disconnected: boolean;
supportedFeatures: {
reactions: boolean;
thumbnails: boolean;
};
setClient: (client: MatrixClient, session: Session) => void;
};
@@ -249,7 +248,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
const [isDisconnected, setIsDisconnected] = useState(false);
const [supportsReactions, setSupportsReactions] = useState(false);
const [supportsThumbnails, setSupportsThumbnails] = useState(false);
const state: ClientState | undefined = useMemo(() => {
if (alreadyOpenedErr) {
@@ -275,7 +273,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
disconnected: isDisconnected,
supportedFeatures: {
reactions: supportsReactions,
thumbnails: supportsThumbnails,
},
};
}, [
@@ -286,7 +283,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
setClient,
isDisconnected,
supportsReactions,
supportsThumbnails,
]);
const onSync = useCallback(
@@ -312,8 +308,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
}
if (initClientState.widgetApi) {
// There is currently no widget API for authenticated media thumbnails.
setSupportsThumbnails(false);
const reactSend = initClientState.widgetApi.hasCapability(
"org.matrix.msc2762.send.event:m.reaction",
);
@@ -335,7 +329,6 @@ export const ClientProvider: FC<Props> = ({ children }) => {
}
} else {
setSupportsReactions(true);
setSupportsThumbnails(true);
}
return (): void => {

View File

@@ -473,8 +473,7 @@ export const computeUrlParams = (search = "", hash = ""): UrlParams => {
homeserver: !isWidget ? parser.getParam("homeserver") : null,
posthogApiHost: parser.getParam("posthogApiHost"),
posthogApiKey: parser.getParam("posthogApiKey"),
posthogUserId:
parser.getParam("posthogUserId") ?? parser.getParam("analyticsID"),
posthogUserId: parser.getParam("posthogUserId"),
rageshakeSubmitUrl: parser.getParam("rageshakeSubmitUrl"),
sentryDsn: parser.getParam("sentryDsn"),
sentryEnvironment: parser.getParam("sentryEnvironment"),

View File

@@ -14,7 +14,7 @@ exports[`AppBar > renders 1`] = `
>
<button
aria-labelledby="_r_0_"
class="_icon-button_1pz9o_8"
class="_icon-button_1215g_8"
data-kind="primary"
role="button"
style="--cpd-icon-button-size: 32px;"

View File

@@ -22,23 +22,25 @@ import {
import styles from "./Button.module.css";
interface MicButtonProps extends ComponentPropsWithoutRef<"button"> {
muted: boolean;
enabled: boolean;
size?: "sm" | "lg";
}
export const MicButton: FC<MicButtonProps> = ({ muted, ...props }) => {
export const MicButton: FC<MicButtonProps> = ({ enabled, ...props }) => {
const { t } = useTranslation();
const Icon = muted ? MicOffSolidIcon : MicOnSolidIcon;
const label = muted
? t("unmute_microphone_button_label")
: t("mute_microphone_button_label");
const Icon = enabled ? MicOnSolidIcon : MicOffSolidIcon;
const label = enabled
? t("mute_microphone_button_label")
: t("unmute_microphone_button_label");
return (
<Tooltip label={label}>
<CpdButton
iconOnly
aria-label={label}
Icon={Icon}
kind={muted ? "primary" : "secondary"}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
@@ -46,23 +48,25 @@ export const MicButton: FC<MicButtonProps> = ({ muted, ...props }) => {
};
interface VideoButtonProps extends ComponentPropsWithoutRef<"button"> {
muted: boolean;
enabled: boolean;
size?: "sm" | "lg";
}
export const VideoButton: FC<VideoButtonProps> = ({ muted, ...props }) => {
export const VideoButton: FC<VideoButtonProps> = ({ enabled, ...props }) => {
const { t } = useTranslation();
const Icon = muted ? VideoCallOffSolidIcon : VideoCallSolidIcon;
const label = muted
? t("start_video_button_label")
: t("stop_video_button_label");
const Icon = enabled ? VideoCallSolidIcon : VideoCallOffSolidIcon;
const label = enabled
? t("stop_video_button_label")
: t("start_video_button_label");
return (
<Tooltip label={label}>
<CpdButton
iconOnly
aria-label={label}
Icon={Icon}
kind={muted ? "primary" : "secondary"}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
@@ -71,6 +75,7 @@ export const VideoButton: FC<VideoButtonProps> = ({ muted, ...props }) => {
interface ShareScreenButtonProps extends ComponentPropsWithoutRef<"button"> {
enabled: boolean;
size: "sm" | "lg";
}
export const ShareScreenButton: FC<ShareScreenButtonProps> = ({
@@ -88,13 +93,19 @@ export const ShareScreenButton: FC<ShareScreenButtonProps> = ({
iconOnly
Icon={ShareScreenSolidIcon}
kind={enabled ? "primary" : "secondary"}
role="switch"
aria-checked={enabled}
{...props}
/>
</Tooltip>
);
};
export const EndCallButton: FC<ComponentPropsWithoutRef<"button">> = ({
interface EndCallButtonProps extends ComponentPropsWithoutRef<"button"> {
size?: "sm" | "lg";
}
export const EndCallButton: FC<EndCallButtonProps> = ({
className,
...props
}) => {
@@ -105,7 +116,6 @@ export const EndCallButton: FC<ComponentPropsWithoutRef<"button">> = ({
<CpdButton
className={classNames(className, styles.endCall)}
iconOnly
aria-label={t("hangup_button_label")}
Icon={EndCallIcon}
destructive
{...props}
@@ -114,9 +124,10 @@ export const EndCallButton: FC<ComponentPropsWithoutRef<"button">> = ({
);
};
export const SettingsButton: FC<ComponentPropsWithoutRef<"button">> = (
props,
) => {
interface SettingsButtonProps extends ComponentPropsWithoutRef<"button"> {
size?: "sm" | "lg";
}
export const SettingsButton: FC<SettingsButtonProps> = (props) => {
const { t } = useTranslation();
return (

View File

@@ -166,6 +166,7 @@ export function ReactionPopupMenu({
interface ReactionToggleButtonProps extends ComponentPropsWithoutRef<"button"> {
identifier: string;
vm: CallViewModel;
size?: "sm" | "lg";
}
export function ReactionToggleButton({

View File

@@ -33,12 +33,38 @@ export interface Controls {
showNativeOutputDevicePicker?: () => void;
}
/**
* An output audio device, used when the app is in controlled audio output
* mode (i.e. running inside a mobile host that routes audio natively).
*/
export interface OutputDevice {
// Platform device identifier (Android: AudioDeviceInfo id; iOS: route name or "dummy").
id: string;
// Human-readable device name shown in the output picker.
name: string;
/**
* `forEarpiece` is an iOS-only flag, set on the default speaker device.
* The default speaker device is reused for earpiece mode by applying a
* stereo pan and significantly reducing the volume (together this
* approximates a dedicated earpiece mode).
* - on iOS this is true if output is routed to the speaker.
* In that case Element Call manually appends a virtual earpiece device with
* id `EARPIECE_CONFIG_ID` and `{ type: "earpiece" }`.
* - on Android this is unused.
*/
forEarpiece?: boolean;
/**
* Is the device the OS earpiece audio configuration?
* - on iOS always undefined
* - on Android true for the `TYPE_BUILTIN_EARPIECE`
*/
isEarpiece?: boolean;
/**
* Is the device the OS default speaker?
* - on iOS true if output is routed to the speaker; otherwise iOS only
* declares a device with the id `dummy`.
* - on Android true for the `TYPE_BUILTIN_SPEAKER`
*/
isSpeaker?: boolean;
/**
* Is the device the OS default external headset (bluetooth)?
* - on iOS always undefined.
* - on Android true for the `TYPE_BLUETOOTH_SCO`
*/
isExternalHeadset?: boolean;
}
@@ -47,8 +73,16 @@ export interface OutputDevice {
*/
export const setPipEnabled$ = new Subject<boolean>();
/**
* Stores the list of available controlled audio output devices.
* This is set when the native code calls `setAvailableAudioDevices` with the list of available audio output devices.
*/
export const availableOutputDevices$ = new Subject<OutputDevice[]>();
/**
* Stores the current audio output device id.
* This is set when the native code calls `setAudioDevice`
*/
export const outputDevice$ = new Subject<string>();
/**
@@ -80,16 +114,41 @@ window.controls = {
setPipEnabled$.next(false);
},
/**
* Reverse engineered:
*
* - on iOS:
* This always a list of one thing. If current route output is speaker it returns
* the single `{"id":"Speaker","name":"Speaker","forEarpiece":true,"isSpeaker":true}` Notice that EC will
* also manually add a virtual earpiece device with id `EARPIECE_CONFIG_ID` and `{ type: "earpiece" }`.
* If the route output is not speaker then it will be `{id: 'dummy', name: 'dummy'}`
*
*
* - on Android:
* This is a list of all available output audio devices. The `id` is the Android AudioDeviceInfo.getId()
* and the `name` is based the Android AudioDeviceInfo.productName (mapped to static strings for known types)
* The `isEarpiece`, `isSpeaker` and `isExternalHeadset` are set based on the Android AudioDeviceInfo.type
* matching the corresponding types for earpiece, speaker and bluetooth headset.
*/
setAvailableAudioDevices(devices: OutputDevice[]): void {
logger.info("setAvailableAudioDevices called from native:", devices);
logger.info(
"[MediaDevices controls] setAvailableAudioDevices called from native:",
devices,
);
availableOutputDevices$.next(devices);
},
setAudioDevice(id: string): void {
logger.info("setAudioDevice called from native", id);
logger.info(
"[MediaDevices controls] setAudioDevice called from native",
id,
);
outputDevice$.next(id);
},
setAudioEnabled(enabled: boolean): void {
logger.info("setAudioEnabled called from native:", enabled);
logger.info(
"[MediaDevices controls] setAudioEnabled called from native:",
enabled,
);
if (!setAudioEnabled$.observed)
throw new Error(
"Output controls are disabled. No setAudioEnabled$ observer",

View File

@@ -51,15 +51,15 @@ export const makeOneOnOneLayout: CallLayout<OneOnOneLayoutModel> = ({
return (
<div ref={ref} className={styles.layer}>
<Slot
id={model.remote.id}
model={model.remote}
id={model.spotlight.id}
model={model.spotlight}
className={styles.container}
style={{ width: tileWidth, height: tileHeight }}
>
<Slot
className={classNames(styles.slot, styles.local)}
id={model.local.id}
model={model.local}
id={model.pip.id}
model={model.pip}
onDrag={onDragLocalTile}
data-block-alignment={pipAlignmentValue.block}
data-inline-alignment={pipAlignmentValue.inline}

View File

@@ -27,7 +27,13 @@ interface Props<M, R extends HTMLElement> {
state: Parameters<Handler<"drag", EventTypes["drag"]>>[0],
) => void
> | null;
/**
* The width this tile will have once its animations have settled.
*/
targetWidth: number;
/**
* The width this tile will have once its animations have settled.
*/
targetHeight: number;
model: M;
Tile: ComponentType<TileProps<M, R>>;

View File

@@ -30,7 +30,13 @@ import {
} from "../utils/test";
import { initializeWidget } from "../widget";
initializeWidget();
export const TestAudioContextConstructor = vi.fn(() => testAudioContext);
export const TestAudioContextConstructor = vi.fn(
class {
public constructor() {
return testAudioContext;
}
},
);
const MediaDevicesProvider = MediaDevicesContext.MediaDevicesContext.Provider;

View File

@@ -67,6 +67,6 @@ Initializer.initBeforeReact()
);
})
.catch((e) => {
logger.error("Failed to initialize app", e);
logger.error(`Failed to initialize app ${e.message}`, e);
root.render(e.message);
});

View File

@@ -65,6 +65,7 @@ Please see LICENSE in the repository root for full details.
.footer.overlay.hidden {
display: grid;
opacity: 0;
pointer-events: none;
}
.footer.overlay:has(:focus-visible) {
@@ -107,22 +108,9 @@ Please see LICENSE in the repository root for full details.
}
}
@media (max-width: 370px) {
.shareScreen {
display: none;
}
@media (max-height: 400px) {
.footer {
display: none;
}
}
}
@media (max-width: 320px) {
.invite,
.raiseHand {
display: none;
@media (max-height: 800px) {
.footer {
padding-block: var(--cpd-space-8x);
}
}
@@ -132,9 +120,27 @@ Please see LICENSE in the repository root for full details.
}
}
@media (max-height: 800px) {
.footer {
padding-block: var(--cpd-space-8x);
@media (max-width: 370px) {
.shareScreen {
display: none;
}
/* PIP custom css */
@media (max-height: 400px) {
.shareScreen {
display: flex;
}
.footer {
padding-block-start: var(--cpd-space-3x);
padding-block-end: var(--cpd-space-2x);
}
}
}
@media (max-width: 320px) {
.invite,
.raiseHand {
display: none;
}
}

View File

@@ -5,12 +5,12 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { IconButton, Text, Tooltip } from "@vector-im/compound-web";
import { IconButton, Tooltip } from "@vector-im/compound-web";
import { type MatrixClient, type Room as MatrixRoom } from "matrix-js-sdk";
import {
type FC,
type PointerEvent,
type TouchEvent,
type MouseEvent as ReactMouseEvent,
type PointerEvent as ReactPointerEvent,
useCallback,
useEffect,
useMemo,
@@ -98,8 +98,6 @@ import { useAppBarHidden, useAppBarSecondaryButton } from "../AppBar.tsx";
import { useBehavior } from "../useBehavior.ts";
import { Toast } from "../Toast.tsx";
import overlayStyles from "../Overlay.module.css";
import { Avatar, Size as AvatarSize } from "../Avatar";
import waitingStyles from "./WaitingForJoin.module.css";
import { prefetchSounds } from "../soundUtils";
import { useAudioContext } from "../useAudioContext";
import ringtoneMp3 from "../sound/ringtone.mp3?url";
@@ -107,11 +105,10 @@ import ringtoneOgg from "../sound/ringtone.ogg?url";
import { useTrackProcessorObservable$ } from "../livekit/TrackProcessorContext.tsx";
import { type Layout } from "../state/layout-types.ts";
import { ObservableScope } from "../state/ObservableScope.ts";
import { useLatest } from "../useLatest.ts";
const logger = rootLogger.getChild("[InCallView]");
const maxTapDurationMs = 400;
export interface ActiveCallProps extends Omit<
InCallViewProps,
"vm" | "livekitRoom" | "connState"
@@ -226,8 +223,6 @@ export const InCallView: FC<InCallViewProps> = ({
const { showControls } = useUrlParams();
const muteAllAudio = useBehavior(muteAllAudio$);
// Call pickup state and display names are needed for waiting overlay/sounds
const callPickupState = useBehavior(vm.callPickupState$);
// Preload a waiting and decline sounds
const pickupPhaseSoundCache = useInitial(async () => {
@@ -241,6 +236,7 @@ export const InCallView: FC<InCallViewProps> = ({
latencyHint: "interactive",
muted: muteAllAudio,
});
const latestPickupPhaseAudio = useLatest(pickupPhaseAudio);
const audioEnabled = useBehavior(muteStates.audio.enabled$);
const videoEnabled = useBehavior(muteStates.video.enabled$);
@@ -259,6 +255,7 @@ export const InCallView: FC<InCallViewProps> = ({
() => void toggleRaisedHand(),
);
const ringing = useBehavior(vm.ringing$);
const audioParticipants = useBehavior(vm.livekitRoomItems$);
const participantCount = useBehavior(vm.participantCount$);
const reconnecting = useBehavior(vm.reconnecting$);
@@ -273,7 +270,6 @@ export const InCallView: FC<InCallViewProps> = ({
const audioOutputSwitcher = useBehavior(vm.audioOutputSwitcher$);
const sharingScreen = useBehavior(vm.sharingScreen$);
const ringOverlay = useBehavior(vm.ringOverlay$);
const fatalCallError = useBehavior(vm.fatalError$);
// Stop the rendering and throw for the error boundary
if (fatalCallError) {
@@ -281,93 +277,36 @@ export const InCallView: FC<InCallViewProps> = ({
throw fatalCallError;
}
// We need to set the proper timings on the animation based upon the sound length.
const ringDuration = pickupPhaseAudio?.soundDuration["waiting"] ?? 1;
useEffect((): (() => void) => {
// The CSS animation includes the delay, so we must double the length of the sound.
window.document.body.style.setProperty(
"--call-ring-duration-s",
`${ringDuration * 2}s`,
);
window.document.body.style.setProperty(
"--call-ring-delay-s",
`${ringDuration}s`,
);
// Remove properties when we unload.
return () => {
window.document.body.style.removeProperty("--call-ring-duration-s");
window.document.body.style.removeProperty("--call-ring-delay-s");
};
}, [pickupPhaseAudio?.soundDuration, ringDuration]);
// When waiting for pickup, loop a waiting sound
// While ringing, loop the ringtone
useEffect((): void | (() => void) => {
if (callPickupState !== "ringing" || !pickupPhaseAudio) return;
const endSound = pickupPhaseAudio.playSoundLooping("waiting", ringDuration);
return () => {
void endSound().catch((e) => {
logger.error("Failed to stop ringing sound", e);
});
};
}, [callPickupState, pickupPhaseAudio, ringDuration]);
const audio = latestPickupPhaseAudio.current;
if (ringing && audio) {
const endSound = audio.playSoundLooping(
"waiting",
audio.soundDuration["waiting"] ?? 1,
);
return () => {
void endSound().catch((e) => {
logger.error("Failed to stop ringing sound", e);
});
};
}
}, [ringing, latestPickupPhaseAudio]);
// Waiting UI overlay
const waitingOverlay: JSX.Element | null = useMemo(() => {
return ringOverlay ? (
<div className={classNames(overlayStyles.bg, waitingStyles.overlay)}>
<div
className={classNames(overlayStyles.content, waitingStyles.content)}
>
<div className={waitingStyles.pulse}>
<Avatar
id={ringOverlay.idForAvatar}
name={ringOverlay.name}
src={ringOverlay.avatarMxc}
size={AvatarSize.XL}
/>
</div>
<Text size="md" className={waitingStyles.text}>
{ringOverlay.text}
</Text>
</div>
</div>
) : null;
}, [ringOverlay]);
// Ideally we could detect taps by listening for click events and checking
// that the pointerType of the event is "touch", but this isn't yet supported
// in Safari: https://developer.mozilla.org/en-US/docs/Web/API/Element/click_event#browser_compatibility
// Instead we have to watch for sufficiently fast touch events.
const touchStart = useRef<number | null>(null);
const onTouchStart = useCallback(() => (touchStart.current = Date.now()), []);
const onTouchEnd = useCallback(() => {
const start = touchStart.current;
if (start !== null && Date.now() - start <= maxTapDurationMs)
vm.tapScreen();
touchStart.current = null;
}, [vm]);
const onTouchCancel = useCallback(() => (touchStart.current = null), []);
// We also need to tell the footer controls to prevent touch events from
// bubbling up, or else the footer will be dismissed before a click/change
// event can be registered on the control
const onControlsTouchEnd = useCallback(
(e: TouchEvent) => {
// Somehow applying pointer-events: none to the controls when the footer
// is hidden is not enough to stop clicks from happening as the footer
// becomes visible, so we check manually whether the footer is shown
if (showFooter) {
e.stopPropagation();
vm.tapControls();
} else {
e.preventDefault();
}
const onViewClick = useCallback(
(e: ReactMouseEvent) => {
if (
(e.nativeEvent as PointerEvent).pointerType === "touch" &&
// If an interactive element was tapped, don't count this as a tap on the screen
(e.target as Element).closest?.("button, input") === null
)
vm.tapScreen();
},
[vm, showFooter],
[vm],
);
const onPointerMove = useCallback(
(e: PointerEvent) => {
(e: ReactPointerEvent) => {
if (e.pointerType === "mouse") vm.hoverScreen();
},
[vm],
@@ -606,8 +545,8 @@ export const InCallView: FC<InCallViewProps> = ({
vm={layout.spotlight}
expanded
onToggleExpanded={null}
targetWidth={gridBounds.height}
targetHeight={gridBounds.width}
targetWidth={gridBounds.width}
targetHeight={gridBounds.height}
showIndicators={false}
focusable={!contentObscured}
aria-hidden={contentObscured}
@@ -662,20 +601,21 @@ export const InCallView: FC<InCallViewProps> = ({
const buttons: JSX.Element[] = [];
const buttonSize = layout.type === "pip" ? "sm" : "lg";
buttons.push(
<MicButton
size={buttonSize}
key="audio"
muted={!audioEnabled}
enabled={audioEnabled}
onClick={toggleAudio ?? undefined}
onTouchEnd={onControlsTouchEnd}
disabled={toggleAudio === null}
data-testid="incall_mute"
/>,
<VideoButton
size={buttonSize}
key="video"
muted={!videoEnabled}
enabled={videoEnabled}
onClick={toggleVideo ?? undefined}
onTouchEnd={onControlsTouchEnd}
disabled={toggleVideo === null}
data-testid="incall_videomute"
/>,
@@ -683,11 +623,11 @@ export const InCallView: FC<InCallViewProps> = ({
if (vm.toggleScreenSharing !== null) {
buttons.push(
<ShareScreenButton
size={buttonSize}
key="share_screen"
className={styles.shareScreen}
enabled={sharingScreen}
onClick={vm.toggleScreenSharing}
onTouchEnd={onControlsTouchEnd}
data-testid="incall_screenshare"
/>,
);
@@ -695,30 +635,30 @@ export const InCallView: FC<InCallViewProps> = ({
if (supportsReactions) {
buttons.push(
<ReactionToggleButton
size={buttonSize}
vm={vm}
key="raise_hand"
className={styles.raiseHand}
identifier={`${client.getUserId()}:${client.getDeviceId()}`}
onTouchEnd={onControlsTouchEnd}
/>,
);
}
if (layout.type !== "pip")
buttons.push(
<SettingsButton
size={buttonSize}
key="settings"
onClick={openSettings}
onTouchEnd={onControlsTouchEnd}
/>,
);
buttons.push(
<EndCallButton
size={buttonSize}
key="end_call"
onClick={function (): void {
vm.hangup();
}}
onTouchEnd={onControlsTouchEnd}
data-testid="incall_leave"
/>,
);
@@ -751,7 +691,6 @@ export const InCallView: FC<InCallViewProps> = ({
className={styles.layout}
layout={gridMode}
setLayout={setGridMode}
onTouchEnd={onControlsTouchEnd}
/>
)}
</div>
@@ -760,12 +699,13 @@ export const InCallView: FC<InCallViewProps> = ({
const allConnections = useBehavior(vm.allConnections$);
return (
// The onClick handler here exists to control the visibility of the footer,
// and the footer is also viewable by moving focus into it, so this is fine.
// eslint-disable-next-line jsx-a11y/no-static-element-interactions, jsx-a11y/click-events-have-key-events
<div
className={styles.inRoom}
ref={containerRef}
onTouchStart={onTouchStart}
onTouchEnd={onTouchEnd}
onTouchCancel={onTouchCancel}
onClick={onViewClick}
onPointerMove={onPointerMove}
onPointerOut={onPointerOut}
>
@@ -785,7 +725,6 @@ export const InCallView: FC<InCallViewProps> = ({
{reconnectingToast}
{earpieceOverlay}
<ReactionsOverlay vm={vm} />
{waitingOverlay}
{footer}
{layout.type !== "pip" && (
<>

View File

@@ -5,7 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type ChangeEvent, type FC, type TouchEvent, useCallback } from "react";
import { type ChangeEvent, type FC, useCallback } from "react";
import { useTranslation } from "react-i18next";
import { Tooltip } from "@vector-im/compound-web";
import {
@@ -22,15 +22,9 @@ interface Props {
layout: Layout;
setLayout: (layout: Layout) => void;
className?: string;
onTouchEnd?: (e: TouchEvent) => void;
}
export const LayoutToggle: FC<Props> = ({
layout,
setLayout,
className,
onTouchEnd,
}) => {
export const LayoutToggle: FC<Props> = ({ layout, setLayout, className }) => {
const { t } = useTranslation();
const onChange = useCallback(
@@ -47,7 +41,6 @@ export const LayoutToggle: FC<Props> = ({
value="spotlight"
checked={layout === "spotlight"}
onChange={onChange}
onTouchEnd={onTouchEnd}
/>
</Tooltip>
<SpotlightIcon aria-hidden width={24} height={24} />
@@ -58,7 +51,6 @@ export const LayoutToggle: FC<Props> = ({
value="grid"
checked={layout === "grid"}
onChange={onChange}
onTouchEnd={onTouchEnd}
/>
</Tooltip>
<GridIcon aria-hidden width={24} height={24} />

View File

@@ -230,12 +230,12 @@ export const LobbyView: FC<Props> = ({
{recentsButtonInFooter && recentsButton}
<div className={inCallStyles.buttons}>
<MicButton
muted={!audioEnabled}
enabled={audioEnabled}
onClick={toggleAudio ?? undefined}
disabled={toggleAudio === null}
/>
<VideoButton
muted={!videoEnabled}
enabled={videoEnabled}
onClick={toggleVideo ?? undefined}
disabled={toggleVideo === null}
/>

View File

@@ -1,61 +0,0 @@
.overlay {
position: absolute;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
pointer-events: none;
}
.content {
display: flex;
flex-direction: column;
align-items: center;
gap: 16px;
}
.pulse {
position: relative;
height: 90px;
}
.pulse::before {
content: "";
position: absolute;
inset: -12px;
border-radius: 9999px;
border: 12px solid rgba(255, 255, 255, 0.6);
animation: pulse var(--call-ring-duration-s) ease-out infinite;
animation-delay: 1s;
opacity: 0;
}
.text {
color: var(--cpd-color-text-on-solid-primary);
}
@keyframes pulse {
0% {
transform: scale(0.95);
opacity: 0.7;
transform: scale(0);
opacity: 1;
}
35% {
transform: scale(1.15);
opacity: 0.15;
}
50% {
transform: scale(1.2);
opacity: 0;
}
50.01% {
transform: scale(0);
}
85% {
transform: scale(0);
}
100% {
transform: scale(0);
}
}

View File

@@ -128,24 +128,11 @@ exports[`InCallView > rendering > renders 1`] = `
width="1em"
xmlns="http://www.w3.org/2000/svg"
>
<g
clip-path="url(#a)"
>
<path
clip-rule="evenodd"
d="M8.929 15.1a13.6 13.6 0 0 0 4.654 3.066q2.62 1.036 5.492.923h.008l.003-.004.003-.002-.034-3.124-3.52-.483-1.791 1.792-.645-.322a13.5 13.5 0 0 1-3.496-2.52 13.4 13.4 0 0 1-2.52-3.496l-.322-.644 1.792-1.792-.483-3.519-3.123-.034-.003.002-.003.004v.002a13.65 13.65 0 0 0 .932 5.492A13.4 13.4 0 0 0 8.93 15.1m3.92 4.926a15.6 15.6 0 0 1-5.334-3.511 15.4 15.4 0 0 1-3.505-5.346 15.6 15.6 0 0 1-1.069-6.274 1.93 1.93 0 0 1 .589-1.366c.366-.366.84-.589 1.386-.589h.01l3.163.035a1.96 1.96 0 0 1 1.958 1.694v.005l.487 3.545v.003c.043.297.025.605-.076.907a2 2 0 0 1-.485.773l-.762.762a11.4 11.4 0 0 0 3.206 3.54q.457.33.948.614l.762-.761a2 2 0 0 1 .774-.486c.302-.1.61-.118.907-.076l3.553.487a1.96 1.96 0 0 1 1.694 1.958l.034 3.174c0 .546-.223 1.02-.588 1.386-.361.36-.827.582-1.363.588a15.3 15.3 0 0 1-6.29-1.062"
fill-rule="evenodd"
/>
</g>
<defs>
<clippath
id="a"
>
<path
d="M0 0h24v24H0z"
/>
</clippath>
</defs>
<path
clip-rule="evenodd"
d="M8.929 15.1a13.6 13.6 0 0 0 4.654 3.066q2.62 1.036 5.492.923h.008l.003-.004.003-.002-.034-3.124-3.52-.483-1.791 1.792-.645-.322a13.5 13.5 0 0 1-3.496-2.52 13.4 13.4 0 0 1-2.52-3.496l-.322-.645 1.792-1.791-.483-3.52-3.123-.033-.003.002-.003.004v.002a13.65 13.65 0 0 0 .932 5.492A13.4 13.4 0 0 0 8.93 15.1m3.92 4.926a15.6 15.6 0 0 1-5.334-3.511 15.4 15.4 0 0 1-3.505-5.346 15.6 15.6 0 0 1-1.069-6.274 1.93 1.93 0 0 1 .589-1.366c.366-.366.84-.589 1.386-.589h.01l3.163.035a1.96 1.96 0 0 1 1.958 1.694v.005l.487 3.545v.003c.043.297.025.605-.076.907a2 2 0 0 1-.485.773l-.762.762a11.3 11.3 0 0 0 1.806 2.348 11.4 11.4 0 0 0 2.348 1.806l.762-.762a2 2 0 0 1 .774-.485c.302-.1.61-.118.907-.076l3.553.487a1.96 1.96 0 0 1 1.694 1.958l.034 3.174c0 .546-.223 1.02-.588 1.386-.36.36-.827.582-1.363.588a15.3 15.3 0 0 1-6.29-1.062"
fill-rule="evenodd"
/>
</svg>
</div>
<h2
@@ -285,14 +272,14 @@ exports[`InCallView > rendering > renders 1`] = `
class="buttons"
>
<button
aria-checked="false"
aria-disabled="true"
aria-label="Unmute microphone"
aria-labelledby="_r_8_"
class="_button_13vu4_8 _has-icon_13vu4_60 _icon-only_13vu4_53"
data-kind="primary"
data-kind="secondary"
data-size="lg"
data-testid="incall_mute"
role="button"
role="switch"
tabindex="0"
>
<svg
@@ -309,14 +296,14 @@ exports[`InCallView > rendering > renders 1`] = `
</svg>
</button>
<button
aria-checked="false"
aria-disabled="true"
aria-label="Start video"
aria-labelledby="_r_d_"
class="_button_13vu4_8 _has-icon_13vu4_60 _icon-only_13vu4_53"
data-kind="primary"
data-kind="secondary"
data-size="lg"
data-testid="incall_videomute"
role="button"
role="switch"
tabindex="0"
>
<svg
@@ -354,7 +341,6 @@ exports[`InCallView > rendering > renders 1`] = `
</svg>
</button>
<button
aria-label="End call"
aria-labelledby="_r_n_"
class="_button_13vu4_8 endCall _has-icon_13vu4_60 _icon-only_13vu4_53 _destructive_13vu4_110"
data-kind="primary"

View File

@@ -5,13 +5,16 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { describe, expect, it, vi } from "vitest";
import { render, waitFor } from "@testing-library/react";
import { type Room as LivekitRoom } from "livekit-client";
import { afterEach, describe, expect, it, type Mock, vi } from "vitest";
import { render, waitFor, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { TooltipProvider } from "@vector-im/compound-web";
import type { MatrixClient } from "matrix-js-sdk";
import type { Room as LivekitRoom } from "livekit-client";
import { DeveloperSettingsTab } from "./DeveloperSettingsTab";
import { getSFUConfigWithOpenID } from "../livekit/openIDSFU";
import { customLivekitUrl as customLivekitUrlSetting } from "./settings";
// Mock url params hook to avoid environment-dependent snapshot churn.
vi.mock("../UrlParams", () => ({
useUrlParams: (): { mocked: boolean; answer: number } => ({
@@ -20,6 +23,14 @@ vi.mock("../UrlParams", () => ({
}),
}));
// IMPORTANT: mock the same specifier used by DeveloperSettingsTab
vi.mock("../livekit/openIDSFU", () => ({
getSFUConfigWithOpenID: vi.fn().mockResolvedValue({
url: "mock-url",
jwt: "mock-jwt",
}),
}));
// Provide a minimal mock of a Livekit Room structure used by the component.
function createMockLivekitRoom(
wsUrl: string,
@@ -86,6 +97,7 @@ describe("DeveloperSettingsTab", () => {
const { container } = render(
<DeveloperSettingsTab
client={client}
roomId={"#room:example.org"}
livekitRooms={livekitRooms}
env={{ MY_MOCK_ENV: 10, ENV: "test" } as unknown as ImportMetaEnv}
/>,
@@ -99,4 +111,141 @@ describe("DeveloperSettingsTab", () => {
expect(container).toMatchSnapshot();
});
describe("custom livekit url", () => {
afterEach(() => {
customLivekitUrlSetting.setValue(null);
});
const client = {
doesServerSupportUnstableFeature: vi.fn().mockResolvedValue(true),
getCrypto: () => ({ getVersion: (): string => "x" }),
getUserId: () => "@u:hs",
getDeviceId: () => "DEVICE",
} as unknown as MatrixClient;
it("will not update custom livekit url without roomId", async () => {
const user = userEvent.setup();
render(
<TooltipProvider>
<DeveloperSettingsTab
client={client}
env={{} as unknown as ImportMetaEnv}
/>
</TooltipProvider>,
);
const input = screen.getByLabelText("Custom Livekit-url");
await user.clear(input);
await user.type(input, "wss://example.livekit.invalid");
const saveButton = screen.getByRole("button", { name: "Save" });
await user.click(saveButton);
expect(getSFUConfigWithOpenID).not.toHaveBeenCalled();
expect(customLivekitUrlSetting.getValue()).toBe(null);
});
it("will not update custom livekit url without text in input", async () => {
const user = userEvent.setup();
render(
<TooltipProvider>
<DeveloperSettingsTab
client={client}
roomId="#testRoom"
env={{} as unknown as ImportMetaEnv}
/>
</TooltipProvider>,
);
const input = screen.getByLabelText("Custom Livekit-url");
await user.clear(input);
const saveButton = screen.getByRole("button", { name: "Save" });
await user.click(saveButton);
expect(getSFUConfigWithOpenID).not.toHaveBeenCalled();
expect(customLivekitUrlSetting.getValue()).toBe(null);
});
it("will not update custom livekit url when pressing cancel", async () => {
const user = userEvent.setup();
render(
<TooltipProvider>
<DeveloperSettingsTab
client={client}
roomId="#testRoom"
env={{} as unknown as ImportMetaEnv}
/>
</TooltipProvider>,
);
const input = screen.getByLabelText("Custom Livekit-url");
await user.clear(input);
await user.type(input, "wss://example.livekit.invalid");
const cancelButton = screen.getByRole("button", {
name: "Reset overwrite",
});
await user.click(cancelButton);
expect(getSFUConfigWithOpenID).not.toHaveBeenCalled();
expect(customLivekitUrlSetting.getValue()).toBe(null);
});
it("will update custom livekit url", async () => {
const user = userEvent.setup();
render(
<TooltipProvider>
<DeveloperSettingsTab
client={client}
roomId="#testRoom"
env={{} as unknown as ImportMetaEnv}
/>
</TooltipProvider>,
);
const input = screen.getByLabelText("Custom Livekit-url");
await user.clear(input);
await user.type(input, "wss://example.livekit.valid");
const saveButton = screen.getByRole("button", { name: "Save" });
await user.click(saveButton);
expect(getSFUConfigWithOpenID).toHaveBeenCalledWith(
expect.anything(),
expect.anything(),
"wss://example.livekit.valid",
"#testRoom",
);
expect(customLivekitUrlSetting.getValue()).toBe(
"wss://example.livekit.valid",
);
});
it("will show error on invalid url", async () => {
const user = userEvent.setup();
render(
<TooltipProvider>
<DeveloperSettingsTab
client={client}
roomId="#testRoom"
env={{} as unknown as ImportMetaEnv}
/>
</TooltipProvider>,
);
const input = screen.getByLabelText("Custom Livekit-url");
await user.clear(input);
await user.type(input, "wss://example.livekit.valid");
const saveButton = screen.getByRole("button", { name: "Save" });
(getSFUConfigWithOpenID as Mock).mockImplementation(() => {
throw new Error("Invalid URL");
});
await user.click(saveButton);
expect(
screen.getByText("invalid URL (did not update)"),
).toBeInTheDocument();
expect(customLivekitUrlSetting.getValue()).toBe(null);
});
});
});

View File

@@ -22,6 +22,7 @@ import {
import { logger } from "matrix-js-sdk/lib/logger";
import {
EditInPlace,
ErrorMessage,
Root as Form,
Heading,
HelpMessage,
@@ -45,9 +46,11 @@ import {
} from "./settings";
import styles from "./DeveloperSettingsTab.module.css";
import { useUrlParams } from "../UrlParams";
import { getSFUConfigWithOpenID } from "../livekit/openIDSFU";
interface Props {
client: MatrixClient;
roomId?: string;
livekitRooms?: {
room: LivekitRoom;
url: string;
@@ -60,6 +63,7 @@ interface Props {
export const DeveloperSettingsTab: FC<Props> = ({
client,
livekitRooms,
roomId,
env,
}) => {
const { t } = useTranslation();
@@ -97,6 +101,8 @@ export const DeveloperSettingsTab: FC<Props> = ({
alwaysShowIphoneEarpieceSetting,
);
const [customLivekitUrlUpdateError, setCustomLivekitUrlUpdateError] =
useState<string | null>(null);
const [customLivekitUrl, setCustomLivekitUrl] = useSetting(
customLivekitUrlSetting,
);
@@ -234,14 +240,36 @@ export const DeveloperSettingsTab: FC<Props> = ({
savingLabel={t("developer_mode.custom_livekit_url.saving")}
cancelButtonLabel={t("developer_mode.custom_livekit_url.reset")}
onSave={useCallback(
(e: React.FormEvent<HTMLFormElement>) => {
setCustomLivekitUrl(
customLivekitUrlTextBuffer === ""
? null
: customLivekitUrlTextBuffer,
);
async (e: React.FormEvent<HTMLFormElement>): Promise<void> => {
if (
roomId === undefined ||
customLivekitUrlTextBuffer === "" ||
customLivekitUrlTextBuffer === null
) {
setCustomLivekitUrl(null);
return;
}
try {
const userId = client.getUserId();
const deviceId = client.getDeviceId();
if (userId === null || deviceId === null) {
throw new Error("Invalid user or device ID");
}
await getSFUConfigWithOpenID(
client,
{ userId, deviceId, memberId: "" },
customLivekitUrlTextBuffer,
roomId,
);
setCustomLivekitUrlUpdateError(null);
setCustomLivekitUrl(customLivekitUrlTextBuffer);
} catch {
setCustomLivekitUrlUpdateError("invalid URL (did not update)");
}
},
[setCustomLivekitUrl, customLivekitUrlTextBuffer],
[customLivekitUrlTextBuffer, setCustomLivekitUrl, client, roomId],
)}
value={customLivekitUrlTextBuffer ?? ""}
onChange={useCallback(
@@ -256,7 +284,12 @@ export const DeveloperSettingsTab: FC<Props> = ({
},
[setCustomLivekitUrl],
)}
/>
serverInvalid={customLivekitUrlUpdateError !== null}
>
{customLivekitUrlUpdateError !== null && (
<ErrorMessage>{customLivekitUrlUpdateError}</ErrorMessage>
)}
</EditInPlace>
<Heading as="h3" type="body" weight="semibold" size="lg">
{t("developer_mode.matrixRTCMode.title")}
</Heading>

View File

@@ -213,6 +213,7 @@ export const SettingsModal: FC<Props> = ({
env={import.meta.env}
client={client}
livekitRooms={livekitRooms}
roomId={roomId}
/>
),
};

View File

@@ -234,12 +234,12 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
checked=""
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_8_"
name="_r_0_"
title=""
@@ -247,7 +247,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="legacy"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>
@@ -275,11 +275,11 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_a_"
name="_r_0_"
title=""
@@ -287,7 +287,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="compatibility"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>
@@ -315,11 +315,11 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
class="_inline-field-control_19upo_44"
>
<div
class="_container_1qhtc_10"
class="_container_1ug7n_10"
>
<input
aria-describedby="radix-_r_9_ radix-_r_b_ radix-_r_d_"
class="_input_1qhtc_18"
class="_input_1ug7n_18"
id="radix-_r_c_"
name="_r_0_"
title=""
@@ -327,7 +327,7 @@ exports[`DeveloperSettingsTab > renders and matches snapshot 1`] = `
value="matrix_2_0"
/>
<div
class="_ui_1qhtc_19"
class="_ui_1ug7n_19"
/>
</div>
</div>

View File

@@ -0,0 +1,34 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { expect, it } from "vitest";
import { init as initRageshake } from "./rageshake";
it("Logger should not crash if JSON.stringify fails", async () => {
// JSON.stringify can throw. We want to make sure that the logger can handle this gracefully.
await initRageshake();
const bigIntObj = { n: 1n };
const notStringifiable = {
bigIntObj,
};
// @ts-expect-error - we want to create an object that cannot be stringified
notStringifiable.foo = notStringifiable; // circular reference
// ensure this cannot be stringified
expect(() => JSON.stringify(notStringifiable)).toThrow();
expect(() =>
global.mx_rage_logger.log(
1,
"test",
"This is a test message",
notStringifiable,
),
).not.toThrow();
});

View File

@@ -75,7 +75,14 @@ class ConsoleLogger extends EventEmitter {
} else if (arg instanceof Error) {
return arg.message + (arg.stack ? `\n${arg.stack}` : "");
} else if (typeof arg === "object") {
return JSON.stringify(arg, getCircularReplacer());
try {
return JSON.stringify(arg, getCircularReplacer());
} catch {
// Stringify can fail if the object has circular references or if
// there is a bigInt.
// Did happen even with our `getCircularReplacer`. In this case, just log
return "<$ failed to serialize object $>";
}
} else {
return arg;
}

View File

@@ -34,15 +34,20 @@ export class Setting<T> {
this._value$ = new BehaviorSubject(initialValue);
this.value$ = this._value$;
this._lastUpdateReason$ = new BehaviorSubject<string | null>(null);
this.lastUpdateReason$ = this._lastUpdateReason$;
}
private readonly key: string;
private readonly _value$: BehaviorSubject<T>;
private readonly _lastUpdateReason$: BehaviorSubject<string | null>;
public readonly value$: Behavior<T>;
public readonly lastUpdateReason$: Behavior<string | null>;
public readonly setValue = (value: T): void => {
public readonly setValue = (value: T, reason?: string): void => {
this._value$.next(value);
this._lastUpdateReason$.next(reason ?? null);
localStorage.setItem(this.key, JSON.stringify(value));
};
public readonly getValue = (): T => {

View File

@@ -78,7 +78,6 @@ function renderWithMockClient(
disconnected: false,
supportedFeatures: {
reactions: true,
thumbnails: true,
},
setClient: vi.fn(),
authenticated: {

View File

@@ -0,0 +1,563 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { it, vi, expect, beforeEach, afterEach, describe } from "vitest";
import { firstValueFrom, of, Subject, take, toArray } from "rxjs";
import { type RTCCallIntent } from "matrix-js-sdk/lib/matrixrtc";
import { AndroidControlledAudioOutput } from "./AndroidControlledAudioOutput.ts";
import type { Controls, OutputDevice } from "../controls";
import { ObservableScope } from "./ObservableScope";
import { withTestScheduler } from "../utils/test";
// All the following device types are real device types that have been observed in the wild on Android devices,
// gathered from logs.
// There are no BT Speakers because they are currently filtered out by EXA (native layer)
// A device type describing the speaker system (i.e. a mono speaker or stereo speakers) built in a device.
const SPEAKER_DEVICE: OutputDevice = {
id: "3",
name: "Built-in speaker",
isEarpiece: false,
isSpeaker: true,
isExternalHeadset: false,
};
// A device type describing the attached earphone speaker.
const EARPIECE_DEVICE: OutputDevice = {
id: "2",
name: "Built-in earpiece",
isEarpiece: true,
isSpeaker: false,
isExternalHeadset: false,
};
// A device type describing a Bluetooth device typically used for telephony
const BT_HEADSET_DEVICE: OutputDevice = {
id: "2226",
name: "Bluetooth - OpenMove by Shokz",
isEarpiece: false,
isSpeaker: false,
isExternalHeadset: true,
};
// A device type describing a USB audio headset.
const USB_HEADSET_DEVICE: OutputDevice = {
id: "29440",
name: "USB headset - USB-Audio - AB13X USB Audio",
isEarpiece: false,
isSpeaker: false,
isExternalHeadset: false,
};
// A device type describing a headset, which is the combination of a headphones and microphone
const WIRED_HEADSET_DEVICE: OutputDevice = {
id: "54509",
name: "Wired headset - 23117RA68G",
isEarpiece: false,
isSpeaker: false,
isExternalHeadset: false,
};
// A device type describing a pair of wired headphones
const WIRED_HEADPHONE_DEVICE: OutputDevice = {
id: "679",
name: "Wired headphones - TB02",
isEarpiece: false,
isSpeaker: false,
isExternalHeadset: false,
};
/**
* The base device list that is always present on Android devices.
* This list is ordered by the OS, the speaker is listed before the earpiece.
*/
const BASE_DEVICE_LIST = [SPEAKER_DEVICE, EARPIECE_DEVICE];
const BT_HEADSET_BASE_DEVICE_LIST = [BT_HEADSET_DEVICE, ...BASE_DEVICE_LIST];
const WIRED_HEADSET_BASE_DEVICE_LIST = [
WIRED_HEADSET_DEVICE,
...BASE_DEVICE_LIST,
];
/**
* A full device list containing all the observed device types in the wild on Android devices.
* Ordered as they would be ordered by the OS.
*/
const FULL_DEVICE_LIST = [
BT_HEADSET_DEVICE,
USB_HEADSET_DEVICE,
WIRED_HEADSET_DEVICE,
WIRED_HEADPHONE_DEVICE,
...BASE_DEVICE_LIST,
];
let testScope: ObservableScope;
let mockControls: Controls;
beforeEach(() => {
testScope = new ObservableScope();
mockControls = {
onAudioDeviceSelect: vi.fn(),
onOutputDeviceSelect: vi.fn(),
} as unknown as Controls;
});
afterEach(() => {
testScope.end();
});
describe("Default selection", () => {
it("Default to speaker for video calls", async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(BASE_DEVICE_LIST),
testScope,
"video",
mockControls,
);
const emissions = await firstValueFrom(
controlledAudioOutput.selected$.pipe(take(1), toArray()),
);
expect(emissions).toEqual([
{ id: SPEAKER_DEVICE.id, virtualEarpiece: false },
]);
[
mockControls.onAudioDeviceSelect,
mockControls.onOutputDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(SPEAKER_DEVICE.id);
});
});
it("Default to earpiece for audio calls for base config", async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(BASE_DEVICE_LIST),
testScope,
"audio",
mockControls,
);
const emissions = await firstValueFrom(
controlledAudioOutput.selected$.pipe(take(1), toArray()),
);
expect(emissions).toEqual([
{ id: EARPIECE_DEVICE.id, virtualEarpiece: false },
]);
[
mockControls.onAudioDeviceSelect,
mockControls.onOutputDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(EARPIECE_DEVICE.id);
});
});
["audio", "video"].forEach((callIntent) => {
it(`Default to BT headset for ${callIntent} calls if present`, async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(BT_HEADSET_BASE_DEVICE_LIST),
testScope,
callIntent,
mockControls,
);
const emissions = await firstValueFrom(
controlledAudioOutput.selected$.pipe(take(1), toArray()),
);
expect(emissions).toEqual([
{ id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
]);
[
mockControls.onAudioDeviceSelect,
mockControls.onOutputDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
});
});
});
["audio", "video"].forEach((callIntent) => {
it(`Default to wired headset for ${callIntent} calls if present`, async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(WIRED_HEADSET_BASE_DEVICE_LIST),
testScope,
callIntent,
mockControls,
);
const emissions = await firstValueFrom(
controlledAudioOutput.selected$.pipe(take(1), toArray()),
);
expect(emissions).toEqual([
{ id: WIRED_HEADSET_DEVICE.id, virtualEarpiece: false },
]);
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledExactlyOnceWith(
WIRED_HEADSET_DEVICE.id,
);
expect(mockControls.onOutputDeviceSelect).toHaveBeenCalledExactlyOnceWith(
WIRED_HEADSET_DEVICE.id,
);
});
});
});
describe("Test mappings", () => {
it("Should map output device to correct AudioDeviceLabel", async () => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(FULL_DEVICE_LIST),
testScope,
undefined,
mockControls,
);
const availableDevices = await firstValueFrom(
controlledAudioOutput.available$.pipe(take(1)),
);
expect(availableDevices).toEqual(
new Map([
[BT_HEADSET_DEVICE.id, { type: "name", name: BT_HEADSET_DEVICE.name }],
[
USB_HEADSET_DEVICE.id,
{ type: "name", name: USB_HEADSET_DEVICE.name },
],
[
WIRED_HEADSET_DEVICE.id,
{ type: "name", name: WIRED_HEADSET_DEVICE.name },
],
[
WIRED_HEADPHONE_DEVICE.id,
{ type: "name", name: WIRED_HEADPHONE_DEVICE.name },
],
[SPEAKER_DEVICE.id, { type: "speaker" }],
[EARPIECE_DEVICE.id, { type: "earpiece" }],
]),
);
});
});
describe("Test select a device", () => {
it(`Switch to correct device `, () => {
withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
cold("a", { a: FULL_DEVICE_LIST }),
testScope,
undefined,
mockControls,
);
schedule("-abc", {
a: () => controlledAudioOutput.select(EARPIECE_DEVICE.id),
b: () => controlledAudioOutput.select(USB_HEADSET_DEVICE.id),
c: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
});
expectObservable(controlledAudioOutput.selected$).toBe("abcd", {
// virtualEarpiece is always false on android.
// Initially the BT_HEADSET is selected.
a: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
b: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
c: { id: USB_HEADSET_DEVICE.id, virtualEarpiece: false },
d: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
});
flush();
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(4);
expect(mockFn).toHaveBeenNthCalledWith(1, BT_HEADSET_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(2, EARPIECE_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(3, USB_HEADSET_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(4, SPEAKER_DEVICE.id);
});
});
});
it(`manually switch then a bt headset is added`, () => {
withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
cold("a--b", {
a: BASE_DEVICE_LIST,
b: BT_HEADSET_BASE_DEVICE_LIST,
}),
testScope,
"audio",
mockControls,
);
// Default was earpiece (audio call), let's switch to speaker
schedule("-a--", {
a: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
});
expectObservable(controlledAudioOutput.selected$).toBe("ab-c", {
// virtualEarpiece is always false on android.
// Initially the BT_HEADSET is selected.
a: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
b: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
c: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
});
flush();
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(3);
expect(mockFn).toHaveBeenNthCalledWith(1, EARPIECE_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(2, SPEAKER_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(3, BT_HEADSET_DEVICE.id);
});
});
});
it(`Go back to the previously selected after the auto-switch device goes away`, () => {
withTestScheduler(({ cold, schedule, expectObservable, flush }) => {
const controlledAudioOutput = new AndroidControlledAudioOutput(
cold("a--b-c", {
a: BASE_DEVICE_LIST,
b: BT_HEADSET_BASE_DEVICE_LIST,
c: BASE_DEVICE_LIST,
}),
testScope,
"audio",
mockControls,
);
// Default was earpiece (audio call), let's switch to speaker
schedule("-a---", {
a: () => controlledAudioOutput.select(SPEAKER_DEVICE.id),
});
expectObservable(controlledAudioOutput.selected$).toBe("ab-c-d", {
// virtualEarpiece is always false on android.
// Initially the BT_HEADSET is selected.
a: { id: EARPIECE_DEVICE.id, virtualEarpiece: false },
b: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
c: { id: BT_HEADSET_DEVICE.id, virtualEarpiece: false },
d: { id: SPEAKER_DEVICE.id, virtualEarpiece: false },
});
flush();
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(4);
expect(mockFn).toHaveBeenNthCalledWith(1, EARPIECE_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(2, SPEAKER_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(3, BT_HEADSET_DEVICE.id);
expect(mockFn).toHaveBeenNthCalledWith(4, SPEAKER_DEVICE.id);
});
});
});
});
describe("Available device changes", () => {
let availableSource$: Subject<OutputDevice[]>;
const createAudioControlledOutput = (
intent: RTCCallIntent,
): AndroidControlledAudioOutput => {
return new AndroidControlledAudioOutput(
availableSource$,
testScope,
intent,
mockControls,
);
};
beforeEach(() => {
availableSource$ = new Subject<OutputDevice[]>();
});
it("When a BT headset is added, control should switch to use it", () => {
createAudioControlledOutput("video");
// Emit the base device list, the speaker should be selected
availableSource$.next(BASE_DEVICE_LIST);
// Initially speaker would be selected
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(SPEAKER_DEVICE.id);
});
// Emit a new device list with a BT device, the control should switch to it
availableSource$.next([BT_HEADSET_DEVICE, ...BASE_DEVICE_LIST]);
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(2);
expect(mockFn).toHaveBeenLastCalledWith(BT_HEADSET_DEVICE.id);
});
});
// Android does not set `isExternalHeadset` to true for wired headphones, so we can't test this case.'
it.skip("When a wired headset is added, control should switch to use it", async () => {
const controlledAudioOutput = createAudioControlledOutput("video");
// Emit the base device list, the speaker should be selected
availableSource$.next(BASE_DEVICE_LIST);
await firstValueFrom(controlledAudioOutput.selected$.pipe(take(1)));
// Initially speaker would be selected
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(SPEAKER_DEVICE.id);
});
// Emit a new device list with a wired headset, the control should switch to it
availableSource$.next([WIRED_HEADPHONE_DEVICE, ...BASE_DEVICE_LIST]);
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(2);
expect(mockFn).toHaveBeenLastCalledWith(WIRED_HEADPHONE_DEVICE.id);
});
});
it("When the active bt headset is removed on audio call, control should switch to earpiece", () => {
createAudioControlledOutput("audio");
// Emit the BT headset device list, the BT headset should be selected
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
// Initially speaker would be selected
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
});
// Emit a new device list without the BT headset, the control should switch to the earpiece for
// audio calls
availableSource$.next(BASE_DEVICE_LIST);
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(2);
expect(mockFn).toHaveBeenLastCalledWith(EARPIECE_DEVICE.id);
});
});
it("When the active bt headset is removed on video call, control should switch to speaker", () => {
createAudioControlledOutput("video");
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
// Initially bt headset would be selected
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
});
// Emit a new device list without the BT headset, the control should switch to speaker for video call
availableSource$.next(BASE_DEVICE_LIST);
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(2);
expect(mockFn).toHaveBeenLastCalledWith(SPEAKER_DEVICE.id);
});
});
it("Do not repeatidly set the same device", () => {
createAudioControlledOutput("video");
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
// Initially bt headset would be selected
[
mockControls.onOutputDeviceSelect,
mockControls.onAudioDeviceSelect,
].forEach((mockFn) => {
expect(mockFn).toHaveBeenCalledTimes(1);
expect(mockFn).toHaveBeenCalledWith(BT_HEADSET_DEVICE.id);
});
});
});
describe("Scope management", () => {
it("Should stop emitting when scope ends", () => {
const aScope = new ObservableScope();
const controlledAudioOutput = new AndroidControlledAudioOutput(
of(BASE_DEVICE_LIST),
aScope,
undefined,
mockControls,
);
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
aScope.end();
controlledAudioOutput.select(EARPIECE_DEVICE.id);
expect(mockControls.onAudioDeviceSelect).not.toHaveBeenCalledTimes(2);
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
});
it("Should stop updating when scope ends", () => {
const aScope = new ObservableScope();
const availableSource$ = new Subject<OutputDevice[]>();
new AndroidControlledAudioOutput(
availableSource$,
aScope,
undefined,
mockControls,
);
availableSource$.next(BT_HEADSET_BASE_DEVICE_LIST);
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledWith(
BT_HEADSET_DEVICE.id,
);
aScope.end();
availableSource$.next(BASE_DEVICE_LIST);
expect(mockControls.onAudioDeviceSelect).not.toHaveBeenCalledTimes(2);
// Should have been called only once with the initial BT_HEADSET_DEVICE.id
expect(mockControls.onAudioDeviceSelect).toHaveBeenCalledOnce();
});
});

View File

@@ -0,0 +1,360 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import {
distinctUntilChanged,
map,
merge,
type Observable,
scan,
startWith,
Subject,
tap,
} from "rxjs";
import {
type AudioOutputDeviceLabel,
type MediaDevice,
type SelectedAudioOutputDevice,
} from "./MediaDevices.ts";
import type { ObservableScope } from "./ObservableScope.ts";
import type { RTCCallIntent } from "matrix-js-sdk/lib/matrixrtc";
import { type Controls, type OutputDevice } from "../controls.ts";
import { type Behavior } from "./Behavior.ts";
type ControllerState = {
/**
* The list of available output devices, ordered by preference order (most preferred first).
*/
devices: OutputDevice[];
/**
* Explicit user preference for the selected device.
*/
preferredDeviceId: string | undefined;
/**
* The effective selected device, always valid against available devices.
*/
selectedDeviceId: string | undefined;
};
/**
* The possible actions that can be performed on the controller,
* either by the user or by the system.
*/
type ControllerAction =
| { type: "selectDevice"; deviceId: string | undefined }
| { type: "deviceUpdated"; devices: OutputDevice[] };
/**
 * The implementation of the audio output media device for Android when using
 * the controlled audio output mode.
 *
 * In this mode, the hosting application (e.g. Element Mobile) is responsible
 * for providing the list of available audio output devices.
 * There is some Android-specific logic compared to other platforms:
 * - AndroidControlledAudioOutput is the only one responsible for selecting the
 *   best output device.
 * - On Android, we don't listen to the selected device from native code
 *   (control.setAudioDevice).
 * - If a new device is added or removed, this controller will determine the new
 *   selected device based on the available devices (that are ordered by
 *   preference order) and the user's selection (if any).
 *
 * Given the differences in how the native code is handling the audio routing on
 * Android compared to iOS, we have this separate implementation. It allows us
 * to have proper testing and avoid side effects from platform specific logic
 * breaking the other platform's implementation.
 */
export class AndroidControlledAudioOutput
  implements MediaDevice<AudioOutputDeviceLabel, SelectedAudioOutputDevice>
{
  private logger = rootLogger.getChild(
    "[MediaDevices AndroidControlledAudioOutput]",
  );

  /**
   * STATE stream: the current state of the controller, including the list of
   * available devices and the selected device.
   */
  private readonly controllerState$: Behavior<ControllerState>;

  /**
   * @inheritdoc
   */
  public readonly available$: Behavior<Map<string, AudioOutputDeviceLabel>>;

  /**
   * Effective selected device, always valid against available devices.
   *
   * On Android, we don't listen to the selected device from native code
   * (control.setAudioDevice). Instead, we determine the selected device
   * ourselves based on the available devices and the user's selection (if any).
   */
  public readonly selected$: Behavior<SelectedAudioOutputDevice | undefined>;

  // COMMAND stream: the user asks to select a device.
  private readonly selectDeviceCommand$ = new Subject<string | undefined>();

  /**
   * Requests that the device with the given id becomes the selected output.
   * The request is validated against the available devices by the state
   * reducer before it takes effect.
   */
  public select(id: string): void {
    this.logger.info(`select device: ${id}`);
    this.selectDeviceCommand$.next(id);
  }

  /**
   * Creates an instance of AndroidControlledAudioOutput.
   *
   * @constructor
   * @param controlledDevices$ - The list of available output devices coming
   *   from the hosting application, ordered by preference order (most
   *   preferred first).
   * @param scope - The ObservableScope to create the Behaviors in.
   * @param initialIntent - The initial call intent (e.g. "audio" or "video")
   *   that can be used to determine the default audio routing (e.g. default to
   *   earpiece for audio calls and speaker for video calls).
   * @param controls - The controls provided by the hosting application to
   *   control the audio routing and notify of user actions.
   */
  public constructor(
    private readonly controlledDevices$: Observable<OutputDevice[]>,
    private readonly scope: ObservableScope,
    private initialIntent: RTCCallIntent | undefined = undefined,
    controls: Controls,
  ) {
    this.controllerState$ = this.startObservingState$();
    this.selected$ = this.effectiveSelectionFromState$(this.controllerState$);
    this.available$ = scope.behavior(
      this.controllerState$.pipe(
        map((state) => {
          this.logger.info("available devices updated:", state.devices);
          return new Map<string, AudioOutputDeviceLabel>(
            state.devices.map((outputDevice) => {
              return [outputDevice.id, mapDeviceToLabel(outputDevice)];
            }),
          );
        }),
      ),
    );
    // Effect 1: notify host when effective selection changes.
    this.selected$
      // It is a behavior so it has built-in distinct until change
      .pipe(scope.bind())
      .subscribe((device) => {
        // Let the hosting application know which output device has been selected.
        if (device !== undefined) {
          this.logger.info("onAudioDeviceSelect called:", device);
          controls.onAudioDeviceSelect?.(device.id);
          // Also invoke the deprecated callback for backward compatibility.
          // TODO: it appears that on Android the hosting application is only
          // using the deprecated callback (onOutputDeviceSelect) and not the
          // new one (onAudioDeviceSelect); we should clean this up and only
          // have one callback for audio device selection.
          controls.onOutputDeviceSelect?.(device.id);
        }
      });
  }

  /**
   * Builds the controller state behavior by reducing device-list updates from
   * the host and user selection commands into a single ControllerState.
   */
  private startObservingState$(): Behavior<ControllerState> {
    const initialState: ControllerState = {
      devices: [],
      preferredDeviceId: undefined,
      selectedDeviceId: undefined,
    };
    // Merge the two possible input observables into a single
    // stream of actions that will update the state of the controller.
    const actions$: Observable<ControllerAction> = merge(
      this.controlledDevices$.pipe(
        map(
          (devices) =>
            ({ type: "deviceUpdated", devices }) satisfies ControllerAction,
        ),
      ),
      this.selectDeviceCommand$.pipe(
        map(
          (deviceId) =>
            ({ type: "selectDevice", deviceId }) satisfies ControllerAction,
        ),
      ),
    );
    const initialAction: ControllerAction = {
      type: "deviceUpdated",
      devices: [],
    };
    return this.scope.behavior(
      actions$.pipe(
        startWith(initialAction),
        scan((state, action): ControllerState => {
          switch (action.type) {
            case "deviceUpdated": {
              // The device list changed: re-evaluate which device should be
              // effectively selected against the new list.
              const chosenDevice = this.chooseEffectiveSelection({
                previousDevices: state.devices,
                availableDevices: action.devices,
                currentSelectedId: state.selectedDeviceId,
                preferredDeviceId: state.preferredDeviceId,
              });
              return {
                ...state,
                devices: action.devices,
                selectedDeviceId: chosenDevice,
              };
            }
            case "selectDevice": {
              // The user picked a device: validate it against the current
              // device list and remember it as the preferred device.
              const chosenDevice = this.chooseEffectiveSelection({
                previousDevices: state.devices,
                availableDevices: state.devices,
                currentSelectedId: state.selectedDeviceId,
                preferredDeviceId: action.deviceId,
              });
              return {
                ...state,
                preferredDeviceId: action.deviceId,
                selectedDeviceId: chosenDevice,
              };
            }
          }
        }, initialState),
      ),
    );
  }

  /**
   * Derives the publicly exposed selected$ behavior from the internal state,
   * de-duplicating consecutive emissions by device id.
   */
  private effectiveSelectionFromState$(
    state$: Observable<ControllerState>,
  ): Behavior<SelectedAudioOutputDevice | undefined> {
    return this.scope.behavior(
      state$
        .pipe(
          map((state) => {
            if (state.selectedDeviceId) {
              return {
                id: state.selectedDeviceId,
                /** This is an iOS thing, always false for Android. */
                virtualEarpiece: false,
              };
            }
            return undefined;
          }),
          distinctUntilChanged((a, b) => a?.id === b?.id),
        )
        .pipe(
          tap((selected) => {
            this.logger.debug(`selected device: ${selected?.id}`);
          }),
        ),
    );
  }

  /**
   * Decides which device id should be effectively selected, given the previous
   * and current device lists, the current effective selection and the user's
   * preferred device (if any).
   *
   * @returns The id of the device to select, or undefined if none is available.
   */
  private chooseEffectiveSelection(args: {
    previousDevices: OutputDevice[];
    availableDevices: OutputDevice[];
    currentSelectedId: string | undefined;
    preferredDeviceId: string | undefined;
  }): string | undefined {
    const {
      previousDevices,
      availableDevices,
      currentSelectedId,
      preferredDeviceId,
    } = args;
    this.logger.debug(`chooseEffectiveSelection with args:`, args);
    // Take preferredDeviceId in priority or default to the last effective
    // selection. (`||` also treats an empty-string id as "no preference".)
    const activeSelectedDeviceId = preferredDeviceId || currentSelectedId;
    const isAvailable = availableDevices.some(
      (device) => device.id === activeSelectedDeviceId,
    );
    // If there is no current device, or it is not available anymore,
    // choose the default device selection logic.
    if (activeSelectedDeviceId === undefined || !isAvailable) {
      this.logger.debug(
        `No current device or it is not available, using default selection logic.`,
      );
      // use the default selection logic
      return this.chooseDefaultDeviceId(availableDevices);
    }
    // Is there a new added device?
    // If a device is added, we might want to switch to it if it's more
    // preferred than the currently selected device.
    const newDeviceWasAdded = availableDevices.some(
      (device) => !previousDevices.some((d) => d.id === device.id),
    );
    if (newDeviceWasAdded) {
      // TODO only want to check from the added device, not all devices.?
      // Check if the currently selected device is the most preferred one;
      // if not, consider switching to the most preferred one.
      const mostPreferredDevice = availableDevices[0];
      this.logger.debug(
        `A new device was added, checking if we should switch to it.`,
        mostPreferredDevice,
      );
      if (mostPreferredDevice.id !== activeSelectedDeviceId) {
        // Given this is automatic switching, we want to be careful and only
        // switch to a more private device (e.g. from speaker to a BT headset)
        // but not switch from a more private device to a less private one
        // (e.g. from a BT headset to the speaker), as that can be disruptive
        // for the user if it happens unexpectedly.
        if (mostPreferredDevice.isExternalHeadset === true) {
          // Bug fix: the two ids were previously swapped in this message
          // (it claimed to switch *to* the old selection).
          this.logger.info(
            `The currently selected device ${activeSelectedDeviceId} is not the most preferred one, switching to the most preferred one ${mostPreferredDevice.id} instead.`,
          );
          // Let's switch as it is a more private device.
          return mostPreferredDevice.id;
        }
      }
    }
    // no changes
    return activeSelectedDeviceId;
  }

  /**
   * The logic for the default is different based on the call type.
   * For example for a voice call we want to default to the earpiece if it's
   * available, but for a video call we want to default to the speaker.
   * If the user is using a BT headset we want to default to that, as it's
   * likely what they want to use for both video and voice calls.
   *
   * @param available the available audio output devices to choose from,
   *   sorted by likelihood of it being used for communication (most preferred
   *   first).
   */
  private chooseDefaultDeviceId(available: OutputDevice[]): string | undefined {
    this.logger.debug(
      `Android routing logic intent: ${this.initialIntent} finding best default...`,
    );
    if (this.initialIntent === "audio") {
      const systemProposed = available[0];
      // If no headset is connected, Android will route to the speaker by
      // default, but for a voice call we want to route to the earpiece
      // instead, so override the system proposed routing in that case.
      if (systemProposed?.isSpeaker === true) {
        // search for the earpiece
        const earpieceDevice = available.find(
          (device) => device.isEarpiece === true,
        );
        if (earpieceDevice) {
          this.logger.debug(
            `Android routing: Switch to earpiece instead of speaker for voice call`,
          );
          return earpieceDevice.id;
        } else {
          this.logger.debug(
            `Android routing: no earpiece found, cannot switch, use system proposed routing`,
          );
          return systemProposed.id;
        }
      } else {
        this.logger.debug(
          `Android routing: Use system proposed routing `,
          systemProposed,
        );
        return systemProposed?.id;
      }
    } else {
      // Use the system best proposed best routing.
      return available[0]?.id;
    }
  }
}
// Utilities
/**
 * Maps a native OutputDevice to the label shown for it in the output device
 * picker. Earpiece and speaker get dedicated label types; every other device
 * falls back to its reported name.
 */
function mapDeviceToLabel(device: OutputDevice): AudioOutputDeviceLabel {
  if (device.isEarpiece) return { type: "earpiece" };
  if (device.isSpeaker) return { type: "speaker" };
  return { type: "name", name: device.name };
}

View File

@@ -0,0 +1,193 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { afterEach, beforeEach, describe, vi, it } from "vitest";
import * as ComponentsCore from "@livekit/components-core";
import { ObservableScope } from "./ObservableScope";
import { AudioOutput } from "./MediaDevices";
import { withTestScheduler } from "../utils/test";
// Test fixtures: audio output devices shaped like what the browser's
// enumerateDevices() would report. The deviceId/groupId values are opaque
// hashes copied from real Chrome output.
const BT_SPEAKER = {
  deviceId: "f9fc8f5f94578fe3abd89e086c1e78c08477aa564dd9e917950f0e7ebb37a6a2",
  kind: "audiooutput",
  label: "JBL (Bluetooth)",
  groupId: "309a5c086cd8eb885a164046db6ec834c349be01d86448d02c1a5279456ff9e4",
} as unknown as MediaDeviceInfo;
const BUILT_IN_SPEAKER = {
  deviceId: "acdbb8546ea6fa85ba2d861e9bcc0e71810d03bbaf6d1712c69e8d9c0c6c2e0a",
  kind: "audiooutput",
  label: "MacBook Speakers (Built-in)",
  groupId: "08a5a3a486473aaa898eb81cda3113f3e21053fb8b84155f4e612fe3f8db5d17",
} as unknown as MediaDeviceInfo;
const BT_HEADSET = {
  deviceId: "ff8e6edb4ebb512b2b421335bfd14994a5b4c7192b3e84a8696863d83cf46d12",
  kind: "audiooutput",
  label: "OpenMove (Bluetooth)",
  groupId: "c2893c2438c44248368e0533300245c402764991506f42cd73818dc8c3ee9c88",
} as unknown as MediaDeviceInfo;
// Device lists as seen on a Mac ("AMAC" — presumably "a Mac"; TODO confirm
// the naming). `asDefault` is a hoisted function declaration defined below,
// so calling it here at module-evaluation time is fine.
const AMAC_DEVICE_LIST = [BT_SPEAKER, BUILT_IN_SPEAKER];
// Same list plus Chrome's synthetic "default" entry (OS default = built-in).
const AMAC_DEVICE_LIST_WITH_DEFAULT = [
  asDefault(BUILT_IN_SPEAKER),
  ...AMAC_DEVICE_LIST,
];
// Variant where a BT headset is connected and is the OS default.
const AMAC_HS_DEVICE_LIST = [
  asDefault(BT_HEADSET),
  BT_SPEAKER,
  BT_HEADSET,
  BUILT_IN_SPEAKER,
];
// A second machine's devices: laptop speaker + monitor output sharing one
// groupId (same physical sound card).
const LAPTOP_SPEAKER = {
  deviceId: "EcUxTMu8He2wz+3Y8m/u0fy6M92pUk=",
  kind: "audiooutput",
  label: "Raptor AVS Speaker",
  groupId: "kSrdanhpEDLg3vN8z6Z9MJ1EdanB8zI+Q1dxA=",
} as unknown as MediaDeviceInfo;
const MONITOR_SPEAKER = {
  deviceId: "gBryZdAdC8I/rrJpr9r6R+rZzKkoIK5cpU=",
  kind: "audiooutput",
  label: "Raptor AVS HDMI / DisplayPort 1 Output",
  groupId: "kSrdanhpEDLg3vN8z6Z9MJ1EdanB8zI+Q1dxA=",
} as unknown as MediaDeviceInfo;
// Note: this list has no synthetic "default" entry, unlike the AMAC lists.
const DEVICE_LIST_B = [LAPTOP_SPEAKER, MONITOR_SPEAKER];
// On Chrome there is an additional synthetic device called
// "Default - <device name>"; it represents what the OS default is right now.
// Returns a copy of the given device rewritten as that synthetic entry.
function asDefault(device: MediaDeviceInfo): MediaDeviceInfo {
  const defaultLabel = `Default - ${device.label}`;
  return Object.assign({}, device, { deviceId: "default", label: defaultLabel });
}
// When the authorization is not yet granted, every device is still listed but
// only with empty/blank labels and ids — a transition state. Returns a copy of
// the given device with those identifying fields blanked out.
function toBlankDevice(device: MediaDeviceInfo): MediaDeviceInfo {
  const blankFields = { deviceId: "", label: "", groupId: "" };
  return Object.assign({}, device, blankFields);
}
// Replace createMediaDeviceObserver with a vi.fn() so each test can feed its
// own (cold) device-list observable into AudioOutput via mockReturnValue.
// vitest hoists vi.mock calls above the imports of the mocked module.
vi.mock("@livekit/components-core", () => ({
  createMediaDeviceObserver: vi.fn(),
}));
describe("AudioOutput Tests", () => {
  // A fresh ObservableScope per test so behaviors from one test cannot leak
  // into the next; ended in afterEach to tear down subscriptions.
  let testScope: ObservableScope;
  beforeEach(() => {
    testScope = new ObservableScope();
  });
  afterEach(() => {
    testScope.end();
  });
  it("should select the default audio output device", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("ab", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: AMAC_DEVICE_LIST_WITH_DEFAULT.map(toBlankDevice),
          b: AMAC_DEVICE_LIST_WITH_DEFAULT,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // While the list is still blanked nothing is selected; once the real
      // list arrives, the synthetic "default" device becomes the selection.
      expectObservable(audioOutput.selected$).toBe("ab", {
        a: undefined,
        b: { id: "default", virtualEarpiece: false },
      });
    });
  });
  it("Select the correct device when requested", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, schedule, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("ab", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: DEVICE_LIST_B.map(toBlankDevice),
          b: DEVICE_LIST_B,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // User selections are scheduled only after the real device list (frame
      // 1) is known.
      schedule("--abc", {
        a: () => audioOutput.select(MONITOR_SPEAKER.deviceId),
        b: () => audioOutput.select(LAPTOP_SPEAKER.deviceId),
        c: () => audioOutput.select(MONITOR_SPEAKER.deviceId),
      });
      // Emission 'b' (LAPTOP_SPEAKER) happens before any select call —
      // presumably the automatic selection of the first device once the real
      // list arrives; the remaining emissions track the scheduled selects.
      expectObservable(audioOutput.selected$).toBe("abcde", {
        a: undefined,
        b: { id: LAPTOP_SPEAKER.deviceId, virtualEarpiece: false },
        c: { id: MONITOR_SPEAKER.deviceId, virtualEarpiece: false },
        d: { id: LAPTOP_SPEAKER.deviceId, virtualEarpiece: false },
        e: { id: MONITOR_SPEAKER.deviceId, virtualEarpiece: false },
      });
    });
  });
  it("Test mappings", () => {
    // In a real life setup there would be first a blanked list
    // then the real one.
    withTestScheduler(({ behavior, cold, schedule, expectObservable }) => {
      vi.mocked(ComponentsCore.createMediaDeviceObserver).mockReturnValue(
        cold("a", {
          // In a real life setup there would be first a blanked list
          // then the real one.
          a: AMAC_HS_DEVICE_LIST,
        }),
      );
      const audioOutput = new AudioOutput(
        behavior("a", { a: true }),
        testScope,
      );
      // Every reported device should be keyed by its id and labelled by its
      // reported name, including the synthetic "default" entry.
      const expectedMappings = new Map([
        [`default`, { type: "name", name: asDefault(BT_HEADSET).label }],
        [BT_SPEAKER.deviceId, { type: "name", name: BT_SPEAKER.label }],
        [BT_HEADSET.deviceId, { type: "name", name: BT_HEADSET.label }],
        [
          BUILT_IN_SPEAKER.deviceId,
          { type: "name", name: BUILT_IN_SPEAKER.label },
        ],
      ]);
      expectObservable(audioOutput.available$).toBe("a", {
        a: expectedMappings,
      });
    });
  });
});

View File

@@ -89,7 +89,6 @@ export interface Props {
* `callPickupState$` The current call pickup state of the call.
* - "unknown": The client has not yet sent the notification event. We don't know if it will because it first needs to send its own membership.
* Then we can conclude if we were the first one to join or not.
* This may also be set if we are disconnected.
* - "ringing": The call is ringing on other devices in this room (This client should give audiovisual feedback that this is happening).
* - "timeout": No-one picked up in the defined time this call should be ringing on others devices.
* The call failed. If desired this can be used as a trigger to exit the call.
@@ -131,15 +130,9 @@ export function createCallNotificationLifecycle$({
) as Behavior<Epoch<boolean>>;
/**
* Whenever the RTC session tells us that it intends to ring the remote
* participant's devices, this emits an Observable tracking the current state of
* that ringing process.
* The state of the current ringing attempt, if the RTC session is indeed
* ringing the remote participant's devices. Otherwise `null`.
*/
// This is a behavior since we need to store the latest state for when we subscribe to this after `didSendCallNotification$`
// has already emitted but we still need the latest observable with a timeout timer that only gets created on after receiving `notificationEvent`.
// A behavior will emit the latest observable with the running timer to new subscribers.
// see also: callPickupState$ and in particular the line: `return this.ring$.pipe(mergeAll());` here we otherwise might get an EMPTY observable if
// `ring$` would not be a behavior.
const remoteRingState$: Behavior<"ringing" | "timeout" | "decline" | null> =
scope.behavior(
sentCallNotification$.pipe(

View File

@@ -46,9 +46,11 @@ import {
} from "../../utils/test.ts";
import { E2eeType } from "../../e2ee/e2eeType.ts";
import {
alice,
aliceId,
aliceParticipant,
aliceRtcMember,
aliceUserId,
bobId,
bobRtcMember,
local,
@@ -140,8 +142,8 @@ export interface SpotlightExpandedLayoutSummary {
export interface OneOnOneLayoutSummary {
type: "one-on-one";
local: string;
remote: string;
spotlight: string;
pip: string;
}
export interface PipLayoutSummary {
@@ -194,11 +196,11 @@ function summarizeLayout$(l$: Observable<Layout>): Observable<LayoutSummary> {
);
case "one-on-one":
return combineLatest(
[l.local.media$, l.remote.media$],
(local, remote) => ({
[l.spotlight.media$, l.pip.media$],
(spotlight, pip) => ({
type: l.type,
local: local.id,
remote: remote.id,
spotlight: spotlight.id,
pip: pip.id,
}),
);
case "pip":
@@ -537,8 +539,8 @@ describe.each([
b: {
// In a larger window, expect the normal one-on-one layout
type: "one-on-one",
local: `${localId}:0`,
remote: `${aliceId}:0`,
pip: `${localId}:0`,
spotlight: `${aliceId}:0`,
},
c: {
// In a PiP-sized window, we of course expect a PiP layout
@@ -840,8 +842,8 @@ describe.each([
},
b: {
type: "one-on-one",
local: `${localId}:0`,
remote: `${aliceId}:0`,
pip: `${localId}:0`,
spotlight: `${aliceId}:0`,
},
c: {
type: "grid",
@@ -883,8 +885,8 @@ describe.each([
},
b: {
type: "one-on-one",
local: `${localId}:0`,
remote: `${aliceId}:0`,
pip: `${localId}:0`,
spotlight: `${aliceId}:0`,
},
c: {
type: "grid",
@@ -893,8 +895,8 @@ describe.each([
},
d: {
type: "one-on-one",
local: `${localId}:0`,
remote: `${daveId}:0`,
pip: `${localId}:0`,
spotlight: `${daveId}:0`,
},
},
);
@@ -1087,83 +1089,81 @@ describe.each([
});
});
describe("waitForCallPickup$", () => {
it.skip("regression test: does stop ringing in case livekitConnectionState$ emits after didSendCallNotification$ has already emitted", () => {
withTestScheduler(({ schedule, expectObservable, behavior }) => {
withCallViewModel(
{
livekitConnectionState$: behavior("d 9ms c", {
d: ConnectionState.Disconnected,
c: ConnectionState.Connected,
}),
},
(vm, rtcSession) => {
// Fire a call notification IMMEDIATELY (its important for this test, that this happens before the livekitConnectionState$ emits)
schedule("n", {
n: () => {
rtcSession.emit(
MatrixRTCSessionEvent.DidSendCallNotification,
mockRingEvent("$notif1", 30),
);
},
});
test("recipient has placeholder tile while ringing or timed out", () => {
withTestScheduler(({ schedule, expectObservable }) => {
withCallViewModel(
{
roomMembers: [alice, local], // Simulate a DM
},
(vm, rtcSession) => {
// Fire a ringing notification
schedule("n", {
n: () => {
rtcSession.emit(
MatrixRTCSessionEvent.DidSendCallNotification,
mockRingEvent("$notif1", 30),
);
},
});
expectObservable(vm.callPickupState$).toBe("a 9ms b 19ms c", {
a: "unknown",
b: "ringing",
c: "timeout",
});
},
{
waitForCallPickup: true,
encryptionSystem: { kind: E2eeType.PER_PARTICIPANT },
},
);
});
// Should ring for 30ms and then time out
expectObservable(vm.ringing$).toBe("(ny) 26ms n", yesNo);
// Layout should show placeholder media for the participant we're
// ringing the entire time (even once timed out)
expectObservable(summarizeLayout$(vm.layout$)).toBe("a", {
a: {
type: "one-on-one",
spotlight: `${localId}:0`,
pip: `ringing:${aliceUserId}`,
},
});
},
{ waitForCallPickup: true },
);
});
});
it.skip("ringing -> unknown if we get disconnected", () => {
withTestScheduler(({ behavior, schedule, expectObservable }) => {
const connectionState$ = new BehaviorSubject(ConnectionState.Connected);
// Someone joins at 20ms (both LiveKit participant and MatrixRTC member)
withCallViewModel(
{
remoteParticipants$: behavior("a 19ms b", {
a: [],
b: [aliceParticipant],
}),
rtcMembers$: behavior("a 19ms b", {
a: [localRtcMember],
b: [localRtcMember, aliceRtcMember],
}),
livekitConnectionState$: connectionState$,
},
(vm, rtcSession) => {
// Notify at 5ms so we enter ringing, then get disconnected 5ms later
schedule(" 5ms r 5ms d", {
r: () => {
rtcSession.emit(
MatrixRTCSessionEvent.DidSendCallNotification,
mockRingEvent("$notif2", 100),
);
},
d: () => {
connectionState$.next(ConnectionState.Disconnected);
},
});
test("recipient's placeholder tile is replaced by their real tile once they answer", () => {
withTestScheduler(({ behavior, schedule, expectObservable }) => {
withCallViewModel(
{
// Alice answers after 20ms
rtcMembers$: behavior("a 20ms b", {
a: [localRtcMember],
b: [localRtcMember, aliceRtcMember],
}),
roomMembers: [alice, local], // Simulate a DM
},
(vm, rtcSession) => {
// Fire a ringing notification
schedule("n", {
n: () => {
rtcSession.emit(
MatrixRTCSessionEvent.DidSendCallNotification,
mockRingEvent("$notif1", 30),
);
},
});
expectObservable(vm.callPickupState$).toBe("a 4ms b 5ms c", {
a: "unknown",
b: "ringing",
c: "unknown",
});
},
{
waitForCallPickup: true,
encryptionSystem: { kind: E2eeType.PER_PARTICIPANT },
},
);
});
// Should ring until Alice joins
expectObservable(vm.ringing$).toBe("(ny) 17ms n", yesNo);
// Layout should show placeholder media for the participant we're
// ringing the entire time
expectObservable(summarizeLayout$(vm.layout$)).toBe("a 20ms b", {
a: {
type: "one-on-one",
spotlight: `${localId}:0`,
pip: `ringing:${aliceUserId}`,
},
b: {
type: "one-on-one",
spotlight: `${aliceId}:0`,
pip: `${localId}:0`,
},
});
},
{ waitForCallPickup: true },
);
});
});

View File

@@ -51,15 +51,9 @@ import { v4 as uuidv4 } from "uuid";
import { type IMembershipManager } from "matrix-js-sdk/lib/matrixrtc/IMembershipManager";
import {
LocalUserMediaViewModel,
type MediaViewModel,
type RemoteUserMediaViewModel,
ScreenShareViewModel,
type UserMediaViewModel,
} from "../MediaViewModel";
import {
accumulate,
createToggle$,
filterBehavior,
generateItem,
generateItems,
pauseWhen,
} from "../../utils/observable";
@@ -69,7 +63,7 @@ import {
playReactionsSound,
showReactions,
} from "../../settings/settings";
import { isFirefox } from "../../Platform";
import { isFirefox, platform } from "../../Platform";
import { setPipEnabled$ } from "../../controls";
import { TileStore } from "../TileStore";
import { gridLikeLayout } from "../GridLikeLayout";
@@ -91,8 +85,6 @@ import { type MuteStates } from "../MuteStates";
import { getUrlParams } from "../../UrlParams";
import { type ProcessorState } from "../../livekit/TrackProcessorContext";
import { ElementWidgetActions, widget } from "../../widget";
import { UserMedia } from "../UserMedia.ts";
import { ScreenShare } from "../ScreenShare.ts";
import {
type GridLayoutMedia,
type Layout,
@@ -136,13 +128,25 @@ import {
createSentCallNotification$,
} from "./CallNotificationLifecycle.ts";
import {
createDMMember$,
createMatrixMemberMetadata$,
createRoomMembers$,
} from "./remoteMembers/MatrixMemberMetadata.ts";
import { Publisher } from "./localMember/Publisher.ts";
import { type Connection } from "./remoteMembers/Connection.ts";
import { createLayoutModeSwitch } from "./LayoutSwitch.ts";
import {
createWrappedUserMedia,
type WrappedUserMediaViewModel,
} from "../media/WrappedUserMediaViewModel.ts";
import { type ScreenShareViewModel } from "../media/ScreenShareViewModel.ts";
import { type UserMediaViewModel } from "../media/UserMediaViewModel.ts";
import { type MediaViewModel } from "../media/MediaViewModel.ts";
import { type LocalUserMediaViewModel } from "../media/LocalUserMediaViewModel.ts";
import { type RemoteUserMediaViewModel } from "../media/RemoteUserMediaViewModel.ts";
import {
createRingingMedia,
type RingingMediaViewModel,
} from "../media/RingingMediaViewModel.ts";
const logger = rootLogger.getChild("[CallViewModel]");
//TODO
@@ -192,7 +196,6 @@ interface LayoutScanState {
tiles: TileStore;
}
type MediaItem = UserMedia | ScreenShare;
export type LivekitRoomItem = {
livekitRoom: LivekitRoom;
participants: string[];
@@ -211,21 +214,28 @@ export type LivekitRoomItem = {
export interface CallViewModel {
// lifecycle
autoLeave$: Observable<AutoLeaveReason>;
// TODO if we are in "unknown" state we need a loading rendering (or empty screen)
// Otherwise it looks like we already connected and only then does the ringing start, which is weird.
callPickupState$: Behavior<
"unknown" | "ringing" | "timeout" | "decline" | "success" | null
>;
/**
* Whether we are ringing a call recipient.
*/
ringing$: Behavior<boolean>;
/** Observable that emits when the user should leave the call (hangup pressed, widget action, error).
* THIS DOES NOT LEAVE THE CALL YET. The only way to leave the call (send the hangup event) is by ending the scope.
* THIS DOES NOT LEAVE THE CALL YET. The only way to leave the call (send the hangup event) is
* - by ending the scope
* - or calling requestDisconnect
*
* TODO: it seems more reasonable to add a leave() method (that calls requestDisconnect) that will then update leave$ and remove the hangup pattern
*/
leave$: Observable<"user" | AutoLeaveReason>;
/** Call to initiate hangup. Use in conbination with reconnectino state track the async hangup process. */
/** Call to initiate hangup. Use in combination with the reconnection state to track the async hangup process. */
hangup: () => void;
// joining
join: () => void;
/**
* calls requestDisconnect. The async leave state can then be observed via connected$
*/
leave: () => void;
// screen sharing
/**
* Callback to toggle screen sharing. If null, screen sharing is not possible.
@@ -274,7 +284,6 @@ export interface CallViewModel {
allConnections$: Behavior<ConnectionManagerData>;
/** Participants sorted by livekit room so they can be used in the audio rendering */
livekitRoomItems$: Behavior<LivekitRoomItem[]>;
userMedia$: Behavior<UserMedia[]>;
/** use the layout instead, this is just for the sdk export. */
matrixLivekitMembers$: Behavior<RemoteMatrixLivekitMember[]>;
localMatrixLivekitMember$: Behavior<LocalMatrixLivekitMember | null>;
@@ -283,13 +292,6 @@ export interface CallViewModel {
/** List of reactions. Keys are: membership.membershipId (currently predefined as: `${membershipEvent.userId}:${membershipEvent.deviceId}`)*/
reactions$: Behavior<Record<string, ReactionOption>>;
ringOverlay$: Behavior<null | {
name: string;
/** roomId or userId for the avatar generation. */
idForAvatar: string;
text: string;
avatarMxc?: string;
}>;
// sounds and events
joinSoundEffect$: Observable<void>;
leaveSoundEffect$: Observable<void>;
@@ -325,10 +327,6 @@ export interface CallViewModel {
gridMode$: Behavior<GridMode>;
setGridMode: (value: GridMode) => void;
// media view models and layout
grid$: Behavior<UserMediaViewModel[]>;
spotlight$: Behavior<MediaViewModel[]>;
pip$: Behavior<UserMediaViewModel | null>;
/**
* The layout of tiles in the call interface.
*/
@@ -436,35 +434,38 @@ export function createCallViewModel$(
memberId: uuidv4(),
};
const localTransport$ = createLocalTransport$({
scope: scope,
memberships$: memberships$,
ownMembershipIdentity,
client,
delayId$: scope.behavior(
(
fromEvent(
matrixRTCSession,
MembershipManagerEvent.DelayIdChanged,
// The type of reemitted event includes the original emitted as the second arg.
) as Observable<[string | undefined, IMembershipManager]>
).pipe(map(([delayId]) => delayId ?? null)),
matrixRTCSession.delayId ?? null,
),
roomId: matrixRoom.roomId,
forceJwtEndpoint$: scope.behavior(
matrixRTCMode$.pipe(
map((v) =>
v === MatrixRTCMode.Matrix_2_0
? JwtEndpointVersion.Matrix_2_0
: JwtEndpointVersion.Legacy,
),
const localTransport$ = scope.behavior(
matrixRTCMode$.pipe(
generateItem(
"CallViewModel localTransport$",
// Re-create LocalTransport whenever the mode changes
(mode) => ({ keys: [mode], data: undefined }),
(scope, _data$, mode) =>
createLocalTransport$({
scope: scope,
memberships$: memberships$,
ownMembershipIdentity,
client,
delayId$: scope.behavior(
(
fromEvent(
matrixRTCSession,
MembershipManagerEvent.DelayIdChanged,
// The type of reemitted event includes the original emitted as the second arg.
) as Observable<[string | undefined, IMembershipManager]>
).pipe(map(([delayId]) => delayId ?? null)),
matrixRTCSession.delayId ?? null,
),
roomId: matrixRoom.roomId,
forceJwtEndpoint:
mode === MatrixRTCMode.Matrix_2_0
? JwtEndpointVersion.Matrix_2_0
: JwtEndpointVersion.Legacy,
useOldestMember: mode === MatrixRTCMode.Legacy,
}),
),
),
useOldestMember$: scope.behavior(
matrixRTCMode$.pipe(map((v) => v === MatrixRTCMode.Legacy)),
),
});
);
const connectionFactory = new ECConnectionFactory(
client,
@@ -483,6 +484,7 @@ export function createCallViewModel$(
connectionFactory: connectionFactory,
localTransport$: scope.behavior(
localTransport$.pipe(
switchMap((t) => t.active$),
catchError((e: unknown) => {
logger.info(
"could not pass local transport to createConnectionManager$. localTransport$ threw an error",
@@ -516,13 +518,13 @@ export function createCallViewModel$(
);
const localMembership = createLocalMembership$({
scope: scope,
scope,
homeserverConnected: createHomeserverConnected$(
scope,
client,
matrixRTCSession,
),
muteStates: muteStates,
muteStates,
joinMatrixRTC: (transport: LivekitTransportConfig) => {
return enterRTCSession(
matrixRTCSession,
@@ -542,9 +544,11 @@ export function createCallViewModel$(
),
);
},
connectionManager: connectionManager,
matrixRTCSession: matrixRTCSession,
localTransport$: localTransport$,
connectionManager,
matrixRTCSession,
localTransport$: scope.behavior(
localTransport$.pipe(switchMap((t) => t.advertised$)),
),
logger: logger.getChild(`[${Date.now()}]`),
});
@@ -603,40 +607,6 @@ export function createCallViewModel$(
matrixRoomMembers$,
);
const dmMember$ = createDMMember$(scope, matrixRoomMembers$, matrixRoom);
const noUserToCallInRoom$ = scope.behavior(
matrixRoomMembers$.pipe(
map(
(roomMembersMap) =>
roomMembersMap.size === 1 && roomMembersMap.get(userId) !== undefined,
),
),
);
const ringOverlay$ = scope.behavior(
combineLatest([noUserToCallInRoom$, dmMember$, callPickupState$]).pipe(
map(([noUserToCallInRoom, dmMember, callPickupState]) => {
// No overlay if not in ringing state
if (callPickupState !== "ringing" || noUserToCallInRoom) return null;
const name = dmMember ? dmMember.rawDisplayName : matrixRoom.name;
const id = dmMember ? dmMember.userId : matrixRoom.roomId;
const text = dmMember
? `Waiting for ${name} to join…`
: "Waiting for other participants…";
const avatarMxc = dmMember
? (dmMember.getMxcAvatarUrl?.() ?? undefined)
: (matrixRoom.getMxcAvatarUrl() ?? undefined);
return {
name: name ?? id,
idForAvatar: id,
text,
avatarMxc,
};
}),
),
);
const allConnections$ = scope.behavior(
connectionManager.connectionManagerData$.pipe(map((d) => d.value)),
);
@@ -706,15 +676,16 @@ export function createCallViewModel$(
/**
* List of user media (camera feeds) that we want tiles for.
*/
const userMedia$ = scope.behavior<UserMedia[]>(
const userMedia$ = scope.behavior<WrappedUserMediaViewModel[]>(
combineLatest([
localMatrixLivekitMember$,
matrixLivekitMembers$,
duplicateTiles.value$,
]).pipe(
// Generate a collection of MediaItems from the list of expected (whether
// Generate a collection of user media from the list of expected (whether
// present or missing) LiveKit participants.
generateItems(
"CallViewModel userMedia$",
function* ([
localMatrixLivekitMember,
matrixLivekitMembers,
@@ -751,68 +722,100 @@ export function createCallViewModel$(
}
}
},
(scope, _, dup, mediaId, userId, participant, connection$, rtcId) => {
const livekitRoom$ = scope.behavior(
connection$.pipe(map((c) => c?.livekitRoom)),
);
const focusUrl$ = scope.behavior(
connection$.pipe(map((c) => c?.transport.livekit_service_url)),
);
const displayName$ = scope.behavior(
matrixMemberMetadataStore
.createDisplayNameBehavior$(userId)
.pipe(map((name) => name ?? userId)),
);
return new UserMedia(
scope,
`${mediaId}:${dup}`,
(scope, _, dup, mediaId, userId, participant, connection$, rtcId) =>
createWrappedUserMedia(scope, {
id: `${mediaId}:${dup}`,
userId,
rtcId,
rtcBackendIdentity: rtcId,
participant,
options.encryptionSystem,
livekitRoom$,
focusUrl$,
encryptionSystem: options.encryptionSystem,
livekitRoom$: scope.behavior(
connection$.pipe(map((c) => c?.livekitRoom)),
),
focusUrl$: scope.behavior(
connection$.pipe(map((c) => c?.transport.livekit_service_url)),
),
mediaDevices,
localMembership.reconnecting$,
displayName$,
matrixMemberMetadataStore.createAvatarUrlBehavior$(userId),
handsRaised$.pipe(map((v) => v[mediaId]?.time ?? null)),
reactions$.pipe(map((v) => v[mediaId] ?? undefined)),
);
},
pretendToBeDisconnected$: localMembership.reconnecting$,
displayName$: scope.behavior(
matrixMemberMetadataStore
.createDisplayNameBehavior$(userId)
.pipe(map((name) => name ?? userId)),
),
mxcAvatarUrl$:
matrixMemberMetadataStore.createAvatarUrlBehavior$(userId),
handRaised$: scope.behavior(
handsRaised$.pipe(map((v) => v[mediaId]?.time ?? null)),
),
reaction$: scope.behavior(
reactions$.pipe(map((v) => v[mediaId] ?? undefined)),
),
}),
),
),
);
const ringingMedia$ = scope.behavior<RingingMediaViewModel[]>(
combineLatest([userMedia$, matrixRoomMembers$, callPickupState$]).pipe(
generateItems(
"CallViewModel ringingMedia$",
function* ([userMedia, roomMembers, callPickupState]) {
if (
callPickupState === "ringing" ||
callPickupState === "timeout" ||
callPickupState === "decline"
) {
for (const member of roomMembers.values()) {
if (!userMedia.some((vm) => vm.userId === member.userId))
yield {
keys: [member.userId],
data: callPickupState,
};
}
}
},
(scope, pickupState$, userId) =>
createRingingMedia({
id: `ringing:${userId}`,
userId,
displayName$: scope.behavior(
matrixRoomMembers$.pipe(
map((members) => members.get(userId)?.rawDisplayName || userId),
),
),
mxcAvatarUrl$:
matrixMemberMetadataStore.createAvatarUrlBehavior$(userId),
pickupState$,
muteStates,
}),
),
distinctUntilChanged(shallowEquals),
tap((ringingMedia) => {
if (ringingMedia.length > 1)
// Warn that UI may do something unexpected in this case
logger.warn(
`Ringing more than one participant is not supported (ringing ${ringingMedia.map((vm) => vm.userId).join(", ")})`,
);
}),
),
);
/**
* List of all media items (user media and screen share media) that we want
* tiles for.
* All screen share media that we want to display.
*/
const mediaItems$ = scope.behavior<MediaItem[]>(
const screenShares$ = scope.behavior<ScreenShareViewModel[]>(
userMedia$.pipe(
switchMap((userMedia) =>
userMedia.length === 0
? of([])
: combineLatest(
userMedia.map((m) => m.screenShares$),
(...screenShares) => [...userMedia, ...screenShares.flat(1)],
(...screenShares) => screenShares.flat(1),
),
),
),
);
/**
* List of MediaItems that we want to display, that are of type ScreenShare
*/
const screenShares$ = scope.behavior<ScreenShare[]>(
mediaItems$.pipe(
map((mediaItems) =>
mediaItems.filter((m): m is ScreenShare => m instanceof ScreenShare),
),
),
);
const joinSoundEffect$ = userMedia$.pipe(
pairwise(),
filter(
@@ -872,39 +875,39 @@ export function createCallViewModel$(
merge(userHangup$, widgetHangup$).pipe(map(() => "user" as const)),
).pipe(scope.share);
const spotlightSpeaker$ = scope.behavior<UserMediaViewModel | null>(
const spotlightSpeaker$ = scope.behavior<UserMediaViewModel | undefined>(
userMedia$.pipe(
switchMap((mediaItems) =>
mediaItems.length === 0
? of([])
: combineLatest(
mediaItems.map((m) =>
m.vm.speaking$.pipe(map((s) => [m, s] as const)),
m.speaking$.pipe(map((s) => [m, s] as const)),
),
),
),
scan<(readonly [UserMedia, boolean])[], UserMedia | undefined, null>(
(prev, mediaItems) => {
// Only remote users that are still in the call should be sticky
const [stickyMedia, stickySpeaking] =
(!prev?.vm.local && mediaItems.find(([m]) => m === prev)) || [];
// Decide who to spotlight:
// If the previous speaker is still speaking, stick with them rather
// than switching eagerly to someone else
return stickySpeaking
? stickyMedia!
: // Otherwise, select any remote user who is speaking
(mediaItems.find(([m, s]) => !m.vm.local && s)?.[0] ??
// Otherwise, stick with the person who was last speaking
stickyMedia ??
// Otherwise, spotlight an arbitrary remote user
mediaItems.find(([m]) => !m.vm.local)?.[0] ??
// Otherwise, spotlight the local user
mediaItems.find(([m]) => m.vm.local)?.[0]);
},
null,
),
map((speaker) => speaker?.vm ?? null),
scan<
(readonly [UserMediaViewModel, boolean])[],
UserMediaViewModel | undefined,
undefined
>((prev, mediaItems) => {
// Only remote users that are still in the call should be sticky
const [stickyMedia, stickySpeaking] =
(!prev?.local && mediaItems.find(([m]) => m === prev)) || [];
// Decide who to spotlight:
// If the previous speaker is still speaking, stick with them rather
// than switching eagerly to someone else
return stickySpeaking
? stickyMedia!
: // Otherwise, select any remote user who is speaking
(mediaItems.find(([m, s]) => !m.local && s)?.[0] ??
// Otherwise, stick with the person who was last speaking
stickyMedia ??
// Otherwise, spotlight an arbitrary remote user
mediaItems.find(([m]) => !m.local)?.[0] ??
// Otherwise, spotlight the local user
mediaItems.find(([m]) => m.local)?.[0]);
}, undefined),
),
);
@@ -918,71 +921,71 @@ export function createCallViewModel$(
return bins.length === 0
? of([])
: combineLatest(bins, (...bins) =>
bins.sort(([, bin1], [, bin2]) => bin1 - bin2).map(([m]) => m.vm),
bins.sort(([, bin1], [, bin2]) => bin1 - bin2).map(([m]) => m),
);
}),
distinctUntilChanged(shallowEquals),
),
);
const spotlight$ = scope.behavior<MediaViewModel[]>(
screenShares$.pipe(
switchMap((screenShares) => {
if (screenShares.length > 0) {
return of(screenShares.map((m) => m.vm));
}
return spotlightSpeaker$.pipe(
map((speaker) => (speaker ? [speaker] : [])),
/**
* Local user media suitable for displaying in a PiP (undefined if not found
* or if user prefers to not see themselves).
*/
const localUserMediaForPip$ = scope.behavior<
LocalUserMediaViewModel | undefined
>(
userMedia$.pipe(
switchMap((userMedia) => {
const localUserMedia = userMedia.find(
(m): m is WrappedUserMediaViewModel & LocalUserMediaViewModel =>
m.type === "user" && m.local,
);
if (!localUserMedia) return of(undefined);
return localUserMedia.alwaysShow$.pipe(
map((alwaysShow) => (alwaysShow ? localUserMedia : undefined)),
);
}),
distinctUntilChanged<MediaViewModel[]>(shallowEquals),
),
);
const pip$ = scope.behavior<UserMediaViewModel | null>(
combineLatest([
// TODO This also needs epoch logic to dedupe the screenshares and mediaItems emits
screenShares$,
spotlightSpeaker$,
mediaItems$,
]).pipe(
switchMap(([screenShares, spotlight, mediaItems]) => {
if (screenShares.length > 0) {
return spotlightSpeaker$;
}
if (!spotlight || spotlight.local) {
return of(null);
}
const spotlightAndPip$ = scope.behavior<{
spotlight: MediaViewModel[];
pip$: Behavior<UserMediaViewModel | undefined>;
}>(
ringingMedia$.pipe(
switchMap((ringingMedia) => {
if (ringingMedia.length > 0)
return of({ spotlight: ringingMedia, pip$: localUserMediaForPip$ });
const localUserMedia = mediaItems.find(
(m) => m.vm instanceof LocalUserMediaViewModel,
) as UserMedia | undefined;
return screenShares$.pipe(
switchMap((screenShares) => {
if (screenShares.length > 0)
return of({ spotlight: screenShares, pip$: spotlightSpeaker$ });
const localUserMediaViewModel = localUserMedia?.vm as
| LocalUserMediaViewModel
| undefined;
if (!localUserMediaViewModel) {
return of(null);
}
return localUserMediaViewModel.alwaysShow$.pipe(
map((alwaysShow) => {
if (alwaysShow) {
return localUserMediaViewModel;
}
return null;
return spotlightSpeaker$.pipe(
map((speaker) => ({
spotlight: speaker ? [speaker] : [],
pip$: localUserMediaForPip$,
})),
);
}),
);
}),
),
);
const spotlight$ = scope.behavior<MediaViewModel[]>(
spotlightAndPip$.pipe(
map(({ spotlight }) => spotlight),
distinctUntilChanged<MediaViewModel[]>(shallowEquals),
),
);
const hasRemoteScreenShares$ = scope.behavior<boolean>(
spotlight$.pipe(
map((spotlight) =>
spotlight.some((vm) => !vm.local && vm instanceof ScreenShareViewModel),
spotlight.some((vm) => vm.type === "screen share" && !vm.local),
),
),
);
@@ -1023,8 +1026,10 @@ export function createCallViewModel$(
);
const spotlightExpandedToggle$ = new Subject<void>();
const spotlightExpanded$ = scope.behavior<boolean>(
spotlightExpandedToggle$.pipe(accumulate(false, (expanded) => !expanded)),
const spotlightExpanded$ = createToggle$(
scope,
false,
spotlightExpandedToggle$,
);
const { setGridMode, gridMode$ } = createLayoutModeSwitch(
@@ -1037,7 +1042,7 @@ export function createCallViewModel$(
[grid$, spotlight$],
(grid, spotlight) => ({
type: "grid",
spotlight: spotlight.some((vm) => vm instanceof ScreenShareViewModel)
spotlight: spotlight.some((vm) => vm.type === "screen share")
? spotlight
: undefined,
grid,
@@ -1059,28 +1064,61 @@ export function createCallViewModel$(
}));
const spotlightExpandedLayoutMedia$: Observable<SpotlightExpandedLayoutMedia> =
combineLatest([spotlight$, pip$], (spotlight, pip) => ({
type: "spotlight-expanded",
spotlight,
pip: pip ?? undefined,
}));
spotlightAndPip$.pipe(
switchMap(({ spotlight, pip$ }) =>
pip$.pipe(
map((pip) => ({
type: "spotlight-expanded" as const,
spotlight,
pip: pip ?? undefined,
})),
),
),
);
const oneOnOneLayoutMedia$: Observable<OneOnOneLayoutMedia | null> =
mediaItems$.pipe(
map((mediaItems) => {
if (mediaItems.length !== 2) return null;
const local = mediaItems.find((vm) => vm.vm.local)?.vm as
| LocalUserMediaViewModel
| undefined;
const remote = mediaItems.find((vm) => !vm.vm.local)?.vm as
| RemoteUserMediaViewModel
| undefined;
// There might not be a remote tile if there are screen shares, or if
// only the local user is in the call and they're using the duplicate
// tiles option
if (!remote || !local) return null;
userMedia$.pipe(
switchMap((userMedia) => {
if (userMedia.length <= 2) {
const local = userMedia.find(
(vm): vm is WrappedUserMediaViewModel & LocalUserMediaViewModel =>
vm.type === "user" && vm.local,
);
return { type: "one-on-one", local, remote };
if (local !== undefined) {
const remote = userMedia.find(
(
vm,
): vm is WrappedUserMediaViewModel & RemoteUserMediaViewModel =>
vm.type === "user" && !vm.local,
);
if (remote !== undefined)
return of({
type: "one-on-one" as const,
spotlight: remote,
pip: local,
});
// If there's no other user media in the call (could still happen in
// this branch due to the duplicate tiles option), we could possibly
// show ringing media instead
if (userMedia.length === 1)
return ringingMedia$.pipe(
map((ringingMedia) => {
return ringingMedia.length === 1
? {
type: "one-on-one" as const,
spotlight: local,
pip: ringingMedia[0],
}
: null;
}),
);
}
}
return of(null);
}),
);
@@ -1122,7 +1160,7 @@ export function createCallViewModel$(
oneOnOne === null
? combineLatest([grid$, spotlight$], (grid, spotlight) =>
grid.length > smallMobileCallThreshold ||
spotlight.some((vm) => vm instanceof ScreenShareViewModel)
spotlight.some((vm) => vm.type === "screen share")
? spotlightPortraitLayoutMedia$
: gridLayoutMedia$,
).pipe(switchAll())
@@ -1229,7 +1267,7 @@ export function createCallViewModel$(
// screen sharing feeds are in the spotlight we still need them.
return l.spotlight.media$.pipe(
map((models: MediaViewModel[]) =>
models.some((m) => m instanceof ScreenShareViewModel),
models.some((m) => m.type === "screen share"),
),
);
// In expanded spotlight layout, the active speaker is always shown in
@@ -1280,7 +1318,7 @@ export function createCallViewModel$(
switchMap((mode) => {
switch (mode) {
case "pip":
return of(false);
return of(platform === "desktop" ? true : false);
case "normal":
case "narrow":
return of(true);
@@ -1491,11 +1529,13 @@ export function createCallViewModel$(
return {
autoLeave$: autoLeave$,
callPickupState$: callPickupState$,
ringOverlay$: ringOverlay$,
ringing$: scope.behavior(
callPickupState$.pipe(map((state) => state === "ringing")),
),
leave$: leave$,
hangup: (): void => userHangup$.next(),
join: localMembership.requestJoinAndPublish,
leave: localMembership.requestDisconnect,
toggleScreenSharing: toggleScreenSharing,
sharingScreen$: sharingScreen$,
@@ -1535,17 +1575,21 @@ export function createCallViewModel$(
toggleSpotlightExpanded$: toggleSpotlightExpanded$,
gridMode$: gridMode$,
setGridMode: setGridMode,
grid$: grid$,
spotlight$: spotlight$,
pip$: pip$,
layout$: layout$,
userMedia$,
localMatrixLivekitMember$,
matrixLivekitMembers$: scope.behavior(
matrixLivekitMembers$.pipe(
map((members) => members.value),
tap((v) => {
logger.debug("matrixLivekitMembers$ updated (exported)", v);
const listForLogs = v
.map(
(m) =>
m.membership$.value.userId + "|" + m.membership$.value.deviceId,
)
.join(",");
logger.debug(
`matrixLivekitMembers$ updated (exported) [${listForLogs}]`,
);
}),
),
),

View File

@@ -17,7 +17,7 @@ import {
import { SyncState } from "matrix-js-sdk/lib/sync";
import { BehaviorSubject, type Observable, map, of } from "rxjs";
import { onTestFinished, vi } from "vitest";
import { ClientEvent, type MatrixClient } from "matrix-js-sdk";
import { ClientEvent, type RoomMember, type MatrixClient } from "matrix-js-sdk";
import EventEmitter from "events";
import * as ComponentsCore from "@livekit/components-core";
@@ -63,15 +63,10 @@ const carol = local;
const dave = mockMatrixRoomMember(daveRTLRtcMember, { rawDisplayName: "Dave" });
const roomMembers = new Map(
[alice, aliceDoppelganger, bob, bobZeroWidthSpace, carol, dave, daveRTL].map(
(p) => [p.userId, p],
),
);
export interface CallViewModelInputs {
remoteParticipants$: Behavior<RemoteParticipant[]>;
rtcMembers$: Behavior<Partial<CallMembership>[]>;
roomMembers: RoomMember[];
livekitConnectionState$: Behavior<ConnectionState>;
speaking: Map<Participant, Observable<boolean>>;
mediaDevices: MediaDevices;
@@ -86,6 +81,15 @@ export function withCallViewModel(mode: MatrixRTCMode) {
{
remoteParticipants$ = constant([]),
rtcMembers$ = constant([localRtcMember]),
roomMembers = [
alice,
aliceDoppelganger,
bob,
bobZeroWidthSpace,
carol,
dave,
daveRTL,
],
livekitConnectionState$: connectionState$ = constant(
ConnectionState.Connected,
),
@@ -128,8 +132,8 @@ export function withCallViewModel(mode: MatrixRTCMode) {
return syncState;
}
})() as Partial<MatrixClient> as MatrixClient,
getMembers: () => Array.from(roomMembers.values()),
getMembersWithMembership: () => Array.from(roomMembers.values()),
getMembers: () => roomMembers,
getMembersWithMembership: () => roomMembers,
});
const rtcSession = new MockRTCSession(room, []).withMemberships(
rtcMembers$,

View File

@@ -39,7 +39,6 @@ import { constant } from "../../Behavior";
import { ConnectionManagerData } from "../remoteMembers/ConnectionManager";
import { ConnectionState, type Connection } from "../remoteMembers/Connection";
import { type Publisher } from "./Publisher";
import { type LocalTransportWithSFUConfig } from "./LocalTransport";
import { initializeWidget } from "../../../widget";
initializeWidget();
@@ -216,11 +215,10 @@ describe("LocalMembership", () => {
it("throws error on missing RTC config error", () => {
withTestScheduler(({ scope, hot, expectObservable }) => {
const localTransport$ =
scope.behavior<null | LocalTransportWithSFUConfig>(
hot("1ms #", {}, new MatrixRTCTransportMissingError("domain.com")),
null,
);
const localTransport$ = scope.behavior<null | LivekitTransportConfig>(
hot("1ms #", {}, new MatrixRTCTransportMissingError("domain.com")),
null,
);
// we do not need any connection data since we want to fail before reaching that.
const mockConnectionManager = {
@@ -279,23 +277,11 @@ describe("LocalMembership", () => {
});
const aTransport = {
transport: {
livekit_service_url: "a",
} as LivekitTransportConfig,
sfuConfig: {
url: "sfu-url",
jwt: "sfu-token",
},
} as LocalTransportWithSFUConfig;
livekit_service_url: "a",
} as LivekitTransportConfig;
const bTransport = {
transport: {
livekit_service_url: "b",
} as LivekitTransportConfig,
sfuConfig: {
url: "sfu-url",
jwt: "sfu-token",
},
} as LocalTransportWithSFUConfig;
livekit_service_url: "b",
} as LivekitTransportConfig;
const connectionTransportAConnected = {
livekitRoom: mockLivekitRoom({
@@ -305,7 +291,7 @@ describe("LocalMembership", () => {
} as unknown as LocalParticipant,
}),
state$: constant(ConnectionState.LivekitConnected),
transport: aTransport.transport,
transport: aTransport,
} as unknown as Connection;
const connectionTransportAConnecting = {
...connectionTransportAConnected,
@@ -314,7 +300,7 @@ describe("LocalMembership", () => {
} as unknown as Connection;
const connectionTransportBConnected = {
state$: constant(ConnectionState.LivekitConnected),
transport: bTransport.transport,
transport: bTransport,
livekitRoom: mockLivekitRoom({}),
} as unknown as Connection;
@@ -368,12 +354,8 @@ describe("LocalMembership", () => {
// stop the first Publisher and let the second one life.
expect(publishers[0].destroy).toHaveBeenCalled();
expect(publishers[1].destroy).not.toHaveBeenCalled();
expect(publisherFactory.mock.calls[0][0].transport).toBe(
aTransport.transport,
);
expect(publisherFactory.mock.calls[1][0].transport).toBe(
bTransport.transport,
);
expect(publisherFactory.mock.calls[0][0].transport).toBe(aTransport);
expect(publisherFactory.mock.calls[1][0].transport).toBe(bTransport);
scope.end();
await flushPromises();
// stop all tracks after ending scopes
@@ -446,8 +428,9 @@ describe("LocalMembership", () => {
const scope = new ObservableScope();
const connectionManagerData = new ConnectionManagerData();
const localTransport$ =
new BehaviorSubject<null | LocalTransportWithSFUConfig>(null);
const localTransport$ = new BehaviorSubject<null | LivekitTransportConfig>(
null,
);
const connectionManagerData$ = new BehaviorSubject(
new Epoch(connectionManagerData),
);
@@ -519,7 +502,7 @@ describe("LocalMembership", () => {
});
(
connectionManagerData2.getConnectionForTransport(aTransport.transport)!
connectionManagerData2.getConnectionForTransport(aTransport)!
.state$ as BehaviorSubject<ConnectionState>
).next(ConnectionState.LivekitConnected);
expect(localMembership.localMemberState$.value).toStrictEqual({

View File

@@ -62,7 +62,6 @@ import {
} from "../remoteMembers/Connection.ts";
import { type HomeserverConnected } from "./HomeserverConnected.ts";
import { and$ } from "../../../utils/observable.ts";
import { type LocalTransportWithSFUConfig } from "./LocalTransport.ts";
export enum TransportState {
/** Not even a transport is available to the LocalMembership */
@@ -128,7 +127,7 @@ interface Props {
createPublisherFactory: (connection: Connection) => Publisher;
joinMatrixRTC: (transport: LivekitTransportConfig) => void;
homeserverConnected: HomeserverConnected;
localTransport$: Behavior<LocalTransportWithSFUConfig | null>;
localTransport$: Behavior<LivekitTransportConfig | null>;
matrixRTCSession: Pick<
MatrixRTCSession,
"updateCallIntent" | "leaveRoomSession"
@@ -147,7 +146,7 @@ interface Props {
* @param props.createPublisherFactory Factory to create a publisher once we have a connection.
* @param props.joinMatrixRTC Callback to join the matrix RTC session once we have a transport.
* @param props.homeserverConnected The homeserver connected state.
* @param props.localTransport$ The local transport to use for publishing.
* @param props.localTransport$ The transport to advertise in our membership.
* @param props.logger The logger to use.
* @param props.muteStates The mute states for video and audio.
* @param props.matrixRTCSession The matrix RTC session to join.
@@ -237,9 +236,7 @@ export const createLocalMembership$ = ({
return null;
}
return connectionData.getConnectionForTransport(
localTransport.transport,
);
return connectionData.getConnectionForTransport(localTransport);
}),
tap((connection) => {
logger.info(
@@ -549,7 +546,7 @@ export const createLocalMembership$ = ({
if (!shouldConnect) return;
try {
joinMatrixRTC(transport.transport);
joinMatrixRTC(transport);
} catch (error) {
logger.error("Error entering RTC session", error);
if (error instanceof Error)

View File

@@ -13,15 +13,24 @@ import {
it,
type MockedObject,
vi,
type MockInstance,
} from "vitest";
import { type CallMembership } from "matrix-js-sdk/lib/matrixrtc";
import {
type CallMembership,
type LivekitTransportConfig,
} from "matrix-js-sdk/lib/matrixrtc";
import { BehaviorSubject, lastValueFrom } from "rxjs";
import fetchMock from "fetch-mock";
import { mockConfig, flushPromises, ownMemberMock } from "../../../utils/test";
import {
mockConfig,
flushPromises,
ownMemberMock,
mockRtcMembership,
} from "../../../utils/test";
import { createLocalTransport$, JwtEndpointVersion } from "./LocalTransport";
import { constant } from "../../Behavior";
import { Epoch, ObservableScope } from "../../ObservableScope";
import { Epoch, ObservableScope, trackEpoch } from "../../ObservableScope";
import {
MatrixRTCTransportMissingError,
FailToGetOpenIdToken,
@@ -43,10 +52,10 @@ describe("LocalTransport", () => {
afterEach(() => scope.end());
it("throws if config is missing", async () => {
const localTransport$ = createLocalTransport$({
const { advertised$, active$ } = createLocalTransport$({
scope,
roomId: "!room:example.org",
useOldestMember$: constant(false),
useOldestMember: false,
memberships$: constant(new Epoch<CallMembership[]>([])),
client: {
// eslint-disable-next-line @typescript-eslint/naming-convention
@@ -58,14 +67,15 @@ describe("LocalTransport", () => {
getDeviceId: vi.fn(),
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant("delay_id_mock"),
});
await flushPromises();
expect(() => localTransport$.value).toThrow(
expect(() => advertised$.value).toThrow(
new MatrixRTCTransportMissingError(""),
);
expect(() => active$.value).toThrow(new MatrixRTCTransportMissingError(""));
});
it("throws FailToGetOpenIdToken when OpenID fetch fails", async () => {
@@ -83,10 +93,10 @@ describe("LocalTransport", () => {
);
const observations: unknown[] = [];
const errors: Error[] = [];
const localTransport$ = createLocalTransport$({
const { advertised$, active$ } = createLocalTransport$({
scope,
roomId: "!example_room_id",
useOldestMember$: constant(false),
useOldestMember: false,
memberships$: constant(new Epoch<CallMembership[]>([])),
client: {
baseUrl: "https://lk.example.org",
@@ -98,10 +108,10 @@ describe("LocalTransport", () => {
getDeviceId: vi.fn(),
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant("delay_id_mock"),
});
localTransport$.subscribe(
active$.subscribe(
(o) => observations.push(o),
(e) => errors.push(e),
);
@@ -111,7 +121,8 @@ describe("LocalTransport", () => {
const expectedError = new FailToGetOpenIdToken(new Error("no openid"));
expect(observations).toStrictEqual([null]);
expect(errors).toStrictEqual([expectedError]);
expect(() => localTransport$.value).toThrow(expectedError);
expect(() => advertised$.value).toThrow(expectedError);
expect(() => active$.value).toThrow(expectedError);
});
it("emits preferred transport after OpenID resolves", async () => {
@@ -126,10 +137,10 @@ describe("LocalTransport", () => {
openIdResolver.promise,
);
const localTransport$ = createLocalTransport$({
const { advertised$, active$ } = createLocalTransport$({
scope,
roomId: "!room:example.org",
useOldestMember$: constant(false),
useOldestMember: false,
memberships$: constant(new Epoch<CallMembership[]>([])),
client: {
// eslint-disable-next-line @typescript-eslint/naming-convention
@@ -140,7 +151,7 @@ describe("LocalTransport", () => {
baseUrl: "https://lk.example.org",
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant("delay_id_mock"),
});
@@ -150,14 +161,17 @@ describe("LocalTransport", () => {
livekitAlias: "Akph4alDMhen",
livekitIdentity: ownMemberMock.userId + ":" + ownMemberMock.deviceId,
});
expect(localTransport$.value).toBe(null);
expect(advertised$.value).toBe(null);
expect(active$.value).toBe(null);
await flushPromises();
// final
expect(localTransport$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
},
const expectedTransport = {
livekit_service_url: "https://lk.example.org",
type: "livekit",
};
expect(advertised$.value).toStrictEqual(expectedTransport);
expect(active$.value).toStrictEqual({
transport: expectedTransport,
sfuConfig: {
jwt: "jwt",
livekitAlias: "Akph4alDMhen",
@@ -167,51 +181,122 @@ describe("LocalTransport", () => {
});
});
it("updates local transport when oldest member changes", async () => {
// Use config so transport discovery succeeds, but delay OpenID JWT fetch
mockConfig({
livekit: { livekit_service_url: "https://lk.example.org" },
describe("oldest member mode", () => {
const aliceTransport: LivekitTransportConfig = {
type: "livekit",
livekit_service_url: "https://alice.example.org",
};
const bobTransport: LivekitTransportConfig = {
type: "livekit",
livekit_service_url: "https://bob.example.org",
};
const aliceMembership = mockRtcMembership("@alice:example.org", "AAA", {
fociPreferred: [aliceTransport],
});
const memberships$ = new BehaviorSubject(new Epoch([]));
const openIdResolver = Promise.withResolvers<openIDSFU.SFUConfig>();
vi.spyOn(openIDSFU, "getSFUConfigWithOpenID").mockReturnValue(
openIdResolver.promise,
);
const localTransport$ = createLocalTransport$({
scope,
roomId: "!example_room_id",
useOldestMember$: constant(true),
memberships$,
client: {
getDomain: () => "",
// eslint-disable-next-line @typescript-eslint/naming-convention
_unstable_getRTCTransports: async () => Promise.resolve([]),
getOpenIdToken: vi.fn(),
getDeviceId: vi.fn(),
baseUrl: "https://lk.example.org",
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
delayId$: constant("delay_id_mock"),
const bobMembership = mockRtcMembership("@bob:example.org", "BBB", {
fociPreferred: [bobTransport],
});
openIdResolver.resolve?.(openIdResponse);
expect(localTransport$.value).toBe(null);
await flushPromises();
// final
expect(localTransport$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
},
sfuConfig: {
jwt: "e30=.eyJzdWIiOiJAbWU6ZXhhbXBsZS5vcmc6QUJDREVGIiwidmlkZW8iOnsicm9vbSI6IiFleGFtcGxlX3Jvb21faWQifX0=.e30=",
livekitAlias: "Akph4alDMhen",
livekitIdentity: "@lk_user:ABCDEF",
url: "https://lk.example.org",
},
let openIdSpy: MockInstance<(typeof openIDSFU)["getSFUConfigWithOpenID"]>;
beforeEach(() => {
openIdSpy = vi
.spyOn(openIDSFU, "getSFUConfigWithOpenID")
.mockResolvedValue(openIdResponse);
});
it("updates active transport when oldest member changes", async () => {
// Initially, Alice is the only member
const memberships$ = new BehaviorSubject([aliceMembership]);
const { advertised$, active$ } = createLocalTransport$({
scope,
roomId: "!example_room_id",
useOldestMember: true,
memberships$: scope.behavior(memberships$.pipe(trackEpoch())),
client: {
getDomain: () => "",
// eslint-disable-next-line @typescript-eslint/naming-convention
_unstable_getRTCTransports: async () => Promise.resolve([]),
getOpenIdToken: vi.fn(),
getDeviceId: vi.fn(),
baseUrl: "https://lk.example.org",
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant("delay_id_mock"),
});
expect(active$.value).toBe(null);
await flushPromises();
// SFU config should've been fetched
expect(openIdSpy).toHaveBeenCalled();
// Alice's transport should be active and advertised
expect(active$.value?.transport).toStrictEqual(aliceTransport);
expect(advertised$.value).toStrictEqual(aliceTransport);
// Now Bob joins the call, but Alice is still the oldest member
openIdSpy.mockClear();
memberships$.next([aliceMembership, bobMembership]);
await flushPromises();
// No new SFU config should've been fetched
expect(openIdSpy).not.toHaveBeenCalled();
// Alice's transport should still be active and advertised
expect(active$.value?.transport).toStrictEqual(aliceTransport);
expect(advertised$.value).toStrictEqual(aliceTransport);
// Now Bob takes Alice's place as the oldest member
openIdSpy.mockClear();
memberships$.next([bobMembership, aliceMembership]);
// Active transport should reset to null until we have Bob's SFU config
expect(active$.value).toStrictEqual(null);
await flushPromises();
// Bob's SFU config should've been fetched
expect(openIdSpy).toHaveBeenCalled();
// Bob's transport should be active, but Alice's should remain advertised
// (since we don't want the change in oldest member to cause a wave of new
// state events)
expect(active$.value?.transport).toStrictEqual(bobTransport);
expect(advertised$.value).toStrictEqual(aliceTransport);
});
it("advertises preferred transport when no other member exists", async () => {
// Initially, there are no members
const memberships$ = new BehaviorSubject<CallMembership[]>([]);
const { advertised$, active$ } = createLocalTransport$({
scope,
roomId: "!example_room_id",
useOldestMember: true,
memberships$: scope.behavior(memberships$.pipe(trackEpoch())),
client: {
getDomain: () => "",
// eslint-disable-next-line @typescript-eslint/naming-convention
_unstable_getRTCTransports: async () =>
Promise.resolve([aliceTransport]),
getOpenIdToken: vi.fn(),
getDeviceId: vi.fn(),
baseUrl: "https://lk.example.org",
},
ownMembershipIdentity: ownMemberMock,
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant("delay_id_mock"),
});
expect(active$.value).toBe(null);
await flushPromises();
// Our own preferred transport should be advertised
expect(advertised$.value).toStrictEqual(aliceTransport);
// No transport should be active however (there is still no oldest member)
expect(active$.value).toBe(null);
// Now Bob joins the call and becomes the oldest member
memberships$.next([bobMembership]);
await flushPromises();
// We should still advertise our own preferred transport (to avoid
// unnecessary state changes)
expect(advertised$.value).toStrictEqual(aliceTransport);
// Bob's transport should become active
expect(active$.value?.transport).toBe(bobTransport);
});
});
@@ -229,8 +314,8 @@ describe("LocalTransport", () => {
ownMembershipIdentity: ownMemberMock,
scope,
roomId: "!example_room_id",
useOldestMember$: constant(false),
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
useOldestMember: false,
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant(null),
memberships$: constant(new Epoch<CallMembership[]>([])),
client: {
@@ -256,15 +341,19 @@ describe("LocalTransport", () => {
mockConfig({
livekit: { livekit_service_url: "https://lk.example.org" },
});
const localTransport$ = createLocalTransport$(localTransportOpts);
const { advertised$, active$ } =
createLocalTransport$(localTransportOpts);
openIdResolver.resolve?.(openIdResponse);
expect(localTransport$.value).toBe(null);
expect(advertised$.value).toBe(null);
expect(active$.value).toBe(null);
await flushPromises();
expect(localTransport$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
},
const expectedTransport = {
livekit_service_url: "https://lk.example.org",
type: "livekit",
};
expect(advertised$.value).toStrictEqual(expectedTransport);
expect(active$.value).toStrictEqual({
transport: expectedTransport,
sfuConfig: {
jwt: "e30=.eyJzdWIiOiJAbWU6ZXhhbXBsZS5vcmc6QUJDREVGIiwidmlkZW8iOnsicm9vbSI6IiFleGFtcGxlX3Jvb21faWQifX0=.e30=",
livekitAlias: "Akph4alDMhen",
@@ -273,13 +362,15 @@ describe("LocalTransport", () => {
},
});
});
it("supports getting transport via user settings", async () => {
customLivekitUrl.setValue("https://lk.example.org");
const localTransport$ = createLocalTransport$(localTransportOpts);
const { advertised$, active$ } =
createLocalTransport$(localTransportOpts);
openIdResolver.resolve?.(openIdResponse);
expect(localTransport$.value).toBe(null);
expect(advertised$.value).toBe(null);
await flushPromises();
expect(localTransport$.value).toStrictEqual({
expect(active$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
@@ -292,19 +383,24 @@ describe("LocalTransport", () => {
},
});
});
it("supports getting transport via backend", async () => {
localTransportOpts.client._unstable_getRTCTransports.mockResolvedValue([
{ type: "livekit", livekit_service_url: "https://lk.example.org" },
]);
const localTransport$ = createLocalTransport$(localTransportOpts);
const { advertised$, active$ } =
createLocalTransport$(localTransportOpts);
openIdResolver.resolve?.(openIdResponse);
expect(localTransport$.value).toBe(null);
expect(advertised$.value).toBe(null);
expect(active$.value).toBe(null);
await flushPromises();
expect(localTransport$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
},
const expectedTransport = {
livekit_service_url: "https://lk.example.org",
type: "livekit",
};
expect(advertised$.value).toStrictEqual(expectedTransport);
expect(active$.value).toStrictEqual({
transport: expectedTransport,
sfuConfig: {
jwt: "e30=.eyJzdWIiOiJAbWU6ZXhhbXBsZS5vcmc6QUJDREVGIiwidmlkZW8iOnsicm9vbSI6IiFleGFtcGxlX3Jvb21faWQifX0=.e30=",
livekitAlias: "Akph4alDMhen",
@@ -313,6 +409,7 @@ describe("LocalTransport", () => {
},
});
});
it("fails fast if the openID request fails for backend config", async () => {
localTransportOpts.client._unstable_getRTCTransports.mockResolvedValue([
{ type: "livekit", livekit_service_url: "https://lk.example.org" },
@@ -320,13 +417,11 @@ describe("LocalTransport", () => {
openIdResolver.reject(
new FailToGetOpenIdToken(new Error("Test driven error")),
);
try {
await lastValueFrom(createLocalTransport$(localTransportOpts));
throw Error("Expected test to throw");
} catch (ex) {
expect(ex).toBeInstanceOf(FailToGetOpenIdToken);
}
await expect(async () =>
lastValueFrom(createLocalTransport$(localTransportOpts).active$),
).rejects.toThrow(expect.any(FailToGetOpenIdToken));
});
it("supports getting transport via well-known", async () => {
localTransportOpts.client.getDomain.mockReturnValue("example.org");
fetchMock.getOnce("https://example.org/.well-known/matrix/client", {
@@ -334,15 +429,19 @@ describe("LocalTransport", () => {
{ type: "livekit", livekit_service_url: "https://lk.example.org" },
],
});
const localTransport$ = createLocalTransport$(localTransportOpts);
const { advertised$, active$ } =
createLocalTransport$(localTransportOpts);
openIdResolver.resolve?.(openIdResponse);
expect(localTransport$.value).toBe(null);
expect(advertised$.value).toBe(null);
expect(active$.value).toBe(null);
await flushPromises();
expect(localTransport$.value).toStrictEqual({
transport: {
livekit_service_url: "https://lk.example.org",
type: "livekit",
},
const expectedTransport = {
livekit_service_url: "https://lk.example.org",
type: "livekit",
};
expect(advertised$.value).toStrictEqual(expectedTransport);
expect(active$.value).toStrictEqual({
transport: expectedTransport,
sfuConfig: {
jwt: "e30=.eyJzdWIiOiJAbWU6ZXhhbXBsZS5vcmc6QUJDREVGIiwidmlkZW8iOnsicm9vbSI6IiFleGFtcGxlX3Jvb21faWQifX0=.e30=",
livekitAlias: "Akph4alDMhen",
@@ -352,6 +451,7 @@ describe("LocalTransport", () => {
});
expect(fetchMock.done()).toEqual(true);
});
it("fails fast if the openId request fails for the well-known config", async () => {
localTransportOpts.client.getDomain.mockReturnValue("example.org");
fetchMock.getOnce("https://example.org/.well-known/matrix/client", {
@@ -362,20 +462,18 @@ describe("LocalTransport", () => {
openIdResolver.reject(
new FailToGetOpenIdToken(new Error("Test driven error")),
);
try {
await lastValueFrom(createLocalTransport$(localTransportOpts));
throw Error("Expected test to throw");
} catch (ex) {
expect(ex).toBeInstanceOf(FailToGetOpenIdToken);
}
await expect(async () =>
lastValueFrom(createLocalTransport$(localTransportOpts).active$),
).rejects.toThrow(expect.any(FailToGetOpenIdToken));
});
it("throws if no options are available", async () => {
const localTransport$ = createLocalTransport$({
const { advertised$, active$ } = createLocalTransport$({
scope,
ownMembershipIdentity: ownMemberMock,
roomId: "!example_room_id",
useOldestMember$: constant(false),
forceJwtEndpoint$: constant(JwtEndpointVersion.Legacy),
useOldestMember: false,
forceJwtEndpoint: JwtEndpointVersion.Legacy,
delayId$: constant(null),
memberships$: constant(new Epoch<CallMembership[]>([])),
client: {
@@ -390,7 +488,10 @@ describe("LocalTransport", () => {
});
await flushPromises();
expect(() => localTransport$.value).toThrow(
expect(() => advertised$.value).toThrow(
new MatrixRTCTransportMissingError(""),
);
expect(() => active$.value).toThrow(
new MatrixRTCTransportMissingError(""),
);
});

View File

@@ -13,12 +13,15 @@ import {
} from "matrix-js-sdk/lib/matrixrtc";
import { MatrixError, type MatrixClient } from "matrix-js-sdk";
import {
combineLatest,
distinctUntilChanged,
first,
from,
map,
merge,
of,
startWith,
switchMap,
tap,
} from "rxjs";
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import { AutoDiscovery } from "matrix-js-sdk/lib/autodiscovery";
@@ -58,8 +61,8 @@ interface Props {
OpenIDClientParts;
// Used by the jwt service to create the livekit room and compute the livekit alias.
roomId: string;
useOldestMember$: Behavior<boolean>;
forceJwtEndpoint$: Behavior<JwtEndpointVersion>;
useOldestMember: boolean;
forceJwtEndpoint: JwtEndpointVersion;
delayId$: Behavior<string | null>;
}
@@ -93,23 +96,35 @@ export interface LocalTransportWithSFUConfig {
transport: LivekitTransportConfig;
sfuConfig: SFUConfig;
}
export function isLocalTransportWithSFUConfig(
obj: LivekitTransportConfig | LocalTransportWithSFUConfig,
): obj is LocalTransportWithSFUConfig {
return "transport" in obj && "sfuConfig" in obj;
}
interface LocalTransport {
/**
* The transport to be advertised in our MatrixRTC membership. `null` when not
* yet fetched/validated.
*/
advertised$: Behavior<LivekitTransportConfig | null>;
/**
* The transport to connect to and publish media on. `null` when not yet known
* or available.
*/
active$: Behavior<LocalTransportWithSFUConfig | null>;
}
/**
* This class is responsible for managing the local transport.
* "Which transport is the local member going to use"
* Connects to the JWT service and determines the transports that the local member should use.
*
* @prop useOldestMember Whether to use the same transport as the oldest member.
* This will only update once the first oldest member appears. Will not recompute if the oldest member leaves.
*
* @prop useOldJwtEndpoint$ Whether to set forceOldJwtEndpoint on the returned transport and to use the old JWT endpoint.
* @prop useOldJwtEndpoint Whether to set forceOldJwtEndpoint on the returned transport and to use the old JWT endpoint.
* This is used when the connection manager needs to know if it has to use the legacy endpoint which implies a string concatenated rtcBackendIdentity.
* (which is expected for non sticky event based rtc member events)
* @returns The local transport. It will be created using the correct sfu endpoint based on the useOldJwtEndpoint$ value.
* @returns The transport to advertise in the local MatrixRTC membership, along with the transport to actively publish media to.
* @throws MatrixRTCTransportMissingError | FailToGetOpenIdToken
*/
export const createLocalTransport$ = ({
@@ -118,114 +133,156 @@ export const createLocalTransport$ = ({
ownMembershipIdentity,
client,
roomId,
useOldestMember$,
forceJwtEndpoint$,
useOldestMember,
forceJwtEndpoint,
delayId$,
}: Props): Behavior<LocalTransportWithSFUConfig | null> => {
}: Props): LocalTransport => {
/**
* The transport over which we should be actively publishing our media.
* undefined when not joined.
* The LiveKit transport in use by the oldest RTC membership. `null` when the
* oldest member has no such transport.
*/
const oldestMemberTransport$ =
scope.behavior<LocalTransportWithSFUConfig | null>(
combineLatest([memberships$, useOldestMember$]).pipe(
map(([memberships, useOldestMember]) => {
if (!useOldestMember) return null; // No need to do any prefetching if not using oldest member
const oldestMember = memberships.value[0];
const transport = oldestMember?.getTransport(oldestMember);
if (!transport) return null;
return transport;
}),
switchMap((transport) => {
if (transport !== null && isLivekitTransportConfig(transport)) {
// Get the open jwt token to connect to the sfu
const computeLocalTransportWithSFUConfig =
async (): Promise<LocalTransportWithSFUConfig> => {
return {
transport,
sfuConfig: await getSFUConfigWithOpenID(
client,
ownMembershipIdentity,
transport.livekit_service_url,
roomId,
{ forceJwtEndpoint: JwtEndpointVersion.Legacy },
logger,
),
};
};
return from(computeLocalTransportWithSFUConfig());
}
return of(null);
}),
),
null,
);
const oldestMemberTransport$ = scope.behavior<LivekitTransportConfig | null>(
memberships$.pipe(
map((memberships) => {
const oldestMember = memberships.value[0];
if (oldestMember === undefined) {
logger.info("Oldest member: not found");
return null;
}
const transport = oldestMember.getTransport(oldestMember);
if (transport === undefined) {
logger.warn(
`Oldest member: ${oldestMember.userId}|${oldestMember.deviceId}|${oldestMember.memberId} has no transport`,
);
return null;
}
if (!isLivekitTransportConfig(transport)) {
logger.warn(
`Oldest member: ${oldestMember.userId}|${oldestMember.deviceId}|${oldestMember.memberId} has invalid transport`,
);
return null;
}
logger.info(
"Oldest member: ${oldestMember.userId}|${oldestMember.deviceId}|${oldestMember.memberId} has valid transport",
);
return transport;
}),
distinctUntilChanged(areLivekitTransportsEqual),
),
);
/**
* The transport that we would personally prefer to publish on (if not for the
* transport preferences of others, perhaps).
* transport preferences of others, perhaps). `null` until fetched and
* validated.
*
* @throws MatrixRTCTransportMissingError | FailToGetOpenIdToken
*/
const preferredTransport$ = scope.behavior(
// preferredTransport$ (used for multi sfu) needs to know if we are using the old or new
// jwt endpoint (`get_token` vs `sfu/get`) based on that the jwt endpoint will compute the rtcBackendIdentity
// differently. (sha(`${userId}|${deviceId}|${memberId}`) vs `${userId}|${deviceId}|${memberId}`)
// When using sticky events (we need to use the new endpoint).
combineLatest([customLivekitUrl.value$, delayId$, forceJwtEndpoint$]).pipe(
switchMap(([customUrl, delayId, forceEndpoint]) => {
logger.info(
"Creating preferred transport based on: ",
"customUrl: ",
customUrl,
"delayId: ",
delayId,
"forceEndpoint: ",
forceEndpoint,
);
return from(
makeTransport(
client,
ownMembershipIdentity,
roomId,
customUrl,
forceEndpoint,
delayId ?? undefined,
const preferredTransport$ =
scope.behavior<LocalTransportWithSFUConfig | null>(
// preferredTransport$ (used for multi sfu) needs to know if we are using the old or new
// jwt endpoint (`get_token` vs `sfu/get`) based on that the jwt endpoint will compute the rtcBackendIdentity
// differently. (sha(`${userId}|${deviceId}|${memberId}`) vs `${userId}|${deviceId}|${memberId}`)
// When using sticky events (we need to use the new endpoint).
customLivekitUrl.value$.pipe(
switchMap((customUrl) =>
startWith<LocalTransportWithSFUConfig | null>(null)(
// Fetch the SFU config, and repeat this asynchronously for every
// change in delay ID.
delayId$.pipe(
switchMap(async (delayId) => {
logger.info(
"Creating preferred transport based on: ",
"customUrl: ",
customUrl,
"delayId: ",
delayId,
"forceJwtEndpoint: ",
forceJwtEndpoint,
);
return makeTransport(
client,
ownMembershipIdentity,
roomId,
customUrl,
forceJwtEndpoint,
delayId ?? undefined,
);
}),
// We deliberately hide any changes to the SFU config because we
// do not actually want the app to reconnect whenever the JWT
// token changes due to us delegating a new delayed event. The
// initial SFU config for the transport is all the app needs.
distinctUntilChanged((prev, next) =>
areLivekitTransportsEqual(prev.transport, next.transport),
),
),
),
);
}),
),
null,
);
),
),
);
/**
* The chosen transport we should advertise in our MatrixRTC membership.
*/
return scope.behavior(
combineLatest([
useOldestMember$,
oldestMemberTransport$,
preferredTransport$,
]).pipe(
map(([useOldestMember, oldestMemberTransport, preferredTransport]) => {
return useOldestMember
? (oldestMemberTransport ?? preferredTransport)
: preferredTransport;
}),
distinctUntilChanged((t1, t2) => {
logger.info(
"Local Transport Update from:",
t1?.transport.livekit_service_url,
" to ",
t2?.transport.livekit_service_url,
);
return areLivekitTransportsEqual(
t1?.transport ?? null,
t2?.transport ?? null,
);
}),
if (useOldestMember) {
// --- Oldest member mode ---
return {
// Never update the transport that we advertise in our membership. Just
// take the first valid oldest member or preferred transport that we learn
// about, and stick with that. This avoids unnecessary SFU hops and room
// state changes.
advertised$: scope.behavior(
merge(
oldestMemberTransport$,
preferredTransport$.pipe(map((t) => t?.transport ?? null)),
).pipe(
first((t) => t !== null),
tap((t) =>
logger.info(`Advertise transport: ${t.livekit_service_url}`),
),
),
null,
),
// Publish on the transport used by the oldest member.
active$: scope.behavior(
oldestMemberTransport$.pipe(
switchMap((transport) => {
// Oldest member not available (or invalid SFU config).
if (transport === null) return of(null);
// Oldest member available: fetch the SFU config.
const fetchOldestMemberTransport =
async (): Promise<LocalTransportWithSFUConfig> => ({
transport,
sfuConfig: await getSFUConfigWithOpenID(
client,
ownMembershipIdentity,
transport.livekit_service_url,
roomId,
{ forceJwtEndpoint: JwtEndpointVersion.Legacy },
logger,
),
});
return from(fetchOldestMemberTransport()).pipe(startWith(null));
}),
tap((t) =>
logger.info(
`Publish on transport: ${t?.transport.livekit_service_url}`,
),
),
),
),
};
}
// --- Multi-SFU mode ---
// Always publish on and advertise the preferred transport.
return {
advertised$: scope.behavior(
preferredTransport$.pipe(
map((t) => t?.transport ?? null),
distinctUntilChanged(areLivekitTransportsEqual),
),
),
);
active$: preferredTransport$,
};
};
const FOCI_WK_KEY = "org.matrix.msc4143.rtc_foci";

View File

@@ -30,7 +30,7 @@ import {
trackProcessorSync,
} from "../../../livekit/TrackProcessorContext.tsx";
import { getUrlParams } from "../../../UrlParams.ts";
import { observeTrackReference$ } from "../../MediaViewModel.ts";
import { observeTrackReference$ } from "../../observeTrackReference";
import { type Connection } from "../remoteMembers/Connection.ts";
import { ObservableScope } from "../../ObservableScope.ts";

View File

@@ -90,7 +90,7 @@ export interface IConnectionManager {
* @param props - Configuration object
* @param props.scope - The observable scope used by this object
* @param props.connectionFactory - Used to create new connections
* @param props.localTransport$ - The local transport to use. (deduplicated with remoteTransports$)
* @param props.localTransport$ - The transport to publish local media on. (deduplicated with remoteTransports$)
* @param props.remoteTransports$ - All other transports. The connection manager will create connections for each transport. (deduplicated with localTransport$)
* @param props.ownMembershipIdentity - The own membership identity to use.
* @param props.logger - The logger to use.
@@ -162,22 +162,23 @@ export function createConnectionManager$({
const connections$ = scope.behavior(
localAndRemoteTransports$.pipe(
generateItemsWithEpoch(
"ConnectionManager connections$",
function* (transports) {
for (const transportWithOrWithoutSfuConfig of transports) {
if (
isLocalTransportWithSFUConfig(transportWithOrWithoutSfuConfig)
) {
// This is the local transport only the `LocalTransportWithSFUConfig` has a `sfuConfig` field
const { transport, sfuConfig } = transportWithOrWithoutSfuConfig;
for (const transport of transports) {
if (isLocalTransportWithSFUConfig(transport)) {
// This is the local transport; only the `LocalTransportWithSFUConfig` has a `sfuConfig` field.
yield {
keys: [transport.livekit_service_url, sfuConfig],
keys: [
transport.transport.livekit_service_url,
transport.sfuConfig,
],
data: undefined,
};
} else {
yield {
keys: [
transportWithOrWithoutSfuConfig.livekit_service_url,
undefined as undefined | SFUConfig,
transport.livekit_service_url,
undefined as SFUConfig | undefined,
],
data: undefined,
};
@@ -193,6 +194,8 @@ export function createConnectionManager$({
},
ownMembershipIdentity,
logger,
// TODO: This whole optional SFUConfig parameter is not particularly elegant.
// I would like it if connections always fetched the SFUConfig by themselves.
sfuConfig,
);
// Start the connection immediately

View File

@@ -11,7 +11,6 @@ import {
type LivekitTransportConfig,
} from "matrix-js-sdk/lib/matrixrtc";
import { combineLatest, filter, map } from "rxjs";
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import { type Behavior } from "../../Behavior";
import { type IConnectionManager } from "./ConnectionManager";
@@ -19,8 +18,6 @@ import { Epoch, type ObservableScope } from "../../ObservableScope";
import { type Connection } from "./Connection";
import { generateItemsWithEpoch } from "../../../utils/observable";
const logger = rootLogger.getChild("[MatrixLivekitMembers]");
interface LocalTaggedParticipant {
type: "local";
value$: Behavior<LocalParticipant | null>;
@@ -94,9 +91,10 @@ export function createMatrixLivekitMembers$({
),
map(([ms, data]) => new Epoch([ms.value, data.value] as const, ms.epoch)),
generateItemsWithEpoch(
"MatrixLivekitMembers",
// Generator function.
// creates an array of `{key, data}[]`
// Each change in the keys (new key, missing key) will result in a call to the factory function.
// Each change in the keys (new key) will result in a call to the factory function.
function* ([membershipsWithTransport, managerData]) {
for (const { membership, transport } of membershipsWithTransport) {
const participants = transport
@@ -111,26 +109,23 @@ export function createMatrixLivekitMembers$({
: null;
yield {
// This could also just be the memberId without the other fields.
// In theory we should never have the same memberId for different userIds (they are UUIDs)
// This still makes us resilient agains someone who intentionally tries to use the same memberId.
// If they want to do this they would now need to also use the same sender which is impossible.
// This could just be the backend identity without the other keys.
// The user ID, device ID, and member ID are included however so
// they show up in debug logs.
keys: [
membership.userId,
membership.deviceId,
membership.memberId,
membership.rtcBackendIdentity,
],
data: { membership, participant, connection },
};
}
},
// Each update where the key of the generator array do not change will result in updates to the `data$` observable in the factory.
(scope, data$, userId, deviceId, memberId) => {
logger.debug(
`Generating member for livekitIdentity: ${data$.value.membership.rtcBackendIdentity},keys userId:deviceId:memberId ${userId}:${deviceId}:${memberId}`,
);
// Each update where the key of the generator array do not change will result in updates to the `data$` behavior.
(scope, data$, userId, _deviceId, _memberId, _rtcBackendIdentity) => {
const { participant$, ...rest } = scope.splitBehavior(data$);
// will only get called once per `participantId, userId` pair.
// will only get called once per backend identity.
// updates to data$ and as a result to displayName$ and mxcAvatarUrl$ are more frequent.
return {
userId,

View File

@@ -54,31 +54,6 @@ export function createRoomMembers$(
);
}
/**
* creates the member that this DM is with in case it is a DM (two members) otherwise null
*/
export function createDMMember$(
scope: ObservableScope,
roomMembers$: Behavior<RoomMemberMap>,
matrixRoom: MatrixRoom,
): Behavior<Pick<
RoomMember,
"userId" | "getMxcAvatarUrl" | "rawDisplayName"
> | null> {
// We cannot use the normal direct check from matrix since we do not have access to the account data.
// use primitive member count === 2 check instead.
return scope.behavior(
roomMembers$.pipe(
map((membersMap) => {
// primitive appraoch do to no access to account data.
const isDM = membersMap.size === 2;
if (!isDM) return null;
return matrixRoom.getMember(matrixRoom.guessDMUserId());
}),
),
);
}
/**
* Displayname for each member of the call. This will disambiguate
* any displayname that clashes with another member. Only members

View File

@@ -0,0 +1,132 @@
/*
Copyright 2026 Element Corp.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { logger as rootLogger } from "matrix-js-sdk/lib/logger";
import { combineLatest, merge, startWith, Subject, tap } from "rxjs";
import {
availableOutputDevices$ as controlledAvailableOutputDevices$,
outputDevice$ as controlledOutputSelection$,
} from "../controls.ts";
import type { Behavior } from "./Behavior.ts";
import type { ObservableScope } from "./ObservableScope.ts";
import {
type AudioOutputDeviceLabel,
availableRawDevices$,
iosDeviceMenu$,
type MediaDevice,
type SelectedAudioOutputDevice,
} from "./MediaDevices.ts";
// This hardcoded id is used in EX ios! It can only be changed in coordination with
// the ios swift team.
const EARPIECE_CONFIG_ID = "earpiece-id";
/**
* A special implementation of audio output that allows the hosting application
* to have more control over the device selection process. This is used when the
* `controlledAudioDevices` URL parameter is set, which is currently only true on mobile.
*/
export class IOSControlledAudioOutput implements MediaDevice<
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> {
private logger = rootLogger.getChild("[MediaDevices ControlledAudioOutput]");
// We need to subscribe to the raw devices so that the OS does update the input
// back to what it was before. otherwise we will switch back to the default
// whenever we allocate a new stream.
public readonly availableRaw$ = availableRawDevices$(
"audiooutput",
this.usingNames$,
this.scope,
this.logger,
);
public readonly available$ = this.scope.behavior(
combineLatest(
[controlledAvailableOutputDevices$.pipe(startWith([])), iosDeviceMenu$],
(availableRaw, iosDeviceMenu) => {
const available = new Map<string, AudioOutputDeviceLabel>(
availableRaw.map(
({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => {
let deviceLabel: AudioOutputDeviceLabel;
// if (isExternalHeadset) // Do we want this?
if (isEarpiece) deviceLabel = { type: "earpiece" };
else if (isSpeaker) deviceLabel = { type: "speaker" };
else deviceLabel = { type: "name", name };
return [id, deviceLabel];
},
),
);
// Create a virtual earpiece device in case a non-earpiece device is
// designated for this purpose
if (iosDeviceMenu && availableRaw.some((d) => d.forEarpiece)) {
this.logger.info(
`IOS Add virtual earpiece device with id ${EARPIECE_CONFIG_ID}`,
);
available.set(EARPIECE_CONFIG_ID, { type: "earpiece" });
}
return available;
},
),
);
private readonly deviceSelection$ = new Subject<string>();
public select(id: string): void {
this.logger.info(`select device: ${id}`);
this.deviceSelection$.next(id);
}
public readonly selected$ = this.scope.behavior(
combineLatest(
[
this.available$,
merge(
controlledOutputSelection$.pipe(startWith(undefined)),
this.deviceSelection$,
),
],
(available, preferredId) => {
const id = preferredId ?? available.keys().next().value;
return id === undefined
? undefined
: { id, virtualEarpiece: id === EARPIECE_CONFIG_ID };
},
).pipe(
tap((selected) => {
this.logger.debug(`selected device: ${selected?.id}`);
}),
),
);
public constructor(
private readonly usingNames$: Behavior<boolean>,
private readonly scope: ObservableScope,
) {
this.selected$.subscribe((device) => {
// Let the hosting application know which output device has been selected.
// This information is probably only of interest if the earpiece mode has
// been selected - for example, Element X iOS listens to this to determine
// whether it should enable the proximity sensor.
if (device !== undefined) {
this.logger.info("onAudioDeviceSelect called:", device);
window.controls.onAudioDeviceSelect?.(device.id);
// Also invoke the deprecated callback for backward compatibility
window.controls.onOutputDeviceSelect?.(device.id);
}
});
this.available$.subscribe((available) => {
this.logger.debug("available devices:", available);
});
this.availableRaw$.subscribe((availableRaw) => {
this.logger.debug("available raw devices:", availableRaw);
});
}
}

View File

@@ -9,35 +9,28 @@ import {
combineLatest,
filter,
map,
merge,
type Observable,
pairwise,
startWith,
Subject,
switchMap,
type Observable,
} from "rxjs";
import { createMediaDeviceObserver } from "@livekit/components-core";
import { type Logger, logger as rootLogger } from "matrix-js-sdk/lib/logger";
import {
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
audioInput as audioInputSetting,
audioOutput as audioOutputSetting,
videoInput as videoInputSetting,
alwaysShowIphoneEarpiece as alwaysShowIphoneEarpieceSetting,
} from "../settings/settings";
import { type ObservableScope } from "./ObservableScope";
import {
outputDevice$ as controlledOutputSelection$,
availableOutputDevices$ as controlledAvailableOutputDevices$,
} from "../controls";
import { availableOutputDevices$ as controlledAvailableOutputDevices$ } from "../controls";
import { getUrlParams } from "../UrlParams";
import { platform } from "../Platform";
import { switchWhen } from "../utils/observable";
import { type Behavior, constant } from "./Behavior";
// This hardcoded id is used in EX ios! It can only be changed in coordination with
// the ios swift team.
const EARPIECE_CONFIG_ID = "earpiece-id";
import { AndroidControlledAudioOutput } from "./AndroidControlledAudioOutput.ts";
import { IOSControlledAudioOutput } from "./IOSControlledAudioOutput.ts";
export type DeviceLabel =
| { type: "name"; name: string }
@@ -49,10 +42,18 @@ export type AudioOutputDeviceLabel =
| { type: "earpiece" }
| { type: "default"; name: string | null };
/**
* Base selected-device value shared by all media kinds.
*
* `id` is the effective device identifier used by browser media APIs.
*/
export interface SelectedDevice {
id: string;
}
/**
* Selected audio input value with audio-input-specific metadata.
*/
export interface SelectedAudioInputDevice extends SelectedDevice {
/**
* Emits whenever we think that this audio input device has logically changed
@@ -61,6 +62,9 @@ export interface SelectedAudioInputDevice extends SelectedDevice {
hardwareDeviceChange$: Observable<void>;
}
/**
* Selected audio output value with output-routing-specific metadata.
*/
export interface SelectedAudioOutputDevice extends SelectedDevice {
/**
* Whether this device is a "virtual earpiece" device. If so, we should output
@@ -69,23 +73,42 @@ export interface SelectedAudioOutputDevice extends SelectedDevice {
virtualEarpiece: boolean;
}
/**
* Common reactive contract for selectable input/output media devices (mic, speaker, camera).
*
* `Label` is the type used to represent a device in UI lists.
* `Selected` is the type used to represent the active selection for a device kind.
*/
export interface MediaDevice<Label, Selected> {
/**
* A map from available device IDs to labels.
* Reactive map of currently available devices keyed by device ID.
*
* `Label` defines the UI-facing label data structure for each device type.
*/
available$: Behavior<Map<string, Label>>;
/**
* The selected device.
* The active device selection.
* Can be `undefined` when no device is yet selected.
*
* When defined, `Selected` contains the selected device ID plus any
* type-specific metadata.
*/
selected$: Behavior<Selected | undefined>;
/**
* Selects a new device.
* Requests selection of a device by ID.
*
* Implementations typically persist this preference and let `selected$`
* converge to the effective device (which may differ if the requested ID is
* unavailable).
*/
select(id: string): void;
}
/**
* An observable that represents if we should display the devices menu for iOS.
*
* This implies the following
* - hide any input devices (they do not work anyhow on ios)
* - Show a button to show the native output picker instead.
@@ -95,7 +118,7 @@ export interface MediaDevice<Label, Selected> {
export const iosDeviceMenu$ =
platform === "ios" ? constant(true) : alwaysShowIphoneEarpieceSetting.value$;
function availableRawDevices$(
export function availableRawDevices$(
kind: MediaDeviceKind,
usingNames$: Behavior<boolean>,
scope: ObservableScope,
@@ -146,16 +169,23 @@ function selectDevice$<Label>(
): Observable<string | undefined> {
return combineLatest([available$, preferredId$], (available, preferredId) => {
if (available.size) {
// If the preferred device is available, use it. Or if every available
// device ID is falsy, the browser is probably just being paranoid about
// fingerprinting and we should still try using the preferred device.
// Worst case it is not available and the browser will gracefully fall
// back to some other device for us when requesting the media stream.
// Otherwise, select the first available device.
return (preferredId !== undefined && available.has(preferredId)) ||
(available.size === 1 && available.has(""))
? preferredId
: available.keys().next().value;
if (preferredId !== undefined && available.has(preferredId)) {
// If the preferred device is available, use it.
return preferredId;
} else if (available.size === 1 && available.has("")) {
// In some cases the enumerateDevices will list the devices with empty string details:
// `{deviceId:'', kind:'audiooutput|audioinput|videoinput', label:'', groupId:''}`
// This can happen when:
// 1. The user has not yet granted permissions to microphone/devices
// 2. The page is not running in a secure context (e.g. localhost or https)
// 3. In embedded WebViews, restrictions are often tighter, need active capture..
// 3. The browser is blocking access to device details for privacy reasons (?)
// This is most likely transitional, so keep the current device selected until we get a more accurate enumerateDevices.
return preferredId;
} else {
// No preferred, so pick a default.
return available.keys().next().value;
}
}
return undefined;
});
@@ -212,7 +242,7 @@ class AudioInput implements MediaDevice<DeviceLabel, SelectedAudioInputDevice> {
}
}
class AudioOutput implements MediaDevice<
export class AudioOutput implements MediaDevice<
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> {
@@ -251,14 +281,16 @@ class AudioOutput implements MediaDevice<
public readonly selected$ = this.scope.behavior(
selectDevice$(this.available$, audioOutputSetting.value$).pipe(
map((id) =>
id === undefined
? undefined
: {
id,
virtualEarpiece: false,
},
),
map((id) => {
if (id === undefined) {
return undefined;
} else {
return {
id,
virtualEarpiece: false,
};
}
}),
),
);
public select(id: string): void {
@@ -275,103 +307,6 @@ class AudioOutput implements MediaDevice<
}
}
class ControlledAudioOutput implements MediaDevice<
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> {
private logger = rootLogger.getChild("[MediaDevices ControlledAudioOutput]");
// We need to subscribe to the raw devices so that the OS does update the input
// back to what it was before. otherwise we will switch back to the default
// whenever we allocate a new stream.
public readonly availableRaw$ = availableRawDevices$(
"audiooutput",
this.usingNames$,
this.scope,
this.logger,
);
public readonly available$ = this.scope.behavior(
combineLatest(
[controlledAvailableOutputDevices$.pipe(startWith([])), iosDeviceMenu$],
(availableRaw, iosDeviceMenu) => {
const available = new Map<string, AudioOutputDeviceLabel>(
availableRaw.map(
({ id, name, isEarpiece, isSpeaker /*,isExternalHeadset*/ }) => {
let deviceLabel: AudioOutputDeviceLabel;
// if (isExternalHeadset) // Do we want this?
if (isEarpiece) deviceLabel = { type: "earpiece" };
else if (isSpeaker) deviceLabel = { type: "speaker" };
else deviceLabel = { type: "name", name };
return [id, deviceLabel];
},
),
);
// Create a virtual earpiece device in case a non-earpiece device is
// designated for this purpose
if (iosDeviceMenu && availableRaw.some((d) => d.forEarpiece))
available.set(EARPIECE_CONFIG_ID, { type: "earpiece" });
return available;
},
),
);
private readonly deviceSelection$ = new Subject<string>();
public select(id: string): void {
this.deviceSelection$.next(id);
}
public readonly selected$ = this.scope.behavior(
combineLatest(
[
this.available$,
merge(
controlledOutputSelection$.pipe(startWith(undefined)),
this.deviceSelection$,
),
],
(available, preferredId) => {
const id = preferredId ?? available.keys().next().value;
return id === undefined
? undefined
: { id, virtualEarpiece: id === EARPIECE_CONFIG_ID };
},
),
);
public constructor(
  private readonly usingNames$: Behavior<boolean>,
  private readonly scope: ObservableScope,
) {
  this.selected$.subscribe((device) => {
    // Let the hosting application know which output device has been selected.
    // This information is probably only of interest if the earpiece mode has
    // been selected - for example, Element X iOS listens to this to determine
    // whether it should enable the proximity sensor.
    if (device !== undefined) {
      this.logger.info(
        "[controlled-output] onAudioDeviceSelect called:",
        device,
      );
      window.controls.onAudioDeviceSelect?.(device.id);
      // Also invoke the deprecated callback for backward compatibility
      window.controls.onOutputDeviceSelect?.(device.id);
    }
  });
  // Debug logging of device availability changes.
  this.available$.subscribe((available) => {
    this.logger.info("[controlled-output] available devices:", available);
  });
  this.availableRaw$.subscribe((availableRaw) => {
    this.logger.info(
      "[controlled-output] available raw devices:",
      availableRaw,
    );
  });
}
}
class VideoInput implements MediaDevice<DeviceLabel, SelectedDevice> {
private logger = rootLogger.getChild("[MediaDevices VideoInput]");
@@ -434,7 +369,14 @@ export class MediaDevices {
AudioOutputDeviceLabel,
SelectedAudioOutputDevice
> = getUrlParams().controlledAudioDevices
? new ControlledAudioOutput(this.usingNames$, this.scope)
? platform == "android"
? new AndroidControlledAudioOutput(
controlledAvailableOutputDevices$,
this.scope,
getUrlParams().callIntent,
window.controls,
)
: new IOSControlledAudioOutput(this.usingNames$, this.scope)
: new AudioOutput(this.usingNames$, this.scope);
public readonly videoInput: MediaDevice<DeviceLabel, SelectedDevice> =

View File

@@ -1,807 +0,0 @@
/*
Copyright 2023, 2024 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
type AudioSource,
type VideoSource,
type TrackReference,
observeParticipantEvents,
observeParticipantMedia,
roomEventSelector,
} from "@livekit/components-core";
import {
type LocalParticipant,
LocalTrack,
LocalVideoTrack,
type Participant,
ParticipantEvent,
type RemoteParticipant,
Track,
TrackEvent,
facingModeFromLocalTrack,
type Room as LivekitRoom,
RoomEvent as LivekitRoomEvent,
RemoteTrack,
} from "livekit-client";
import { logger } from "matrix-js-sdk/lib/logger";
import {
  BehaviorSubject,
  type Observable,
  Subject,
  combineLatest,
  defer,
  distinctUntilChanged,
  filter,
  fromEvent,
  interval,
  map,
  merge,
  of,
  startWith,
  switchMap,
  throttleTime,
} from "rxjs";
import { alwaysShowSelf } from "../settings/settings";
import { showConnectionStats } from "../settings/settings";
import { accumulate } from "../utils/observable";
import { type EncryptionSystem } from "../e2ee/sharedKeyManagement";
import { E2eeType } from "../e2ee/e2eeType";
import { type ReactionOption } from "../reactions";
import { platform } from "../Platform";
import { type MediaDevices } from "./MediaDevices";
import { type Behavior } from "./Behavior";
import { type ObservableScope } from "./ObservableScope";
/**
 * Observes the track publication of the given source on the given
 * participant, emitting a TrackReference when one exists and undefined
 * otherwise.
 */
export function observeTrackReference$(
  participant: Participant,
  source: Track.Source,
): Observable<TrackReference | undefined> {
  // Re-read the publication whenever the participant's media changes, but
  // only propagate actual publication changes downstream.
  const publication$ = observeParticipantMedia(participant).pipe(
    map(() => participant.getTrackPublication(source)),
    distinctUntilChanged(),
  );
  return publication$.pipe(
    map((publication) =>
      publication === undefined
        ? undefined
        : { participant, publication, source },
    ),
  );
}
/**
 * Polls the RTP stream statistics (inbound or outbound, per `type`) for the
 * given participant's track roughly once per second, emitting undefined while
 * no stats-capable track is present or no matching report entry exists.
 */
export function observeRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
  type: "inbound-rtp" | "outbound-rtp",
): Observable<
  RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
> {
  // Poll every second and also re-poll whenever the track reference changes.
  const tick$ = interval(1000).pipe(startWith(0));
  return combineLatest([
    observeTrackReference$(participant, source),
    tick$,
  ]).pipe(
    switchMap(async ([trackReference]) => {
      const track = trackReference?.publication?.track;
      // Only local and remote LiveKit tracks expose getRTCStatsReport.
      const canReport =
        track instanceof RemoteTrack || track instanceof LocalTrack;
      if (!canReport) return undefined;
      const report = await track.getRTCStatsReport();
      if (!report) return undefined;
      // Return the first entry of the requested type, if any.
      for (const entry of report.values()) {
        if (entry.type === type) return entry;
      }
      return undefined;
    }),
    startWith(undefined),
  );
}
/**
 * Polls the inbound RTP stream statistics for the given participant's track.
 */
export function observeInboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCInboundRtpStreamStats | undefined> {
  // Narrow the combined stats type down to the inbound variant.
  const stats$ = observeRtpStreamStats$(participant, source, "inbound-rtp");
  return stats$.pipe(
    map((stats) => stats as RTCInboundRtpStreamStats | undefined),
  );
}
/**
 * Polls the outbound RTP stream statistics for the given participant's track.
 */
export function observeOutboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCOutboundRtpStreamStats | undefined> {
  // Narrow the combined stats type down to the outbound variant.
  const stats$ = observeRtpStreamStats$(participant, source, "outbound-rtp");
  return stats$.pipe(
    map((stats) => stats as RTCOutboundRtpStreamStats | undefined),
  );
}
function observeRemoteTrackReceivingOkay$(
participant: Participant,
source: Track.Source,
): Observable<boolean | undefined> {
let lastStats: {
framesDecoded: number | undefined;
framesDropped: number | undefined;
framesReceived: number | undefined;
} = {
framesDecoded: undefined,
framesDropped: undefined,
framesReceived: undefined,
};
return observeInboundRtpStreamStats$(participant, source).pipe(
map((stats) => {
if (!stats) return undefined;
const { framesDecoded, framesDropped, framesReceived } = stats;
return {
framesDecoded,
framesDropped,
framesReceived,
};
}),
filter((newStats) => !!newStats),
map((newStats): boolean | undefined => {
const oldStats = lastStats;
lastStats = newStats;
if (
typeof newStats.framesReceived === "number" &&
typeof oldStats.framesReceived === "number" &&
typeof newStats.framesDecoded === "number" &&
typeof oldStats.framesDecoded === "number"
) {
const framesReceivedDelta =
newStats.framesReceived - oldStats.framesReceived;
const framesDecodedDelta =
newStats.framesDecoded - oldStats.framesDecoded;
// if we received >0 frames and managed to decode >0 frames then we treat that as success
if (framesReceivedDelta > 0) {
return framesDecodedDelta > 0;
}
}
// no change
return undefined;
}),
filter((x) => typeof x === "boolean"),
startWith(undefined),
);
}
/**
 * Observes whether an E2EE decryption error matching `criteria` (e.g.
 * "MissingKey" or "InvalidKey") has been reported for the given participant,
 * throttled to at most one update per second. Emits false until the first
 * matching error arrives.
 */
function encryptionErrorObservable$(
  room$: Behavior<LivekitRoom | undefined>,
  participant: Participant,
  encryptionSystem: EncryptionSystem,
  criteria: string,
): Observable<boolean> {
  return room$.pipe(
    switchMap((room) => {
      // No LiveKit room yet: report "no error".
      if (room === undefined) return of(false);
      return roomEventSelector(room, LivekitRoomEvent.EncryptionError).pipe(
        map((e) => {
          const [err] = e;
          if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            return (
              // Ideally we would pull the participant identity from the field on the error.
              // However, it gets lost in the serialization process between workers.
              // So, instead we do a string match
              (err?.message.includes(participant.identity) &&
                err?.message.includes(criteria)) ??
              false
            );
          } else if (encryptionSystem.kind === E2eeType.SHARED_KEY) {
            return !!err?.message.includes(criteria);
          }
          return false;
        }),
      );
    }),
    distinctUntilChanged(),
    throttleTime(1000), // Throttle to avoid spamming the UI
    startWith(false),
  );
}
/** Encryption state of a participant's media, for display in the UI. */
export enum EncryptionStatus {
  /** Participant has not connected yet, or no verdict is available. */
  Connecting,
  /** Media is being received and decrypted successfully. */
  Okay,
  /** A per-participant key is missing. */
  KeyMissing,
  /** A per-participant key is present but invalid. */
  KeyInvalid,
  /** Shared-key decryption failed — the password is likely wrong. */
  PasswordInvalid,
}
/**
 * Base class for view models of a single participant's media (camera/mic or
 * screen share). Derives track references, an unencrypted-media warning, and
 * an encryption status from the (possibly absent) LiveKit participant.
 */
abstract class BaseMediaViewModel {
  /**
   * The LiveKit video track for this media.
   */
  public readonly video$: Behavior<TrackReference | undefined>;
  /**
   * Whether there should be a warning that this media is unencrypted.
   */
  public readonly unencryptedWarning$: Behavior<boolean>;
  /**
   * The encryption status of this media, for display in the UI.
   */
  public readonly encryptionStatus$: Behavior<EncryptionStatus>;
  /**
   * Whether this media corresponds to the local participant.
   */
  public abstract readonly local: boolean;

  // Tracks the publication for the given source on whichever participant
  // currently backs this media; a null participant yields undefined.
  private observeTrackReference$(
    source: Track.Source,
  ): Behavior<TrackReference | undefined> {
    return this.scope.behavior(
      this.participant$.pipe(
        switchMap((p) =>
          !p ? of(undefined) : observeTrackReference$(p, source),
        ),
      ),
    );
  }

  public constructor(
    protected readonly scope: ObservableScope,
    /**
     * An opaque identifier for this media.
     */
    public readonly id: string,
    /**
     * The Matrix user to which this media belongs.
     */
    public readonly userId: string,
    public readonly rtcBackendIdentity: string,
    // We don't necessarily have a participant if a user connects via MatrixRTC but not (yet) through
    // livekit.
    protected readonly participant$: Observable<
      LocalParticipant | RemoteParticipant | null
    >,
    encryptionSystem: EncryptionSystem,
    audioSource: AudioSource,
    videoSource: VideoSource,
    protected readonly livekitRoom$: Behavior<LivekitRoom | undefined>,
    public readonly focusUrl$: Behavior<string | undefined>,
    public readonly displayName$: Behavior<string>,
    public readonly mxcAvatarUrl$: Behavior<string | undefined>,
  ) {
    const audio$ = this.observeTrackReference$(audioSource);
    this.video$ = this.observeTrackReference$(videoSource);
    // Warn when encryption is expected but either track arrives unencrypted.
    this.unencryptedWarning$ = this.scope.behavior(
      combineLatest(
        [audio$, this.video$],
        (a, v) =>
          encryptionSystem.kind !== E2eeType.NONE &&
          (a?.publication.isEncrypted === false ||
            v?.publication.isEncrypted === false),
      ),
    );
    this.encryptionStatus$ = this.scope.behavior(
      this.participant$.pipe(
        switchMap((participant): Observable<EncryptionStatus> => {
          if (!participant) {
            return of(EncryptionStatus.Connecting);
          } else if (
            participant.isLocal ||
            encryptionSystem.kind === E2eeType.NONE
          ) {
            // Local media is not decrypted by us, and unencrypted calls
            // have nothing to report.
            return of(EncryptionStatus.Okay);
          } else if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            // Per-participant keys: distinguish missing vs invalid keys,
            // and treat successful media receipt as "okay".
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "MissingKey",
              ),
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(([keyMissing, keyInvalid, audioOkay, videoOkay]) => {
                if (keyMissing) return EncryptionStatus.KeyMissing;
                if (keyInvalid) return EncryptionStatus.KeyInvalid;
                if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                return undefined; // no change
              }),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          } else {
            // Shared-key (password) encryption: an invalid key implies the
            // password is wrong.
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(
                ([keyInvalid, audioOkay, videoOkay]):
                  | EncryptionStatus
                  | undefined => {
                  if (keyInvalid) return EncryptionStatus.PasswordInvalid;
                  if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                  return undefined; // no change
                },
              ),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          }
        }),
      ),
    );
  }
}
/**
 * Some participant's media.
 */
export type MediaViewModel = UserMediaViewModel | ScreenShareViewModel;
/**
 * A participant's camera/microphone media, local or remote.
 */
export type UserMediaViewModel =
  | LocalUserMediaViewModel
  | RemoteUserMediaViewModel;
/**
 * Some participant's user media.
 */
abstract class BaseUserMediaViewModel extends BaseMediaViewModel {
  // Whether the backing participant is currently speaking; false while no
  // participant exists.
  private readonly _speaking$ = this.scope.behavior(
    this.participant$.pipe(
      switchMap((p) =>
        p
          ? observeParticipantEvents(
              p,
              ParticipantEvent.IsSpeakingChanged,
            ).pipe(map((p) => p.isSpeaking))
          : of(false),
      ),
    ),
  );
  /**
   * Whether the participant is speaking.
   */
  // Getter backed by a private field so that subclasses can override it
  public get speaking$(): Behavior<boolean> {
    return this._speaking$;
  }
  /**
   * Whether this participant is sending audio (i.e. is unmuted on their side).
   */
  public readonly audioEnabled$: Behavior<boolean>;
  private readonly _videoEnabled$: Behavior<boolean>;
  /**
   * Whether this participant is sending video.
   */
  // Getter backed by a private field so that subclasses can override it
  public get videoEnabled$(): Behavior<boolean> {
    return this._videoEnabled$;
  }
  // Defaults to cropping the video to fill the tile.
  private readonly _cropVideo$ = new BehaviorSubject(true);
  /**
   * Whether the tile video should be contained inside the tile or be cropped to fit.
   */
  public readonly cropVideo$: Behavior<boolean> = this._cropVideo$;

  public constructor(
    scope: ObservableScope,
    id: string,
    userId: string,
    rtcBackendIdentity: string,
    participant$: Observable<LocalParticipant | RemoteParticipant | null>,
    encryptionSystem: EncryptionSystem,
    livekitRoom$: Behavior<LivekitRoom | undefined>,
    focusUrl$: Behavior<string | undefined>,
    displayName$: Behavior<string>,
    mxcAvatarUrl$: Behavior<string | undefined>,
    public readonly handRaised$: Behavior<Date | null>,
    public readonly reaction$: Behavior<ReactionOption | null>,
  ) {
    // User media always uses the microphone/camera track sources.
    super(
      scope,
      id,
      userId,
      rtcBackendIdentity,
      participant$,
      encryptionSystem,
      Track.Source.Microphone,
      Track.Source.Camera,
      livekitRoom$,
      focusUrl$,
      displayName$,
      mxcAvatarUrl$,
    );
    // Media publications (mic/camera) of the backing participant, if any.
    const media$ = this.scope.behavior(
      participant$.pipe(
        switchMap((p) => (p && observeParticipantMedia(p)) ?? of(undefined)),
      ),
    );
    // "Enabled" means a track exists and is not muted.
    this.audioEnabled$ = this.scope.behavior(
      media$.pipe(map((m) => m?.microphoneTrack?.isMuted === false)),
    );
    this._videoEnabled$ = this.scope.behavior(
      media$.pipe(map((m) => m?.cameraTrack?.isMuted === false)),
    );
  }

  /** Toggle between cropping the video to fill the tile and containing it. */
  public toggleFitContain(): void {
    this._cropVideo$.next(!this._cropVideo$.value);
  }

  public get local(): boolean {
    return this instanceof LocalUserMediaViewModel;
  }

  // Outbound stats for local media, inbound stats for remote media.
  public abstract get audioStreamStats$(): Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
  public abstract get videoStreamStats$(): Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
}
/**
 * The local participant's user media.
 */
export class LocalUserMediaViewModel extends BaseUserMediaViewModel {
  /**
   * The local video track as an observable that emits whenever the track
   * changes, the camera is switched, or the track is muted.
   */
  private readonly videoTrack$: Observable<LocalVideoTrack | null> =
    this.video$.pipe(
      switchMap((v) => {
        const track = v?.publication.track;
        if (!(track instanceof LocalVideoTrack)) return of(null);
        return merge(
          // Watch for track restarts because they indicate a camera switch.
          // This event is also emitted when unmuting the track object.
          fromEvent(track, TrackEvent.Restarted).pipe(
            startWith(null),
            map(() => track),
          ),
          // When the track object is muted, reset it to null.
          fromEvent(track, TrackEvent.Muted).pipe(map(() => null)),
        );
      }),
    );
  /**
   * Whether the video should be mirrored.
   */
  public readonly mirror$ = this.scope.behavior(
    this.videoTrack$.pipe(
      // Mirror only front-facing cameras (those that face the user)
      map(
        (track) =>
          track !== null &&
          facingModeFromLocalTrack(track).facingMode === "user",
      ),
    ),
  );
  /**
   * Whether to show this tile in a highly visible location near the start of
   * the grid.
   */
  public readonly alwaysShow$ = alwaysShowSelf.value$;
  public readonly setAlwaysShow = alwaysShowSelf.setValue;
  /**
   * Callback for switching between the front and back cameras. Null on
   * desktop, or when the current camera reports no usable facing mode.
   */
  public readonly switchCamera$: Behavior<(() => void) | null> =
    this.scope.behavior(
      platform === "desktop"
        ? of(null)
        : this.videoTrack$.pipe(
            map((track) => {
              if (track === null) return null;
              const facingMode = facingModeFromLocalTrack(track).facingMode;
              // If the camera isn't front or back-facing, don't provide a switch
              // camera shortcut at all
              if (facingMode !== "user" && facingMode !== "environment")
                return null;
              // Restart the track with a camera facing the opposite direction
              return (): void =>
                void track
                  .restartTrack({
                    facingMode: facingMode === "user" ? "environment" : "user",
                  })
                  .then(() => {
                    // Inform the MediaDevices which camera was chosen
                    const deviceId =
                      track.mediaStreamTrack.getSettings().deviceId;
                    if (deviceId !== undefined)
                      this.mediaDevices.videoInput.select(deviceId);
                  })
                  .catch((e) =>
                    logger.error("Failed to switch camera", facingMode, e),
                  );
            }),
          ),
    );

  public constructor(
    scope: ObservableScope,
    id: string,
    userId: string,
    rtcBackendIdentity: string,
    participant$: Behavior<LocalParticipant | null>,
    encryptionSystem: EncryptionSystem,
    livekitRoom$: Behavior<LivekitRoom | undefined>,
    focusUrl$: Behavior<string | undefined>,
    private readonly mediaDevices: MediaDevices,
    displayName$: Behavior<string>,
    mxcAvatarUrl$: Behavior<string | undefined>,
    handRaised$: Behavior<Date | null>,
    reaction$: Behavior<ReactionOption | null>,
  ) {
    super(
      scope,
      id,
      userId,
      rtcBackendIdentity,
      participant$,
      encryptionSystem,
      livekitRoom$,
      focusUrl$,
      displayName$,
      mxcAvatarUrl$,
      handRaised$,
      reaction$,
    );
  }

  // Outbound RTP stats for the local microphone, only while the connection
  // stats setting is enabled.
  public audioStreamStats$ = combineLatest([
    this.participant$,
    showConnectionStats.value$,
  ]).pipe(
    switchMap(([p, showConnectionStats]) => {
      if (!p || !showConnectionStats) return of(undefined);
      return observeOutboundRtpStreamStats$(p, Track.Source.Microphone);
    }),
  );
  // Outbound RTP stats for the local camera, only while the connection stats
  // setting is enabled.
  public videoStreamStats$ = combineLatest([
    this.participant$,
    showConnectionStats.value$,
  ]).pipe(
    switchMap(([p, showConnectionStats]) => {
      if (!p || !showConnectionStats) return of(undefined);
      return observeOutboundRtpStreamStats$(p, Track.Source.Camera);
    }),
  );
}
/**
 * A remote participant's user media.
 */
export class RemoteUserMediaViewModel extends BaseUserMediaViewModel {
  /**
   * Whether we are waiting for this user's LiveKit participant to exist. This
   * could be because either we or the remote party are still connecting.
   */
  public readonly waitingForMedia$ = this.scope.behavior<boolean>(
    combineLatest(
      [this.livekitRoom$, this.participant$],
      (livekitRoom, participant) =>
        // If livekitRoom is undefined, the user is not attempting to publish on
        // any transport and so we shouldn't expect a participant. (They might
        // be a subscribe-only bot for example.)
        livekitRoom !== undefined && participant === null,
    ),
  );
  // This private field is used to override the value from the superclass
  private __speaking$: Behavior<boolean>;
  public get speaking$(): Behavior<boolean> {
    return this.__speaking$;
  }
  // Event streams feeding the volume state machine below.
  private readonly locallyMutedToggle$ = new Subject<void>();
  private readonly localVolumeAdjustment$ = new Subject<number>();
  private readonly localVolumeCommit$ = new Subject<void>();
  /**
   * The volume to which this participant's audio is set, as a scalar
   * multiplier.
   */
  public readonly localVolume$ = this.scope.behavior<number>(
    merge(
      this.locallyMutedToggle$.pipe(map(() => "toggle mute" as const)),
      this.localVolumeAdjustment$,
      this.localVolumeCommit$.pipe(map(() => "commit" as const)),
    ).pipe(
      // Track both the live volume and the last committed (non-zero) volume
      // so that unmuting restores the previous level.
      accumulate({ volume: 1, committedVolume: 1 }, (state, event) => {
        switch (event) {
          case "toggle mute":
            return {
              ...state,
              volume: state.volume === 0 ? state.committedVolume : 0,
            };
          case "commit":
            // Dragging the slider to zero should have the same effect as
            // muting: keep the original committed volume, as if it were never
            // dragged
            return {
              ...state,
              committedVolume:
                state.volume === 0 ? state.committedVolume : state.volume,
            };
          default:
            // Volume adjustment
            return { ...state, volume: event };
        }
      }),
      map(({ volume }) => volume),
    ),
  );
  // This private field is used to override the value from the superclass
  private __videoEnabled$: Behavior<boolean>;
  public get videoEnabled$(): Behavior<boolean> {
    return this.__videoEnabled$;
  }
  /**
   * Whether this participant's audio is disabled.
   */
  public readonly locallyMuted$ = this.scope.behavior<boolean>(
    this.localVolume$.pipe(map((volume) => volume === 0)),
  );

  public constructor(
    scope: ObservableScope,
    id: string,
    userId: string,
    rtcBackendIdentity: string,
    participant$: Observable<RemoteParticipant | null>,
    encryptionSystem: EncryptionSystem,
    livekitRoom$: Behavior<LivekitRoom | undefined>,
    focusUrl$: Behavior<string | undefined>,
    private readonly pretendToBeDisconnected$: Behavior<boolean>,
    displayName$: Behavior<string>,
    mxcAvatarUrl$: Behavior<string | undefined>,
    handRaised$: Behavior<Date | null>,
    reaction$: Behavior<ReactionOption | null>,
  ) {
    super(
      scope,
      id,
      userId,
      rtcBackendIdentity,
      participant$,
      encryptionSystem,
      livekitRoom$,
      focusUrl$,
      displayName$,
      mxcAvatarUrl$,
      handRaised$,
      reaction$,
    );
    // While pretending to be disconnected, report silence / no video
    // regardless of the real track state.
    this.__speaking$ = this.scope.behavior(
      pretendToBeDisconnected$.pipe(
        switchMap((disconnected) =>
          disconnected ? of(false) : super.speaking$,
        ),
      ),
    );
    this.__videoEnabled$ = this.scope.behavior(
      pretendToBeDisconnected$.pipe(
        switchMap((disconnected) =>
          disconnected ? of(false) : super.videoEnabled$,
        ),
      ),
    );
    // Sync the local volume with LiveKit
    // NOTE(review): this subscription itself is not bound to the scope (only
    // the inner volume source is) — confirm it gets cleaned up when the
    // participant observable completes.
    combineLatest([
      participant$,
      // The local volume, taking into account whether we're supposed to pretend
      // that the audio stream is disconnected (since we don't necessarily want
      // that to modify the UI state).
      this.pretendToBeDisconnected$.pipe(
        switchMap((disconnected) => (disconnected ? of(0) : this.localVolume$)),
        this.scope.bind(),
      ),
    ]).subscribe(([p, volume]) => p?.setVolume(volume));
  }

  /** Toggle local muting of this participant's audio. */
  public toggleLocallyMuted(): void {
    this.locallyMutedToggle$.next();
  }

  /** Set the (uncommitted) local volume, e.g. while a slider is dragged. */
  public setLocalVolume(value: number): void {
    this.localVolumeAdjustment$.next(value);
  }

  /** Commit the current volume, e.g. when the slider is released. */
  public commitLocalVolume(): void {
    this.localVolumeCommit$.next();
  }

  // Inbound RTP stats for this participant's audio, only while the
  // connection stats setting is enabled.
  public audioStreamStats$ = combineLatest([
    this.participant$,
    showConnectionStats.value$,
  ]).pipe(
    switchMap(([p, showConnectionStats]) => {
      if (!p || !showConnectionStats) return of(undefined);
      return observeInboundRtpStreamStats$(p, Track.Source.Microphone);
    }),
  );
  // Inbound RTP stats for this participant's video, only while the
  // connection stats setting is enabled.
  public videoStreamStats$ = combineLatest([
    this.participant$,
    showConnectionStats.value$,
  ]).pipe(
    switchMap(([p, showConnectionStats]) => {
      if (!p || !showConnectionStats) return of(undefined);
      return observeInboundRtpStreamStats$(p, Track.Source.Camera);
    }),
  );
}
/**
 * Some participant's screen share media.
 */
export class ScreenShareViewModel extends BaseMediaViewModel {
  /**
   * Whether this screen share's video should be displayed. Hidden while we
   * are pretending the stream is disconnected.
   */
  public readonly videoEnabled$ = this.scope.behavior(
    this.pretendToBeDisconnected$.pipe(map((disconnected) => !disconnected)),
  );

  public constructor(
    scope: ObservableScope,
    id: string,
    userId: string,
    rtcBackendIdentity: string,
    participant$: Observable<LocalParticipant | RemoteParticipant>,
    encryptionSystem: EncryptionSystem,
    livekitRoom$: Behavior<LivekitRoom | undefined>,
    focusUrl$: Behavior<string | undefined>,
    private readonly pretendToBeDisconnected$: Behavior<boolean>,
    displayName$: Behavior<string>,
    mxcAvatarUrl$: Behavior<string | undefined>,
    public readonly local: boolean,
  ) {
    // Screen shares use the ScreenShare/ScreenShareAudio track sources.
    super(
      scope,
      id,
      userId,
      rtcBackendIdentity,
      participant$,
      encryptionSystem,
      Track.Source.ScreenShareAudio,
      Track.Source.ScreenShare,
      livekitRoom$,
      focusUrl$,
      displayName$,
      mxcAvatarUrl$,
    );
  }
}

View File

@@ -16,14 +16,14 @@ export function oneOnOneLayout(
prevTiles: TileStore,
): [OneOnOneLayout, TileStore] {
const update = prevTiles.from(2);
update.registerGridTile(media.local);
update.registerGridTile(media.remote);
update.registerGridTile(media.pip);
update.registerGridTile(media.spotlight);
const tiles = update.build();
return [
{
type: media.type,
local: tiles.gridTilesByMedia.get(media.local)!,
remote: tiles.gridTilesByMedia.get(media.remote)!,
spotlight: tiles.gridTilesByMedia.get(media.spotlight)!,
pip: tiles.gridTilesByMedia.get(media.pip)!,
},
tiles,
];

View File

@@ -5,6 +5,7 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { platform } from "../Platform.ts";
import { type PipLayout, type PipLayoutMedia } from "./layout-types.ts";
import { type TileStore } from "./TileStore";
@@ -16,7 +17,11 @@ export function pipLayout(
prevTiles: TileStore,
): [PipLayout, TileStore] {
const update = prevTiles.from(0);
update.registerSpotlight(media.spotlight, true);
// Don't maximise in PiP on EW since we want the rounded corners and the footer
update.registerSpotlight(
media.spotlight,
platform === "desktop" ? false : true,
);
const tiles = update.build();
return [
{

View File

@@ -1,55 +0,0 @@
/*
Copyright 2025 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { of } from "rxjs";
import {
type LocalParticipant,
type RemoteParticipant,
type Room as LivekitRoom,
} from "livekit-client";
import { type ObservableScope } from "./ObservableScope.ts";
import { ScreenShareViewModel } from "./MediaViewModel.ts";
import type { EncryptionSystem } from "../e2ee/sharedKeyManagement.ts";
import type { Behavior } from "./Behavior.ts";
/**
 * A screen share media item to be presented in a tile. This is a thin wrapper
 * around ScreenShareViewModel which essentially just establishes an
 * ObservableScope for behaviors that the view model depends on.
 */
export class ScreenShare {
  public readonly vm: ScreenShareViewModel;

  public constructor(
    private readonly scope: ObservableScope,
    id: string,
    userId: string,
    rtcBackendIdentity: string,
    participant: LocalParticipant | RemoteParticipant,
    encryptionSystem: EncryptionSystem,
    livekitRoom$: Behavior<LivekitRoom | undefined>,
    focusUrl$: Behavior<string | undefined>,
    pretendToBeDisconnected$: Behavior<boolean>,
    displayName$: Behavior<string>,
    mxcAvatarUrl$: Behavior<string | undefined>,
  ) {
    // The participant is fixed for the lifetime of this item, so it is
    // wrapped in a constant observable for the view model.
    this.vm = new ScreenShareViewModel(
      this.scope,
      id,
      userId,
      rtcBackendIdentity,
      of(participant),
      encryptionSystem,
      livekitRoom$,
      focusUrl$,
      pretendToBeDisconnected$,
      displayName$,
      mxcAvatarUrl$,
      participant.isLocal,
    );
  }
}

View File

@@ -8,10 +8,12 @@ Please see LICENSE in the repository root for full details.
import { BehaviorSubject } from "rxjs";
import { logger } from "matrix-js-sdk/lib/logger";
import { type MediaViewModel, type UserMediaViewModel } from "./MediaViewModel";
import { GridTileViewModel, SpotlightTileViewModel } from "./TileViewModel";
import { fillGaps } from "../utils/iter";
import { debugTileLayout } from "../settings/settings";
import { type MediaViewModel } from "./media/MediaViewModel";
import { type UserMediaViewModel } from "./media/UserMediaViewModel";
import { type RingingMediaViewModel } from "./media/RingingMediaViewModel";
function debugEntries(entries: GridTileData[]): string[] {
return entries.map((e) => e.media.displayName$.value);
@@ -47,8 +49,10 @@ class SpotlightTileData {
}
class GridTileData {
private readonly media$: BehaviorSubject<UserMediaViewModel>;
public get media(): UserMediaViewModel {
private readonly media$: BehaviorSubject<
UserMediaViewModel | RingingMediaViewModel
>;
public get media(): UserMediaViewModel | RingingMediaViewModel {
return this.media$.value;
}
public set media(value: UserMediaViewModel) {
@@ -57,7 +61,7 @@ class GridTileData {
public readonly vm: GridTileViewModel;
public constructor(media: UserMediaViewModel) {
public constructor(media: UserMediaViewModel | RingingMediaViewModel) {
this.media$ = new BehaviorSubject(media);
this.vm = new GridTileViewModel(this.media$);
}
@@ -177,7 +181,9 @@ export class TileStoreBuilder {
* Sets up a grid tile for the given media. If this is never called for some
* media, then that media will have no grid tile.
*/
public registerGridTile(media: UserMediaViewModel): void {
public registerGridTile(
media: UserMediaViewModel | RingingMediaViewModel,
): void {
if (DEBUG_ENABLED)
logger.debug(
`[TileStore, ${this.generation}] register grid tile: ${media.displayName$.value}`,
@@ -186,7 +192,11 @@ export class TileStoreBuilder {
if (this.spotlight !== null) {
// We actually *don't* want spotlight speakers to appear in both the
// spotlight and the grid, so they're filtered out here
if (!media.local && this.spotlight.media.includes(media)) return;
if (
!(media.type === "user" && media.local) &&
this.spotlight.media.includes(media)
)
return;
// When the spotlight speaker changes, we would see one grid tile appear
// and another grid tile disappear. This would be an undesirable layout
// shift, so instead what we do is take the speaker's grid tile and swap

View File

@@ -5,8 +5,10 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type MediaViewModel, type UserMediaViewModel } from "./MediaViewModel";
import { type Behavior } from "./Behavior";
import { type MediaViewModel } from "./media/MediaViewModel";
import { type RingingMediaViewModel } from "./media/RingingMediaViewModel";
import { type UserMediaViewModel } from "./media/UserMediaViewModel";
let nextId = 0;
function createId(): string {
@@ -16,7 +18,11 @@ function createId(): string {
export class GridTileViewModel {
public readonly id = createId();
public constructor(public readonly media$: Behavior<UserMediaViewModel>) {}
public constructor(
public readonly media$: Behavior<
UserMediaViewModel | RingingMediaViewModel
>,
) {}
}
export class SpotlightTileViewModel {

View File

@@ -1,209 +0,0 @@
/*
Copyright 2025 New Vector Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, type Observable, of, switchMap } from "rxjs";
import {
type LocalParticipant,
ParticipantEvent,
type RemoteParticipant,
type Room as LivekitRoom,
} from "livekit-client";
import { observeParticipantEvents } from "@livekit/components-core";
import { type ObservableScope } from "./ObservableScope.ts";
import {
LocalUserMediaViewModel,
RemoteUserMediaViewModel,
type UserMediaViewModel,
} from "./MediaViewModel.ts";
import type { Behavior } from "./Behavior.ts";
import type { EncryptionSystem } from "../e2ee/sharedKeyManagement.ts";
import type { MediaDevices } from "./MediaDevices.ts";
import type { ReactionOption } from "../reactions";
import { observeSpeaker$ } from "./observeSpeaker.ts";
import { generateItems } from "../utils/observable.ts";
import { ScreenShare } from "./ScreenShare.ts";
import { type TaggedParticipant } from "./CallViewModel/remoteMembers/MatrixLivekitMembers.ts";
/**
 * Sorting bins defining the order in which media tiles appear in the layout.
 * Lower values sort earlier (closer to the start of the grid).
 */
enum SortingBin {
  /**
   * Yourself, when the "always show self" option is on.
   */
  SelfAlwaysShown,
  /**
   * Participants that are sharing their screen.
   */
  Presenters,
  /**
   * Participants that have been speaking recently.
   */
  Speakers,
  /**
   * Participants that have their hand raised.
   */
  HandRaised,
  /**
   * Participants with video.
   */
  Video,
  /**
   * Participants not sharing any video.
   */
  NoVideo,
  /**
   * Yourself, when the "always show self" option is off.
   */
  SelfNotAlwaysShown,
}
/**
* A user media item to be presented in a tile. This is a thin wrapper around
* UserMediaViewModel which additionally determines the media item's sorting bin
* for inclusion in the call layout and tracks associated screen shares.
*/
export class UserMedia {
public readonly vm: UserMediaViewModel =
this.participant.type === "local"
? new LocalUserMediaViewModel(
this.scope,
this.id,
this.userId,
this.rtcBackendIdentity,
this.participant.value$,
this.encryptionSystem,
this.livekitRoom$,
this.focusUrl$,
this.mediaDevices,
this.displayName$,
this.mxcAvatarUrl$,
this.scope.behavior(this.handRaised$),
this.scope.behavior(this.reaction$),
)
: new RemoteUserMediaViewModel(
this.scope,
this.id,
this.userId,
this.rtcBackendIdentity,
this.participant.value$,
this.encryptionSystem,
this.livekitRoom$,
this.focusUrl$,
this.pretendToBeDisconnected$,
this.displayName$,
this.mxcAvatarUrl$,
this.scope.behavior(this.handRaised$),
this.scope.behavior(this.reaction$),
);
private readonly speaker$ = this.scope.behavior(
observeSpeaker$(this.vm.speaking$),
);
// TypeScript needs this widening of the type to happen in a separate statement
private readonly participant$: Behavior<
LocalParticipant | RemoteParticipant | null
> = this.participant.value$;
/**
* All screen share media associated with this user media.
*/
public readonly screenShares$ = this.scope.behavior(
this.participant$.pipe(
switchMap((p) =>
p === null
? of([])
: observeParticipantEvents(
p,
ParticipantEvent.TrackPublished,
ParticipantEvent.TrackUnpublished,
ParticipantEvent.LocalTrackPublished,
ParticipantEvent.LocalTrackUnpublished,
).pipe(
// Technically more than one screen share might be possible... our
// MediaViewModels don't support it though since they look for a unique
// track for the given source. So generateItems here is a bit overkill.
generateItems(
function* (p) {
if (p.isScreenShareEnabled)
yield {
keys: ["screen-share"],
data: undefined,
};
},
(scope, _data$, key) =>
new ScreenShare(
scope,
`${this.id}:${key}`,
this.userId,
this.rtcBackendIdentity,
p,
this.encryptionSystem,
this.livekitRoom$,
this.focusUrl$,
this.pretendToBeDisconnected$,
this.displayName$,
this.mxcAvatarUrl$,
),
),
),
),
),
);
/** Whether this member currently has at least one active screen share. */
private readonly presenter$ = this.scope.behavior(
  this.screenShares$.pipe(map((screenShares) => screenShares.length > 0)),
);
/**
 * Which sorting bin the media item should be placed in.
 */
// This is exposed here rather than by UserMediaViewModel because it's only
// relevant to the layout algorithms; the MediaView component should be
// ignorant of this value.
public readonly bin$ = combineLatest(
  [
    this.speaker$,
    this.presenter$,
    this.vm.videoEnabled$,
    this.vm.handRaised$,
    // Only local media exposes an "always show" pin; remote media never has it
    this.vm instanceof LocalUserMediaViewModel
      ? this.vm.alwaysShow$
      : of(false),
  ],
  (speaker, presenter, video, handRaised, alwaysShow) => {
    // Checks are ordered by decreasing display priority: self, presenters,
    // speakers, raised hands, then video/no-video
    if (this.vm.local)
      return alwaysShow
        ? SortingBin.SelfAlwaysShown
        : SortingBin.SelfNotAlwaysShown;
    else if (presenter) return SortingBin.Presenters;
    else if (speaker) return SortingBin.Speakers;
    else if (handRaised) return SortingBin.HandRaised;
    else if (video) return SortingBin.Video;
    else return SortingBin.NoVideo;
  },
);
/**
 * @param id - An opaque identifier for this media item.
 * @param userId - The Matrix user to which this media belongs.
 * @param rtcBackendIdentity - The expected identity of the LiveKit
 *   participant; exposed for debugging.
 * @param participant - The (possibly not yet connected) LiveKit participant.
 * @param pretendToBeDisconnected$ - Whether to present this media as if the
 *   stream were disconnected — presumably without altering persistent UI
 *   state (see VolumeControls); confirm against callers.
 */
public constructor(
  private readonly scope: ObservableScope,
  public readonly id: string,
  private readonly userId: string,
  private readonly rtcBackendIdentity: string,
  private readonly participant: TaggedParticipant,
  private readonly encryptionSystem: EncryptionSystem,
  private readonly livekitRoom$: Behavior<LivekitRoom | undefined>,
  private readonly focusUrl$: Behavior<string | undefined>,
  private readonly mediaDevices: MediaDevices,
  private readonly pretendToBeDisconnected$: Behavior<boolean>,
  private readonly displayName$: Behavior<string>,
  private readonly mxcAvatarUrl$: Behavior<string | undefined>,
  private readonly handRaised$: Observable<Date | null>,
  private readonly reaction$: Observable<ReactionOption | null>,
) {}
}

101
src/state/VolumeControls.ts Normal file
View File

@@ -0,0 +1,101 @@
/*
Copyright 2026 Element Software Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, merge, of, Subject, switchMap } from "rxjs";
import { type Behavior } from "./Behavior";
import { type ObservableScope } from "./ObservableScope";
import { accumulate } from "../utils/observable";
/**
 * Controls for audio playback volume.
 */
export interface VolumeControls {
  /**
   * The volume to which the audio is set, as a scalar multiplier.
   */
  playbackVolume$: Behavior<number>;
  /**
   * Whether playback of this audio is disabled.
   */
  playbackMuted$: Behavior<boolean>;
  /** Toggles between muted (volume 0) and the last committed non-zero volume. */
  togglePlaybackMuted: () => void;
  /** Sets a provisional volume, e.g. while a slider is being dragged. */
  adjustPlaybackVolume: (value: number) => void;
  /** Commits the current provisional volume as the value to restore on unmute. */
  commitPlaybackVolume: () => void;
}
interface VolumeControlsInputs {
  // While true, a volume of 0 is requested from the sink without changing the
  // user-visible volume state
  pretendToBeDisconnected$: Behavior<boolean>;
  /**
   * The callback to run to notify the module performing audio playback of the
   * requested volume.
   */
  sink$: Behavior<(volume: number) => void>;
}
/**
* Creates a set of controls for audio playback volume and syncs this with the
* audio playback module for the duration of the scope.
*/
/**
 * Creates a set of controls for audio playback volume and syncs this with the
 * audio playback module for the duration of the scope.
 *
 * The state machine tracks two values: the current volume, and the volume
 * last committed (used to restore the level when unmuting).
 */
export function createVolumeControls(
  scope: ObservableScope,
  { pretendToBeDisconnected$, sink$ }: VolumeControlsInputs,
): VolumeControls {
  // UI events feeding the volume state machine
  const muteToggles$ = new Subject<"toggle mute">();
  const sliderMoves$ = new Subject<number>();
  const sliderReleases$ = new Subject<"commit">();

  // Fold the event streams into the current volume
  const playbackVolume$ = scope.behavior<number>(
    merge(muteToggles$, sliderMoves$, sliderReleases$).pipe(
      accumulate({ volume: 1, committedVolume: 1 }, (state, event) => {
        if (event === "toggle mute") {
          // Muting drops the volume to zero; unmuting restores the last
          // committed level
          return {
            ...state,
            volume: state.volume === 0 ? state.committedVolume : 0,
          };
        }
        if (event === "commit") {
          // Dragging the slider to zero should have the same effect as
          // muting: keep the original committed volume, as if it were never
          // dragged
          const committedVolume =
            state.volume === 0 ? state.committedVolume : state.volume;
          return { ...state, committedVolume };
        }
        // Any numeric event is a slider adjustment
        return { ...state, volume: event };
      }),
      map((state) => state.volume),
    ),
  );

  // The volume we actually request from playback: forced to zero while
  // pretending to be disconnected (since we don't necessarily want that to
  // modify the UI state)
  const effectiveVolume$ = pretendToBeDisconnected$.pipe(
    switchMap((disconnected) => (disconnected ? of(0) : playbackVolume$)),
  );
  // Sync the requested volume with the audio playback module
  combineLatest([sink$, effectiveVolume$])
    .pipe(scope.bind())
    .subscribe(([sink, volume]) => sink(volume));

  return {
    playbackVolume$,
    playbackMuted$: scope.behavior<boolean>(
      playbackVolume$.pipe(map((volume) => volume === 0)),
    ),
    togglePlaybackMuted: (): void => muteToggles$.next("toggle mute"),
    adjustPlaybackVolume: (value: number): void => sliderMoves$.next(value),
    commitPlaybackVolume: (): void => sliderReleases$.next("commit"),
  };
}

View File

@@ -5,14 +5,14 @@ SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type LocalUserMediaViewModel } from "./media/LocalUserMediaViewModel.ts";
import { type MediaViewModel } from "./media/MediaViewModel.ts";
import { type RingingMediaViewModel } from "./media/RingingMediaViewModel.ts";
import { type UserMediaViewModel } from "./media/UserMediaViewModel.ts";
import {
type GridTileViewModel,
type SpotlightTileViewModel,
} from "./TileViewModel.ts";
import {
type MediaViewModel,
type UserMediaViewModel,
} from "./MediaViewModel.ts";
export interface GridLayoutMedia {
type: "grid";
@@ -40,8 +40,8 @@ export interface SpotlightExpandedLayoutMedia {
export interface OneOnOneLayoutMedia {
type: "one-on-one";
local: UserMediaViewModel;
remote: UserMediaViewModel;
spotlight: UserMediaViewModel;
pip: LocalUserMediaViewModel | RingingMediaViewModel;
}
export interface PipLayoutMedia {
@@ -86,8 +86,8 @@ export interface SpotlightExpandedLayout {
export interface OneOnOneLayout {
type: "one-on-one";
local: GridTileViewModel;
remote: GridTileViewModel;
spotlight: GridTileViewModel;
pip: GridTileViewModel;
}
export interface PipLayout {

View File

@@ -0,0 +1,32 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type LocalParticipant } from "livekit-client";
import { type Behavior } from "../Behavior";
import {
type BaseScreenShareInputs,
type BaseScreenShareViewModel,
createBaseScreenShare,
} from "./ScreenShareViewModel";
import { type ObservableScope } from "../ObservableScope";
export interface LocalScreenShareViewModel extends BaseScreenShareViewModel {
local: true;
}
export interface LocalScreenShareInputs extends BaseScreenShareInputs {
participant$: Behavior<LocalParticipant | null>;
}
/**
 * Creates the view model for a screen share published by the local user:
 * the shared screen-share state plus the `local: true` discriminant.
 */
export function createLocalScreenShare(
  scope: ObservableScope,
  inputs: LocalScreenShareInputs,
): LocalScreenShareViewModel {
  const base = createBaseScreenShare(scope, inputs);
  return { ...base, local: true };
}

View File

@@ -0,0 +1,137 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
facingModeFromLocalTrack,
type LocalParticipant,
LocalVideoTrack,
TrackEvent,
} from "livekit-client";
import {
fromEvent,
map,
merge,
type Observable,
of,
startWith,
switchMap,
} from "rxjs";
import { logger } from "matrix-js-sdk/lib/logger";
import { type Behavior } from "../Behavior";
import {
type BaseUserMediaInputs,
type BaseUserMediaViewModel,
createBaseUserMedia,
} from "./UserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
import { alwaysShowSelf } from "../../settings/settings";
import { platform } from "../../Platform";
import { type MediaDevices } from "../MediaDevices";
export interface LocalUserMediaViewModel extends BaseUserMediaViewModel {
local: true;
/**
* Whether the video should be mirrored.
*/
mirror$: Behavior<boolean>;
/**
* Whether to show this tile in a highly visible location near the start of
* the grid.
*/
alwaysShow$: Behavior<boolean>;
setAlwaysShow: (value: boolean) => void;
switchCamera$: Behavior<(() => void) | null>;
}
export interface LocalUserMediaInputs extends Omit<
BaseUserMediaInputs,
"statsType"
> {
participant$: Behavior<LocalParticipant | null>;
mediaDevices: MediaDevices;
}
/**
 * Creates the view model for the local user's own camera/microphone media.
 *
 * Extends the base user media with local-only features: video mirroring for
 * front-facing cameras, the "always show" pin, and (off desktop) a shortcut
 * for switching between front and back cameras.
 */
export function createLocalUserMedia(
  scope: ObservableScope,
  { mediaDevices, ...inputs }: LocalUserMediaInputs,
): LocalUserMediaViewModel {
  // Local media publishes, so stats come from the outbound RTP stream
  const baseUserMedia = createBaseUserMedia(scope, {
    ...inputs,
    statsType: "outbound-rtp",
  });
  /**
   * The local video track as an observable that emits whenever the track
   * changes, the camera is switched, or the track is muted.
   */
  const videoTrack$: Observable<LocalVideoTrack | null> =
    baseUserMedia.video$.pipe(
      switchMap((v) => {
        const track = v?.publication.track;
        if (!(track instanceof LocalVideoTrack)) return of(null);
        return merge(
          // Watch for track restarts because they indicate a camera switch.
          // This event is also emitted when unmuting the track object.
          fromEvent(track, TrackEvent.Restarted).pipe(
            startWith(null),
            map(() => track),
          ),
          // When the track object is muted, reset it to null.
          fromEvent(track, TrackEvent.Muted).pipe(map(() => null)),
        );
      }),
    );
  return {
    ...baseUserMedia,
    local: true,
    mirror$: scope.behavior(
      videoTrack$.pipe(
        // Mirror only front-facing cameras (those that face the user)
        map(
          (track) =>
            track !== null &&
            facingModeFromLocalTrack(track).facingMode === "user",
        ),
      ),
    ),
    // "Always show" is an app-level setting shared by all local media, so it
    // survives across view model instances
    alwaysShow$: alwaysShowSelf.value$,
    setAlwaysShow: alwaysShowSelf.setValue,
    switchCamera$: scope.behavior(
      // No switch-camera shortcut on desktop
      platform === "desktop"
        ? of(null)
        : videoTrack$.pipe(
            map((track) => {
              if (track === null) return null;
              const facingMode = facingModeFromLocalTrack(track).facingMode;
              // If the camera isn't front or back-facing, don't provide a switch
              // camera shortcut at all
              if (facingMode !== "user" && facingMode !== "environment")
                return null;
              // Restart the track with a camera facing the opposite direction
              return (): void =>
                void track
                  .restartTrack({
                    facingMode: facingMode === "user" ? "environment" : "user",
                  })
                  .then(() => {
                    // Inform the MediaDevices which camera was chosen
                    const deviceId =
                      track.mediaStreamTrack.getSettings().deviceId;
                    if (deviceId !== undefined)
                      mediaDevices.videoInput.select(deviceId);
                  })
                  .catch((e) =>
                    logger.error("Failed to switch camera", facingMode, e),
                  );
            }),
          ),
    ),
  };
}

View File

@@ -9,6 +9,7 @@ import { expect, onTestFinished, test, vi } from "vitest";
import {
type LocalTrackPublication,
LocalVideoTrack,
Track,
TrackEvent,
} from "livekit-client";
import { waitFor } from "@testing-library/dom";
@@ -17,13 +18,13 @@ import {
mockLocalParticipant,
mockMediaDevices,
mockRtcMembership,
createLocalMedia,
createRemoteMedia,
mockLocalMedia,
mockRemoteMedia,
withTestScheduler,
mockRemoteParticipant,
} from "../utils/test";
import { getValue } from "../utils/observable";
import { constant } from "./Behavior";
mockRemoteScreenShare,
} from "../../utils/test";
import { constant } from "../Behavior";
global.MediaStreamTrack = class {} as unknown as {
new (): MediaStreamTrack;
@@ -35,7 +36,7 @@ global.MediaStream = class {} as unknown as {
};
const platformMock = vi.hoisted(() => vi.fn(() => "desktop"));
vi.mock("../Platform", () => ({
vi.mock("../../Platform", () => ({
get platform(): string {
return platformMock();
},
@@ -45,7 +46,7 @@ const rtcMembership = mockRtcMembership("@alice:example.org", "AAAA");
test("control a participant's volume", () => {
const setVolumeSpy = vi.fn();
const vm = createRemoteMedia(
const vm = mockRemoteMedia(
rtcMembership,
{},
mockRemoteParticipant({ setVolume: setVolumeSpy }),
@@ -54,33 +55,33 @@ test("control a participant's volume", () => {
schedule("-ab---c---d|", {
a() {
// Try muting by toggling
vm.toggleLocallyMuted();
vm.togglePlaybackMuted();
expect(setVolumeSpy).toHaveBeenLastCalledWith(0);
},
b() {
// Try unmuting by dragging the slider back up
vm.setLocalVolume(0.6);
vm.setLocalVolume(0.8);
vm.commitLocalVolume();
vm.adjustPlaybackVolume(0.6);
vm.adjustPlaybackVolume(0.8);
vm.commitPlaybackVolume();
expect(setVolumeSpy).toHaveBeenCalledWith(0.6);
expect(setVolumeSpy).toHaveBeenLastCalledWith(0.8);
},
c() {
// Try muting by dragging the slider back down
vm.setLocalVolume(0.2);
vm.setLocalVolume(0);
vm.commitLocalVolume();
vm.adjustPlaybackVolume(0.2);
vm.adjustPlaybackVolume(0);
vm.commitPlaybackVolume();
expect(setVolumeSpy).toHaveBeenCalledWith(0.2);
expect(setVolumeSpy).toHaveBeenLastCalledWith(0);
},
d() {
// Try unmuting by toggling
vm.toggleLocallyMuted();
vm.togglePlaybackMuted();
// The volume should return to the last non-zero committed volume
expect(setVolumeSpy).toHaveBeenLastCalledWith(0.8);
},
});
expectObservable(vm.localVolume$).toBe("ab(cd)(ef)g", {
expectObservable(vm.playbackVolume$).toBe("ab(cd)(ef)g", {
a: 1,
b: 0,
c: 0.6,
@@ -92,23 +93,75 @@ test("control a participant's volume", () => {
});
});
test("toggle fit/contain for a participant's video", () => {
const vm = createRemoteMedia(rtcMembership, {}, mockRemoteParticipant({}));
test("control a participant's screen share volume", () => {
const setVolumeSpy = vi.fn();
const vm = mockRemoteScreenShare(
rtcMembership,
{},
mockRemoteParticipant({ setVolume: setVolumeSpy }),
);
withTestScheduler(({ expectObservable, schedule }) => {
schedule("-ab|", {
a: () => vm.toggleFitContain(),
b: () => vm.toggleFitContain(),
schedule("-ab---c---d|", {
a() {
// Try muting by toggling
vm.togglePlaybackMuted();
expect(setVolumeSpy).toHaveBeenLastCalledWith(
0,
Track.Source.ScreenShareAudio,
);
},
b() {
// Try unmuting by dragging the slider back up
vm.adjustPlaybackVolume(0.6);
vm.adjustPlaybackVolume(0.8);
vm.commitPlaybackVolume();
expect(setVolumeSpy).toHaveBeenCalledWith(
0.6,
Track.Source.ScreenShareAudio,
);
expect(setVolumeSpy).toHaveBeenLastCalledWith(
0.8,
Track.Source.ScreenShareAudio,
);
},
c() {
// Try muting by dragging the slider back down
vm.adjustPlaybackVolume(0.2);
vm.adjustPlaybackVolume(0);
vm.commitPlaybackVolume();
expect(setVolumeSpy).toHaveBeenCalledWith(
0.2,
Track.Source.ScreenShareAudio,
);
expect(setVolumeSpy).toHaveBeenLastCalledWith(
0,
Track.Source.ScreenShareAudio,
);
},
d() {
// Try unmuting by toggling
vm.togglePlaybackMuted();
// The volume should return to the last non-zero committed volume
expect(setVolumeSpy).toHaveBeenLastCalledWith(
0.8,
Track.Source.ScreenShareAudio,
);
},
});
expectObservable(vm.cropVideo$).toBe("abc", {
a: true,
b: false,
c: true,
expectObservable(vm.playbackVolume$).toBe("ab(cd)(ef)g", {
a: 1,
b: 0,
c: 0.6,
d: 0.8,
e: 0.2,
f: 0,
g: 0.8,
});
});
});
test("local media remembers whether it should always be shown", () => {
const vm1 = createLocalMedia(
const vm1 = mockLocalMedia(
rtcMembership,
{},
mockLocalParticipant({}),
@@ -120,7 +173,7 @@ test("local media remembers whether it should always be shown", () => {
});
// Next local media should start out *not* always shown
const vm2 = createLocalMedia(
const vm2 = mockLocalMedia(
rtcMembership,
{},
mockLocalParticipant({}),
@@ -166,7 +219,7 @@ test("switch cameras", async () => {
const selectVideoInput = vi.fn();
const vm = createLocalMedia(
const vm = mockLocalMedia(
rtcMembership,
{},
mockLocalParticipant({
@@ -184,7 +237,7 @@ test("switch cameras", async () => {
);
// Switch to back camera
getValue(vm.switchCamera$)!();
vm.switchCamera$.value!();
expect(restartTrack).toHaveBeenCalledExactlyOnceWith({
facingMode: "environment",
});
@@ -195,7 +248,7 @@ test("switch cameras", async () => {
expect(deviceId).toBe("back camera");
// Switch to front camera
getValue(vm.switchCamera$)!();
vm.switchCamera$.value!();
expect(restartTrack).toHaveBeenCalledTimes(2);
expect(restartTrack).toHaveBeenLastCalledWith({ facingMode: "user" });
await waitFor(() => {
@@ -206,17 +259,17 @@ test("switch cameras", async () => {
});
test("remote media is in waiting state when participant has not yet connected", () => {
const vm = createRemoteMedia(rtcMembership, {}, null); // null participant
const vm = mockRemoteMedia(rtcMembership, {}, null); // null participant
expect(vm.waitingForMedia$.value).toBe(true);
});
test("remote media is not in waiting state when participant is connected", () => {
const vm = createRemoteMedia(rtcMembership, {}, mockRemoteParticipant({}));
const vm = mockRemoteMedia(rtcMembership, {}, mockRemoteParticipant({}));
expect(vm.waitingForMedia$.value).toBe(false);
});
test("remote media is not in waiting state when participant is connected with no publications", () => {
const vm = createRemoteMedia(
const vm = mockRemoteMedia(
rtcMembership,
{},
mockRemoteParticipant({
@@ -228,7 +281,7 @@ test("remote media is not in waiting state when participant is connected with no
});
test("remote media is not in waiting state when user does not intend to publish anywhere", () => {
const vm = createRemoteMedia(
const vm = mockRemoteMedia(
rtcMembership,
{},
mockRemoteParticipant({}),

View File

@@ -0,0 +1,48 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type Behavior } from "../Behavior";
import { type RingingMediaViewModel } from "./RingingMediaViewModel";
import { type ScreenShareViewModel } from "./ScreenShareViewModel";
import { type UserMediaViewModel } from "./UserMediaViewModel";
/**
* A participant's media.
*/
export type MediaViewModel =
| UserMediaViewModel
| ScreenShareViewModel
| RingingMediaViewModel;
/**
* Properties which are common to all MediaViewModels.
*/
export interface BaseMediaViewModel {
/**
* An opaque identifier for this media.
*/
id: string;
/**
* The Matrix user to which this media belongs.
*/
userId: string;
displayName$: Behavior<string>;
mxcAvatarUrl$: Behavior<string | undefined>;
}
export type BaseMediaInputs = BaseMediaViewModel;
/**
 * Strips a media inputs object down to exactly the fields of
 * BaseMediaViewModel, discarding any extra properties carried by wider
 * input types.
 */
export function createBaseMedia(inputs: BaseMediaInputs): BaseMediaViewModel {
  const { id, userId, displayName$, mxcAvatarUrl$ } = inputs;
  return { id, userId, displayName$, mxcAvatarUrl$ };
}

View File

@@ -0,0 +1,279 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
type Room as LivekitRoom,
RoomEvent as LivekitRoomEvent,
type Participant,
type Track,
} from "livekit-client";
import {
type AudioSource,
roomEventSelector,
type TrackReference,
type VideoSource,
} from "@livekit/components-core";
import { type LocalParticipant, type RemoteParticipant } from "livekit-client";
import {
  combineLatest,
  distinctUntilChanged,
  filter,
  map,
  type Observable,
  of,
  scan,
  startWith,
  switchMap,
  throttleTime,
} from "rxjs";
import { type Behavior } from "../Behavior";
import { type BaseMediaViewModel, createBaseMedia } from "./MediaViewModel";
import { type EncryptionSystem } from "../../e2ee/sharedKeyManagement";
import { type ObservableScope } from "../ObservableScope";
import { observeTrackReference$ } from "../observeTrackReference";
import { E2eeType } from "../../e2ee/e2eeType";
import { observeInboundRtpStreamStats$ } from "./observeRtpStreamStats";
import { type UserMediaViewModel } from "./UserMediaViewModel";
import { type ScreenShareViewModel } from "./ScreenShareViewModel";
// TODO: Encryption status is kinda broken and thus unused right now. Remove?
export enum EncryptionStatus {
Connecting,
Okay,
KeyMissing,
KeyInvalid,
PasswordInvalid,
}
/**
* Properties common to all MemberMediaViewModels.
*/
export interface BaseMemberMediaViewModel extends BaseMediaViewModel {
/**
* The LiveKit video track for this media.
*/
video$: Behavior<TrackReference | undefined>;
/**
* The URL of the LiveKit focus on which this member should be publishing.
* Exposed for debugging.
*/
focusUrl$: Behavior<string | undefined>;
/**
* Whether there should be a warning that this media is unencrypted.
*/
unencryptedWarning$: Behavior<boolean>;
encryptionStatus$: Behavior<EncryptionStatus>;
}
export interface MemberMediaInputs extends BaseMediaViewModel {
participant$: Behavior<LocalParticipant | RemoteParticipant | null>;
livekitRoom$: Behavior<LivekitRoom | undefined>;
audioSource: AudioSource;
videoSource: VideoSource;
focusUrl$: Behavior<string | undefined>;
encryptionSystem: EncryptionSystem;
}
/**
 * Builds the state common to all member media (user media and screen shares):
 * the video track reference, the focus URL, the unencrypted-media warning,
 * and the encryption status.
 */
export function createMemberMedia(
  scope: ObservableScope,
  {
    participant$,
    livekitRoom$,
    audioSource,
    videoSource,
    focusUrl$,
    encryptionSystem,
    ...inputs
  }: MemberMediaInputs,
): BaseMemberMediaViewModel {
  // Track reference for the given source, re-resolved whenever the
  // participant appears or changes; undefined while there is no participant
  const trackBehavior$ = (
    source: Track.Source,
  ): Behavior<TrackReference | undefined> =>
    scope.behavior(
      participant$.pipe(
        switchMap((p) =>
          !p ? of(undefined) : observeTrackReference$(p, source),
        ),
      ),
    );
  const audio$ = trackBehavior$(audioSource);
  const video$ = trackBehavior$(videoSource);
  return {
    ...createBaseMedia(inputs),
    video$,
    focusUrl$,
    // Warn when the call is supposed to be encrypted but either published
    // track reports itself as unencrypted
    unencryptedWarning$: scope.behavior(
      combineLatest(
        [audio$, video$],
        (a, v) =>
          encryptionSystem.kind !== E2eeType.NONE &&
          (a?.publication.isEncrypted === false ||
            v?.publication.isEncrypted === false),
      ),
    ),
    encryptionStatus$: scope.behavior(
      participant$.pipe(
        switchMap((participant): Observable<EncryptionStatus> => {
          if (!participant) {
            // No participant yet: still connecting
            return of(EncryptionStatus.Connecting);
          } else if (
            participant.isLocal ||
            encryptionSystem.kind === E2eeType.NONE
          ) {
            // Our own media, or an unencrypted call, is trivially okay
            return of(EncryptionStatus.Okay);
          } else if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            // Per-participant keys: distinguish a missing key from an
            // invalid one, and treat successfully-decoding media on either
            // track as confirmation that things are okay
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "MissingKey",
              ),
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(([keyMissing, keyInvalid, audioOkay, videoOkay]) => {
                // Key errors take precedence over decode success
                if (keyMissing) return EncryptionStatus.KeyMissing;
                if (keyInvalid) return EncryptionStatus.KeyInvalid;
                if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                return undefined; // no change
              }),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          } else {
            // Shared-key encryption: an invalid key here means the entered
            // password was wrong
            return combineLatest([
              encryptionErrorObservable$(
                livekitRoom$,
                participant,
                encryptionSystem,
                "InvalidKey",
              ),
              observeRemoteTrackReceivingOkay$(participant, audioSource),
              observeRemoteTrackReceivingOkay$(participant, videoSource),
            ]).pipe(
              map(
                ([keyInvalid, audioOkay, videoOkay]):
                  | EncryptionStatus
                  | undefined => {
                  if (keyInvalid) return EncryptionStatus.PasswordInvalid;
                  if (audioOkay || videoOkay) return EncryptionStatus.Okay;
                  return undefined; // no change
                },
              ),
              filter((x) => !!x),
              startWith(EncryptionStatus.Connecting),
            );
          }
        }),
      ),
    ),
  };
}
/**
 * Observes whether the LiveKit room has reported an encryption error whose
 * message matches the given criteria string (e.g. "MissingKey"/"InvalidKey")
 * for the given participant.
 *
 * Emits false initially, then updates as matching error events arrive,
 * de-duplicated and throttled.
 */
function encryptionErrorObservable$(
  room$: Behavior<LivekitRoom | undefined>,
  participant: Participant,
  encryptionSystem: EncryptionSystem,
  criteria: string,
): Observable<boolean> {
  return room$.pipe(
    switchMap((room) => {
      // No room means no transport that could produce encryption errors
      if (room === undefined) return of(false);
      return roomEventSelector(room, LivekitRoomEvent.EncryptionError).pipe(
        map((e) => {
          const [err] = e;
          if (encryptionSystem.kind === E2eeType.PER_PARTICIPANT) {
            return (
              // Ideally we would pull the participant identity from the field on the error.
              // However, it gets lost in the serialization process between workers.
              // So, instead we do a string match
              (err?.message.includes(participant.identity) &&
                err?.message.includes(criteria)) ??
              false
            );
          } else if (encryptionSystem.kind === E2eeType.SHARED_KEY) {
            return !!err?.message.includes(criteria);
          }
          // Unencrypted calls never match
          return false;
        }),
      );
    }),
    distinctUntilChanged(),
    // Throttle to avoid spamming the UI
    // NOTE(review): leading-edge throttling drops a state change occurring
    // within 1s of the previous one, leaving the last emission stale until
    // the next error event — confirm this is acceptable
    throttleTime(1000),
    startWith(false),
  );
}
function observeRemoteTrackReceivingOkay$(
participant: Participant,
source: Track.Source,
): Observable<boolean | undefined> {
let lastStats: {
framesDecoded: number | undefined;
framesDropped: number | undefined;
framesReceived: number | undefined;
} = {
framesDecoded: undefined,
framesDropped: undefined,
framesReceived: undefined,
};
return observeInboundRtpStreamStats$(participant, source).pipe(
map((stats) => {
if (!stats) return undefined;
const { framesDecoded, framesDropped, framesReceived } = stats;
return {
framesDecoded,
framesDropped,
framesReceived,
};
}),
filter((newStats) => !!newStats),
map((newStats): boolean | undefined => {
const oldStats = lastStats;
lastStats = newStats;
if (
typeof newStats.framesReceived === "number" &&
typeof oldStats.framesReceived === "number" &&
typeof newStats.framesDecoded === "number" &&
typeof oldStats.framesDecoded === "number"
) {
const framesReceivedDelta =
newStats.framesReceived - oldStats.framesReceived;
const framesDecodedDelta =
newStats.framesDecoded - oldStats.framesDecoded;
// if we received >0 frames and managed to decode >0 frames then we treat that as success
if (framesReceivedDelta > 0) {
return framesDecodedDelta > 0;
}
}
// no change
return undefined;
}),
filter((x) => typeof x === "boolean"),
startWith(undefined),
);
}
/**
* Media belonging to an active member of the call.
*/
export type MemberMediaViewModel = UserMediaViewModel | ScreenShareViewModel;

View File

@@ -0,0 +1,72 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { Track, type RemoteParticipant } from "livekit-client";
import { map, of, switchMap } from "rxjs";
import { type Behavior } from "../Behavior";
import {
type BaseScreenShareInputs,
type BaseScreenShareViewModel,
createBaseScreenShare,
} from "./ScreenShareViewModel";
import { type ObservableScope } from "../ObservableScope";
import { createVolumeControls, type VolumeControls } from "../VolumeControls";
import { observeTrackReference$ } from "../observeTrackReference";
export interface RemoteScreenShareViewModel
extends BaseScreenShareViewModel, VolumeControls {
local: false;
/**
* Whether this screen share's video should be displayed.
*/
videoEnabled$: Behavior<boolean>;
/**
* Whether this screen share should be considered to have an audio track.
*/
audioEnabled$: Behavior<boolean>;
}
export interface RemoteScreenShareInputs extends BaseScreenShareInputs {
participant$: Behavior<RemoteParticipant | null>;
pretendToBeDisconnected$: Behavior<boolean>;
}
export function createRemoteScreenShare(
scope: ObservableScope,
{ pretendToBeDisconnected$, ...inputs }: RemoteScreenShareInputs,
): RemoteScreenShareViewModel {
return {
...createBaseScreenShare(scope, inputs),
...createVolumeControls(scope, {
pretendToBeDisconnected$,
sink$: scope.behavior(
inputs.participant$.pipe(
map(
(p) => (volume) =>
p?.setVolume(volume, Track.Source.ScreenShareAudio),
),
),
),
}),
local: false,
videoEnabled$: scope.behavior(
pretendToBeDisconnected$.pipe(map((disconnected) => !disconnected)),
),
audioEnabled$: scope.behavior(
inputs.participant$.pipe(
switchMap((p) =>
p
? observeTrackReference$(p, Track.Source.ScreenShareAudio)
: of(null),
),
map(Boolean),
),
),
};
}

View File

@@ -0,0 +1,82 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type RemoteParticipant } from "livekit-client";
import { combineLatest, map, of, switchMap } from "rxjs";
import { type Behavior } from "../Behavior";
import { createVolumeControls, type VolumeControls } from "../VolumeControls";
import {
type BaseUserMediaInputs,
type BaseUserMediaViewModel,
createBaseUserMedia,
} from "./UserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
export interface RemoteUserMediaViewModel
extends BaseUserMediaViewModel, VolumeControls {
local: false;
/**
* Whether we are waiting for this user's LiveKit participant to exist. This
* could be because either we or the remote party are still connecting.
*/
waitingForMedia$: Behavior<boolean>;
}
export interface RemoteUserMediaInputs extends Omit<
BaseUserMediaInputs,
"statsType"
> {
participant$: Behavior<RemoteParticipant | null>;
pretendToBeDisconnected$: Behavior<boolean>;
}
export function createRemoteUserMedia(
scope: ObservableScope,
{ pretendToBeDisconnected$, ...inputs }: RemoteUserMediaInputs,
): RemoteUserMediaViewModel {
const baseUserMedia = createBaseUserMedia(scope, {
...inputs,
statsType: "inbound-rtp",
});
return {
...baseUserMedia,
...createVolumeControls(scope, {
pretendToBeDisconnected$,
sink$: scope.behavior(
inputs.participant$.pipe(map((p) => (volume) => p?.setVolume(volume))),
),
}),
local: false,
speaking$: scope.behavior(
pretendToBeDisconnected$.pipe(
switchMap((disconnected) =>
disconnected ? of(false) : baseUserMedia.speaking$,
),
),
),
videoEnabled$: scope.behavior(
pretendToBeDisconnected$.pipe(
switchMap((disconnected) =>
disconnected ? of(false) : baseUserMedia.videoEnabled$,
),
),
),
waitingForMedia$: scope.behavior(
combineLatest(
[inputs.livekitRoom$, inputs.participant$],
(livekitRoom, participant) =>
// If livekitRoom is undefined, the user is not attempting to publish on
// any transport and so we shouldn't expect a participant. (They might
// be a subscribe-only bot for example.)
livekitRoom !== undefined && participant === null,
),
),
};
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { type Behavior } from "../Behavior";
import { type MuteStates } from "../MuteStates";
import {
type BaseMediaInputs,
type BaseMediaViewModel,
createBaseMedia,
} from "./MediaViewModel";
/**
* Media representing a user who is not yet part of the call — one that we are
* *ringing*.
*/
export interface RingingMediaViewModel extends BaseMediaViewModel {
type: "ringing";
pickupState$: Behavior<"ringing" | "timeout" | "decline">;
/**
* Whether this media would be expected to have video, were it not simply a
* placeholder.
*/
videoEnabled$: Behavior<boolean>;
}
export interface RingingMediaInputs extends BaseMediaInputs {
pickupState$: Behavior<"ringing" | "timeout" | "decline">;
/**
* The local user's own mute states.
*/
muteStates: MuteStates;
}
/**
 * Builds the placeholder media shown for a user we are ringing, before they
 * have joined the call.
 */
export function createRingingMedia({
  pickupState$,
  muteStates,
  ...inputs
}: RingingMediaInputs): RingingMediaViewModel {
  const base = createBaseMedia(inputs);
  // If our own video is enabled, then this is a video call and we would
  // expect remote media to have video as well
  const videoEnabled$ = muteStates.video.enabled$;
  return { ...base, type: "ringing", pickupState$, videoEnabled$ };
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { Track } from "livekit-client";
import { type ObservableScope } from "../ObservableScope";
import { type LocalScreenShareViewModel } from "./LocalScreenShareViewModel";
import {
createMemberMedia,
type MemberMediaInputs,
type BaseMemberMediaViewModel,
} from "./MemberMediaViewModel";
import { type RemoteScreenShareViewModel } from "./RemoteScreenShareViewModel";
/**
* A participant's screen share media.
*/
export type ScreenShareViewModel =
| LocalScreenShareViewModel
| RemoteScreenShareViewModel;
/**
* Properties which are common to all ScreenShareViewModels.
*/
export interface BaseScreenShareViewModel extends BaseMemberMediaViewModel {
type: "screen share";
}
export type BaseScreenShareInputs = Omit<
MemberMediaInputs,
"audioSource" | "videoSource"
>;
/**
 * Builds the state shared by local and remote screen share view models.
 */
export function createBaseScreenShare(
  scope: ObservableScope,
  inputs: BaseScreenShareInputs,
): BaseScreenShareViewModel {
  // A screen share is member media bound to the screen capture sources
  const memberMedia = createMemberMedia(scope, {
    ...inputs,
    audioSource: Track.Source.ScreenShareAudio,
    videoSource: Track.Source.ScreenShare,
  });
  return { ...memberMedia, type: "screen share" };
}

View File

@@ -0,0 +1,164 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
BehaviorSubject,
combineLatest,
map,
type Observable,
of,
Subject,
switchMap,
} from "rxjs";
import {
observeParticipantEvents,
observeParticipantMedia,
} from "@livekit/components-core";
import { ParticipantEvent, Track } from "livekit-client";
import { type ReactionOption } from "../../reactions";
import { type Behavior } from "../Behavior";
import { type LocalUserMediaViewModel } from "./LocalUserMediaViewModel";
import {
createMemberMedia,
type MemberMediaInputs,
type BaseMemberMediaViewModel,
} from "./MemberMediaViewModel";
import { type RemoteUserMediaViewModel } from "./RemoteUserMediaViewModel";
import { type ObservableScope } from "../ObservableScope";
import { showConnectionStats } from "../../settings/settings";
import { observeRtpStreamStats$ } from "./observeRtpStreamStats";
import { videoFit$, videoSizeFromParticipant$ } from "../../utils/videoFit.ts";
/**
 * A participant's user media (i.e. their microphone and camera feed), as
 * opposed to their screen share: either our own media or a remote
 * participant's.
 */
export type UserMediaViewModel =
  | LocalUserMediaViewModel
  | RemoteUserMediaViewModel;
/**
 * Properties which are common to all UserMediaViewModels.
 */
export interface BaseUserMediaViewModel extends BaseMemberMediaViewModel {
  /** Discriminant distinguishing user media from screen shares. */
  type: "user";
  /** Whether the participant is currently speaking. */
  speaking$: Behavior<boolean>;
  /** Whether a microphone track is published and unmuted. */
  audioEnabled$: Behavior<boolean>;
  /** Whether a camera track is published and unmuted. */
  videoEnabled$: Behavior<boolean>;
  /** How the video should be fitted into its tile: cropped or letterboxed. */
  videoFit$: Behavior<"cover" | "contain">;
  /** Requests a toggle of the video cropping (pushes to an internal subject). */
  toggleCropVideo: () => void;
  /**
   * The expected identity of the LiveKit participant. Exposed for debugging.
   */
  rtcBackendIdentity: string;
  /** When the participant raised their hand, or null if their hand is down. */
  handRaised$: Behavior<Date | null>;
  /** The participant's currently displayed reaction, if any. */
  reaction$: Behavior<ReactionOption | null>;
  /**
   * Stats for the microphone RTP stream. Emits undefined while the "show
   * connection stats" setting is off or no participant is present.
   */
  audioStreamStats$: Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
  /** Stats for the camera RTP stream; same gating as audioStreamStats$. */
  videoStreamStats$: Observable<
    RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
  >;
  /**
   * Set the target dimensions of the HTML element (final dimension after anim).
   * This can be used to determine the best video fit (fit to frame / keep ratio).
   * @param targetWidth - The target width of the HTML element displaying the video.
   * @param targetHeight - The target height of the HTML element displaying the video.
   */
  setTargetDimensions: (targetWidth: number, targetHeight: number) => void;
}
/**
 * Inputs needed to construct the shared part of a user media view model.
 * The track sources are fixed to microphone/camera by createBaseUserMedia.
 */
export interface BaseUserMediaInputs
  extends Omit<MemberMediaInputs, "audioSource" | "videoSource"> {
  /** The expected identity of the LiveKit participant; for debugging. */
  rtcBackendIdentity: string;
  /** When the participant raised their hand, or null if their hand is down. */
  handRaised$: Behavior<Date | null>;
  /** The participant's currently displayed reaction, if any. */
  reaction$: Behavior<ReactionOption | null>;
  /** Which direction of RTP stream stats to observe (receive vs. send). */
  statsType: "inbound-rtp" | "outbound-rtp";
}
export function createBaseUserMedia(
scope: ObservableScope,
{
rtcBackendIdentity,
handRaised$,
reaction$,
statsType,
...inputs
}: BaseUserMediaInputs,
): BaseUserMediaViewModel {
const { participant$ } = inputs;
const media$ = scope.behavior(
participant$.pipe(
switchMap((p) => (p && observeParticipantMedia(p)) ?? of(undefined)),
),
);
const toggleCropVideo$ = new Subject<void>();
// The target size of the video element, used to determine the best video fit.
// The target size is the final size of the HTML element after any animations have completed.
const targetSize$ = new BehaviorSubject<
{ width: number; height: number } | undefined
>(undefined);
return {
...createMemberMedia(scope, {
...inputs,
audioSource: Track.Source.Microphone,
videoSource: Track.Source.Camera,
}),
type: "user",
speaking$: scope.behavior(
participant$.pipe(
switchMap((p) =>
p
? observeParticipantEvents(
p,
ParticipantEvent.IsSpeakingChanged,
).pipe(map((p) => p.isSpeaking))
: of(false),
),
),
),
audioEnabled$: scope.behavior(
media$.pipe(map((m) => m?.microphoneTrack?.isMuted === false)),
),
videoEnabled$: scope.behavior(
media$.pipe(map((m) => m?.cameraTrack?.isMuted === false)),
),
videoFit$: videoFit$(
scope,
videoSizeFromParticipant$(participant$),
targetSize$,
),
toggleCropVideo: () => toggleCropVideo$.next(),
rtcBackendIdentity,
handRaised$,
reaction$,
audioStreamStats$: combineLatest([
participant$,
showConnectionStats.value$,
]).pipe(
switchMap(([p, showConnectionStats]) => {
//
if (!p || !showConnectionStats) return of(undefined);
return observeRtpStreamStats$(p, Track.Source.Microphone, statsType);
}),
),
videoStreamStats$: combineLatest([
participant$,
showConnectionStats.value$,
]).pipe(
switchMap(([p, showConnectionStats]) => {
if (!p || !showConnectionStats) return of(undefined);
return observeRtpStreamStats$(p, Track.Source.Camera, statsType);
}),
),
setTargetDimensions: (targetWidth: number, targetHeight: number): void => {
targetSize$.next({ width: targetWidth, height: targetHeight });
},
};
}

View File

@@ -0,0 +1,196 @@
/*
Copyright 2025-2026 Element Software Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import { combineLatest, map, of, switchMap } from "rxjs";
import {
type LocalParticipant,
ParticipantEvent,
type RemoteParticipant,
} from "livekit-client";
import { observeParticipantEvents } from "@livekit/components-core";
import { type ObservableScope } from "../ObservableScope.ts";
import type { Behavior } from "../Behavior.ts";
import type { MediaDevices } from "../MediaDevices.ts";
import { observeSpeaker$ } from "./observeSpeaker.ts";
import { generateItems } from "../../utils/observable.ts";
import { type TaggedParticipant } from "../CallViewModel/remoteMembers/MatrixLivekitMembers.ts";
import { type UserMediaViewModel } from "./UserMediaViewModel.ts";
import { type ScreenShareViewModel } from "./ScreenShareViewModel.ts";
import {
createLocalUserMedia,
type LocalUserMediaInputs,
} from "./LocalUserMediaViewModel.ts";
import {
createRemoteUserMedia,
type RemoteUserMediaInputs,
} from "./RemoteUserMediaViewModel.ts";
import { createLocalScreenShare } from "./LocalScreenShareViewModel.ts";
import { createRemoteScreenShare } from "./RemoteScreenShareViewModel.ts";
/**
 * Sorting bins defining the order in which media tiles appear in the layout.
 * Lower ordinal values sort earlier, so the declaration order of the members
 * is significant.
 *
 * Exported because it appears in the public WrappedUserMediaViewModel type
 * (bin$); without the export, consumers of that type could not name the bins.
 */
export enum SortingBin {
  /**
   * Yourself, when the "always show self" option is on.
   */
  SelfAlwaysShown,
  /**
   * Participants that are sharing their screen.
   */
  Presenters,
  /**
   * Participants that have been speaking recently.
   */
  Speakers,
  /**
   * Participants that have their hand raised.
   */
  HandRaised,
  /**
   * Participants with video.
   */
  Video,
  /**
   * Participants not sharing any video.
   */
  NoVideo,
  /**
   * Yourself, when the "always show self" option is off.
   */
  SelfNotAlwaysShown,
}
/**
 * A user media item to be presented in a tile. This is a thin wrapper around
 * UserMediaViewModel which additionally carries data relevant to the tile
 * layout algorithms (data which the MediaView component should be ignorant of).
 */
export type WrappedUserMediaViewModel = UserMediaViewModel & {
  /**
   * All screen share media associated with this user media.
   */
  screenShares$: Behavior<ScreenShareViewModel[]>;
  /**
   * Which sorting bin the media item should be placed in. Bins with lower
   * ordinal values sort earlier in the layout.
   */
  bin$: Behavior<SortingBin>;
};
/**
 * Inputs for createWrappedUserMedia: the combined local and remote user media
 * inputs (minus participant$, which is derived from the tagged participant).
 */
interface WrappedUserMediaInputs
  extends Omit<LocalUserMediaInputs & RemoteUserMediaInputs, "participant$"> {
  /** The participant, tagged as either local or remote. */
  participant: TaggedParticipant;
  /** Only used when constructing local media. */
  mediaDevices: MediaDevices;
  /** Only passed through to remote media and remote screen shares. */
  pretendToBeDisconnected$: Behavior<boolean>;
}
/**
 * Creates a user media view model wrapped with the extra data (associated
 * screen shares and a sorting bin) needed by the tile layout algorithms.
 *
 * @param scope - Scope tying the lifetime of created observables to the tile.
 */
export function createWrappedUserMedia(
  scope: ObservableScope,
  {
    participant,
    mediaDevices,
    pretendToBeDisconnected$,
    ...inputs
  }: WrappedUserMediaInputs,
): WrappedUserMediaViewModel {
  // Local and remote participants get different concrete view models; both
  // conform to UserMediaViewModel.
  const userMedia =
    participant.type === "local"
      ? createLocalUserMedia(scope, {
          participant$: participant.value$,
          mediaDevices,
          ...inputs,
        })
      : createRemoteUserMedia(scope, {
          participant$: participant.value$,
          pretendToBeDisconnected$,
          ...inputs,
        });
  // TypeScript needs this widening of the type to happen in a separate statement
  const participant$: Behavior<LocalParticipant | RemoteParticipant | null> =
    participant.value$;
  const screenShares$ = scope.behavior(
    participant$.pipe(
      switchMap((p) =>
        p === null
          ? of([])
          : observeParticipantEvents(
              p,
              ParticipantEvent.TrackPublished,
              ParticipantEvent.TrackUnpublished,
              ParticipantEvent.LocalTrackPublished,
              ParticipantEvent.LocalTrackUnpublished,
            ).pipe(
              // Technically more than one screen share might be possible... our
              // MediaViewModels don't support it though since they look for a unique
              // track for the given source. So generateItems here is a bit overkill.
              generateItems(
                `${inputs.id} screenShares$`,
                function* (p) {
                  if (p.isScreenShareEnabled)
                    yield {
                      keys: ["screen-share"],
                      data: undefined,
                    };
                },
                (scope, _data$, key) => {
                  const id = `${inputs.id}:${key}`;
                  return participant.type === "local"
                    ? createLocalScreenShare(scope, {
                        ...inputs,
                        id,
                        participant$: participant.value$,
                      })
                    : createRemoteScreenShare(scope, {
                        ...inputs,
                        id,
                        participant$: participant.value$,
                        pretendToBeDisconnected$,
                      });
                },
              ),
            ),
      ),
    ),
  );
  const speaker$ = scope.behavior(observeSpeaker$(userMedia.speaking$));
  const presenter$ = scope.behavior(
    screenShares$.pipe(map((screenShares) => screenShares.length > 0)),
  );
  return {
    ...userMedia,
    screenShares$,
    bin$: scope.behavior(
      // The resultSelector overload of combineLatest is deprecated in RxJS;
      // project with an explicit map operator instead.
      combineLatest([
        speaker$,
        presenter$,
        userMedia.videoEnabled$,
        userMedia.handRaised$,
        // alwaysShow is only meaningful for our own media; null for remotes.
        userMedia.local ? userMedia.alwaysShow$ : of<boolean | null>(null),
      ]).pipe(
        map(([speaker, presenter, video, handRaised, alwaysShow]) => {
          if (alwaysShow !== null)
            return alwaysShow
              ? SortingBin.SelfAlwaysShown
              : SortingBin.SelfNotAlwaysShown;
          else if (presenter) return SortingBin.Presenters;
          else if (speaker) return SortingBin.Speakers;
          else if (handRaised) return SortingBin.HandRaised;
          else if (video) return SortingBin.Video;
          else return SortingBin.NoVideo;
        }),
      ),
    ),
  };
}

View File

@@ -0,0 +1,78 @@
/*
Copyright 2023, 2024 New Vector Ltd.
Copyright 2026 Element Creations Ltd.
SPDX-License-Identifier: AGPL-3.0-only OR LicenseRef-Element-Commercial
Please see LICENSE in the repository root for full details.
*/
import {
LocalTrack,
type Participant,
RemoteTrack,
type Track,
} from "livekit-client";
import {
combineLatest,
interval,
type Observable,
startWith,
switchMap,
map,
} from "rxjs";
import { observeTrackReference$ } from "../observeTrackReference";
/**
 * Polls the RTP stream stats of the participant's track for the given source
 * once per second, emitting the first stats entry of the requested type, or
 * undefined while no matching track or stats entry is available.
 */
export function observeRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
  type: "inbound-rtp" | "outbound-rtp",
): Observable<
  RTCInboundRtpStreamStats | RTCOutboundRtpStreamStats | undefined
> {
  // Re-sample whenever the track reference changes and on every 1 s tick.
  const tick$ = interval(1000).pipe(startWith(0));
  const trackRef$ = observeTrackReference$(participant, source);
  return combineLatest([trackRef$, tick$]).pipe(
    switchMap(async ([trackRef]) => {
      const track = trackRef?.publication?.track;
      // Only local and remote tracks expose getRTCStatsReport (this also
      // rules out a missing track).
      if (!(track instanceof RemoteTrack) && !(track instanceof LocalTrack)) {
        return undefined;
      }
      const report = await track.getRTCStatsReport();
      if (report) {
        for (const stats of report.values()) {
          if (stats.type === type) return stats;
        }
      }
      return undefined;
    }),
    // Emit immediately so subscribers don't wait for the first poll result.
    startWith(undefined),
  );
}
/**
 * Like {@link observeRtpStreamStats$}, but statically typed to inbound
 * (receive-side) stats.
 */
export function observeInboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCInboundRtpStreamStats | undefined> {
  // The previous runtime `map` operator only performed a type assertion;
  // assert the observable's type directly instead of running a no-op on
  // every emission.
  return observeRtpStreamStats$(
    participant,
    source,
    "inbound-rtp",
  ) as Observable<RTCInboundRtpStreamStats | undefined>;
}
/**
 * Like {@link observeRtpStreamStats$}, but statically typed to outbound
 * (send-side) stats.
 */
export function observeOutboundRtpStreamStats$(
  participant: Participant,
  source: Track.Source,
): Observable<RTCOutboundRtpStreamStats | undefined> {
  // The previous runtime `map` operator only performed a type assertion;
  // assert the observable's type directly instead of running a no-op on
  // every emission.
  return observeRtpStreamStats$(
    participant,
    source,
    "outbound-rtp",
  ) as Observable<RTCOutboundRtpStreamStats | undefined>;
}

View File

@@ -7,7 +7,7 @@ Please see LICENSE in the repository root for full details.
import { describe, test } from "vitest";
import { withTestScheduler } from "../utils/test";
import { withTestScheduler } from "../../utils/test";
import { observeSpeaker$ } from "./observeSpeaker";
const yesNo = {

Some files were not shown because too many files have changed in this diff Show More