Files
element-call-Github/dev-backend-docker-compose.yml
Timo K 7dbbd763b9 Refactor how we acquire the JWT token for the local user
(only fetch it once)

The local JWT token needs to be acquired via the right endpoint, because the
endpoint determines how our rtcBackendIdentity is computed. Depending on
whether we use sticky events or state events, a different endpoint must be
used, so this cannot be done generically in the connection manager. The JWT
token is now fetched in the localTransport, and the resolved SFU config is
passed to the connection manager.

Add JWT endpoint version and SFU config support:
- Pin matrix-js-sdk to a specific commit and update the dev auth image tag.
- Propagate the SFU config and JWT endpoint choice through the local
  transport, ConnectionManager and Connection; add a JwtEndpointVersion enum
  and a LocalTransportWithSFUConfig type (see the sketch below).
- Add a NO_MATRIX_2 auth error and locale string, thread rtcBackendIdentity
  through the UI props, and include related test, CSS and minor import
  updates.
2026-01-09 13:38:26 +01:00

176 lines · 5.2 KiB · YAML
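
A sketch of the shapes the commit message names, in TypeScript. This is
illustrative only: the member names and fields are assumptions drawn from the
commit message, not copied from the element-call source.

// Which JWT endpoint the transport must call; the endpoint determines how
// rtcBackendIdentity is computed (hypothetical member names).
enum JwtEndpointVersion {
  // Endpoint used with state-event based MatrixRTC memberships.
  V1 = "v1",
  // Endpoint used with sticky events.
  V2 = "v2",
}

// The SFU config resolved once by the local transport (assumed fields).
interface SFUConfig {
  url: string; // e.g. wss://matrix-rtc.m.localhost/livekit/sfu
  jwt: string; // token fetched only once, per the commit message
}

// The local transport resolves the SFU config itself and hands the result to
// the ConnectionManager, instead of the manager fetching it generically.
interface LocalTransportWithSFUConfig {
  jwtEndpointVersion: JwtEndpointVersion;
  sfuConfig: Promise<SFUConfig>;
}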

networks:
  ecbackend:

services:
  auth-service:
    image: ghcr.io/element-hq/lk-jwt-service:pr_139
    pull_policy: always
    hostname: auth-server
    environment:
      - LIVEKIT_JWT_PORT=6080
      - LIVEKIT_URL=wss://matrix-rtc.m.localhost/livekit/sfu
      - LIVEKIT_KEY=devkey
      - LIVEKIT_SECRET=secret
      # If the configured homeserver runs on localhost, it'll probably be using
      # a self-signed certificate
      - LIVEKIT_INSECURE_SKIP_VERIFY_TLS=YES_I_KNOW_WHAT_I_AM_DOING
      - LIVEKIT_FULL_ACCESS_HOMESERVERS=*
    deploy:
      restart_policy:
        condition: on-failure
    ports:
      # HOST_PORT:CONTAINER_PORT
      - 6080:6080
    networks:
      - ecbackend
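  # How a client obtains a LiveKit JWT from this service (a sketch: the
  # endpoint path and payload shape follow MSC4195 and are assumptions, not
  # something this file defines):
  #   POST http://localhost:6080/sfu/get
  #   { "room": "!room:m.localhost", "openid_token": { ... } }
  #   -> { "url": "wss://matrix-rtc.m.localhost/livekit/sfu", "jwt": "..." }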
  auth-service-1:
    image: ghcr.io/element-hq/lk-jwt-service:pr_139
    pull_policy: always
    hostname: auth-server-1
    environment:
      - LIVEKIT_JWT_PORT=16080
      - LIVEKIT_URL=wss://matrix-rtc.othersite.m.localhost/livekit/sfu
      - LIVEKIT_KEY=devkey
      - LIVEKIT_SECRET=secret
      # If the configured homeserver runs on localhost, it'll probably be using
      # a self-signed certificate
      - LIVEKIT_INSECURE_SKIP_VERIFY_TLS=YES_I_KNOW_WHAT_I_AM_DOING
      - LIVEKIT_FULL_ACCESS_HOMESERVERS=*
    deploy:
      restart_policy:
        condition: on-failure
    ports:
      # HOST_PORT:CONTAINER_PORT
      - 16080:16080
    networks:
      - ecbackend
  livekit:
    image: livekit/livekit-server:v1.9.4
    pull_policy: always
    hostname: livekit-sfu
    command: --dev --config /etc/livekit.yaml
    restart: unless-stopped
    # The SFU seems to work far more reliably when we let it share the host
    # network rather than opening specific ports (but why?? we're not missing
    # any…)
    ports:
      # HOST_PORT:CONTAINER_PORT
      - 7880:7880/tcp
      - 7881:7881/tcp
      - 7882:7882/tcp
      - 50100-50200:50100-50200/udp
    volumes:
      - ./backend/dev_livekit.yaml:/etc/livekit.yaml:Z
    networks:
      - ecbackend
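  # The UDP range above carries the actual media. It presumably has to match
  # the rtc port range configured in ./backend/dev_livekit.yaml (assumed to be
  # rtc.port_range_start/port_range_end), so change both together.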
  livekit-1:
    image: livekit/livekit-server:v1.9.4
    pull_policy: always
    hostname: livekit-sfu-1
    command: --dev --config /etc/livekit.yaml
    restart: unless-stopped
    # The SFU seems to work far more reliably when we let it share the host
    # network rather than opening specific ports (but why?? we're not missing
    # any…)
    ports:
      # HOST_PORT:CONTAINER_PORT
      - 17880:17880/tcp
      - 17881:17881/tcp
      - 17882:17882/tcp
      - 50300-50400:50300-50400/udp
    volumes:
      - ./backend/dev_livekit-othersite.yaml:/etc/livekit.yaml:Z
    networks:
      - ecbackend
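  # The *-1 services above and below mirror the primary stack for a second dev
  # site (othersite.m.localhost), so calls federated across two deployments
  # can be tested locally. Host ports are offset to avoid clashes:
  #   auth 6080 -> 16080, SFU 7880-7882 -> 17880-17882,
  #   media UDP 50100-50200 -> 50300-50400, element-web 8081 -> 18081.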
  synapse:
    hostname: homeserver
    image: ghcr.io/element-hq/synapse:pr-18968-dcb7678281bc02d4551043a6338fe5b7e6aa47ce
    pull_policy: always
    environment:
      - SYNAPSE_CONFIG_PATH=/data/cfg/homeserver.yaml
      # Needed for rootless podman-compose so that the uid/gid mapping matches
      # the local user's uid. If the container runs as root (uid 0), it
      # actually maps to your non-root user on the host (e.g. 1000); with any
      # other uid the mapping will not match your non-root user.
      - UID=0
      - GID=0
    volumes:
      - ./backend/synapse_tmp:/data:Z
      - ./backend/dev_homeserver.yaml:/data/cfg/homeserver.yaml:Z
    networks:
      - ecbackend
  synapse-1:
    hostname: homeserver-1
    image: ghcr.io/element-hq/synapse:pr-18968-dcb7678281bc02d4551043a6338fe5b7e6aa47ce
    pull_policy: always
    environment:
      - SYNAPSE_CONFIG_PATH=/data/cfg/homeserver.yaml
      # Needed for rootless podman-compose so that the uid/gid mapping matches
      # the local user's uid. If the container runs as root (uid 0), it
      # actually maps to your non-root user on the host (e.g. 1000); with any
      # other uid the mapping will not match your non-root user.
      - UID=0
      - GID=0
    volumes:
      - ./backend/synapse_tmp_othersite:/data:Z
      - ./backend/dev_homeserver-othersite.yaml:/data/cfg/homeserver.yaml:Z
    networks:
      - ecbackend
  element-web:
    image: ghcr.io/element-hq/element-web:develop
    pull_policy: always
    volumes:
      - ./backend/ew.test.config.json:/app/config.json:Z
    environment:
      ELEMENT_WEB_PORT: 8081
    ports:
      # HOST_PORT:CONTAINER_PORT
      - "8081:8081"
    networks:
      - ecbackend
  element-web-1:
    image: ghcr.io/element-hq/element-web:develop
    pull_policy: always
    volumes:
      - ./backend/ew.test.othersite.config.json:/app/config.json:Z
    environment:
      ELEMENT_WEB_PORT: 18081
    ports:
      # HOST_PORT:CONTAINER_PORT
      - "18081:18081"
    networks:
      - ecbackend
  nginx:
    # see backend/dev_tls_setup for how to generate the tls certs
    hostname: synapse.m.localhost
    image: nginx:latest
    pull_policy: always
    volumes:
      - ./backend/dev_nginx.conf:/etc/nginx/conf.d/default.conf:Z
      - ./backend/dev_tls_m.localhost.key:/root/ssl/key.pem:Z
      - ./backend/dev_tls_m.localhost.crt:/root/ssl/cert.pem:Z
    ports:
      # HOST_PORT:CONTAINER_PORT
      - "443:443"
      - "8008:80"
      - "4443:443"
      - "8448:8448"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    depends_on:
      - synapse
    networks:
      ecbackend:
        aliases:
          - synapse.m.localhost
          - synapse.othersite.m.localhost
          - matrix-rtc.m.localhost
          - matrix-rtc.othersite.m.localhost
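
As a rough end-to-end check of the stack above, a Node 18+ script (global
fetch) could walk the discovery-then-auth path. This is only a sketch: the
.well-known key follows MSC4143 and the /sfu/get endpoint and payload follow
MSC4195; both are assumptions about the deployed services, not something this
compose file guarantees.

// Hedged smoke test for the dev stack; run with Node 18+.
// The dev certs are self-signed (see backend/dev_tls_setup), so TLS
// verification is disabled for this local check only.
process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";

async function smokeTest(): Promise<void> {
  // 1. Discover the RTC focus via the homeserver's client well-known.
  //    The org.matrix.msc4143.rtc_foci key is an assumption (MSC4143).
  const wellKnown = await fetch(
    "https://synapse.m.localhost/.well-known/matrix/client",
  ).then((r) => r.json());
  console.log("rtc_foci:", wellKnown["org.matrix.msc4143.rtc_foci"]);

  // 2. Exchange a Matrix OpenID token for a LiveKit JWT at the auth
  //    service. The openid_token must come from a logged-in client; the
  //    endpoint and body shape are assumptions based on MSC4195.
  const res = await fetch("http://localhost:6080/sfu/get", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      room: "!room:m.localhost",
      openid_token: { /* matrix_server_name, access_token, expires_in */ },
      device_id: "DEVICEID",
    }),
  });
  // Expected response shape (assumed): { url: string, jwt: string }, where
  // url points at wss://matrix-rtc.m.localhost/livekit/sfu. In the browser,
  // these would be handed to livekit-client's Room.connect(url, jwt).
  console.log(res.status, await res.text());
}

smokeTest().catch(console.error);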