Quick way to replace matrix JS SDK with LiveKit

Daniel Abramov 2023-06-02 14:49:11 +02:00
parent fb9dd7ff71
commit ee1819a0b6
13 changed files with 177 additions and 800 deletions
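Below is a condensed sketch of how the LiveKit wiring introduced in this commit fits together: useLiveKit() constructs the Room, GroupCallView hands it to InCallView, which fetches a JWT with useToken, connects with useLiveKitRoom and drives the mute buttons from useLocalParticipant. The component, prop names and return markup are hypothetical; the hook calls and the localhost endpoints are the ones appearing in the diff below.

import React from "react";
import { Room } from "livekit-client";
import {
  useLiveKitRoom,
  useLocalParticipant,
  useToken,
} from "@livekit/components-react";

// Hypothetical props: in the diff, `livekitRoom` is useLiveKit().room and the
// user/room details come from MatrixInfo in GroupCallView.
interface SketchProps {
  livekitRoom: Room;
  roomName: string;
  userName: string;
  identity: string; // `${userId}:${deviceId}` in the diff
}

export function LiveKitCallSketch({
  livekitRoom,
  roomName,
  userName,
  identity,
}: SketchProps) {
  // Fetch a JWT for this user/room from the (placeholder) token service.
  const token = useToken("http://localhost:8080/token", roomName, {
    userInfo: { name: userName, identity },
  });

  // Connect the pre-constructed Room to the SFU; the room is left again on unmount.
  useLiveKitRoom({
    token,
    serverUrl: "ws://localhost:7880",
    room: livekitRoom,
  });

  // Local mute state now comes from the LiveKit participant instead of the matrix JS SDK.
  const { isMicrophoneEnabled, localParticipant } = useLocalParticipant({
    room: livekitRoom,
  });

  return (
    <button
      onClick={() => {
        void localParticipant.setMicrophoneEnabled(!isMicrophoneEnabled);
      }}
    >
      {isMicrophoneEnabled ? "Mute" : "Unmute"}
    </button>
  );
}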

View file

@@ -30,10 +30,7 @@ import { ReactComponent as ScreenshareIcon } from "../icons/Screenshare.svg";
import { ReactComponent as SettingsIcon } from "../icons/Settings.svg";
import { ReactComponent as AddUserIcon } from "../icons/AddUser.svg";
import { ReactComponent as ArrowDownIcon } from "../icons/ArrowDown.svg";
import { ReactComponent as Fullscreen } from "../icons/Fullscreen.svg";
import { ReactComponent as FullscreenExit } from "../icons/FullscreenExit.svg";
import { TooltipTrigger } from "../Tooltip";
import { VolumeIcon } from "./VolumeIcon";
export type ButtonVariant =
| "default"
@@ -263,45 +260,3 @@ export function InviteButton({
</TooltipTrigger>
);
}
interface AudioButtonProps extends Omit<Props, "variant"> {
/**
* A number between 0 and 1
*/
volume: number;
}
export function AudioButton({ volume, ...rest }: AudioButtonProps) {
const { t } = useTranslation();
const tooltip = useCallback(() => t("Local volume"), [t]);
return (
<TooltipTrigger tooltip={tooltip}>
<Button variant="icon" {...rest}>
<VolumeIcon volume={volume} />
</Button>
</TooltipTrigger>
);
}
interface FullscreenButtonProps extends Omit<Props, "variant"> {
fullscreen?: boolean;
}
export function FullscreenButton({
fullscreen,
...rest
}: FullscreenButtonProps) {
const { t } = useTranslation();
const tooltip = useCallback(() => {
return fullscreen ? t("Exit full screen") : t("Full screen");
}, [fullscreen, t]);
return (
<TooltipTrigger tooltip={tooltip}>
<Button variant="icon" {...rest}>
{fullscreen ? <FullscreenExit /> : <Fullscreen />}
</Button>
</TooltipTrigger>
);
}

View file

@@ -1,35 +0,0 @@
/*
Copyright 2022 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from "react";
import { ReactComponent as AudioMuted } from "../icons/AudioMuted.svg";
import { ReactComponent as AudioLow } from "../icons/AudioLow.svg";
import { ReactComponent as Audio } from "../icons/Audio.svg";
interface Props {
/**
* Number between 0 and 1
*/
volume: number;
}
export function VolumeIcon({ volume }: Props) {
if (volume <= 0) return <AudioMuted />;
if (volume <= 0.5) return <AudioLow />;
return <Audio />;
}

View file

@@ -29,7 +29,6 @@ import { MatrixInfo } from "./VideoPreview";
import { InCallView } from "./InCallView";
import { CallEndedView } from "./CallEndedView";
import { useSentryGroupCallHandler } from "./useSentryGroupCallHandler";
import { useLocationNavigation } from "../useLocationNavigation";
import { PosthogAnalytics } from "../PosthogAnalytics";
import { useProfile } from "../profile/useProfile";
import { useLiveKit } from "./useLiveKit";
@@ -62,19 +61,8 @@ export function GroupCallView({
const {
state,
error,
activeSpeaker,
userMediaFeeds,
microphoneMuted,
localVideoMuted,
enter,
leave,
toggleLocalVideoMuted,
toggleMicrophoneMuted,
toggleScreensharing,
setMicrophoneMuted,
requestingScreenshare,
isScreensharing,
screenshareFeeds,
participants,
unencryptedEventsFromUsers,
} = useGroupCall(groupCall);
@@ -97,8 +85,7 @@
roomId: roomIdOrAlias,
};
// TODO: Pass the correct URL and the correct JWT token here.
const lkState = useLiveKit("<SFU_URL_HERE>", "<JWT_TOKEN_HERE>");
const lkState = useLiveKit();
useEffect(() => {
if (widget && preload) {
@@ -134,8 +121,6 @@
useSentryGroupCallHandler(groupCall);
useLocationNavigation(requestingScreenshare);
const [left, setLeft] = useState(false);
const history = useHistory();
@@ -191,24 +176,13 @@
<InCallView
groupCall={groupCall}
client={client}
roomName={groupCall.room.name}
avatarUrl={avatarUrl}
participants={participants}
mediaDevices={lkState.mediaDevices}
microphoneMuted={microphoneMuted}
localVideoMuted={localVideoMuted}
toggleLocalVideoMuted={toggleLocalVideoMuted}
toggleMicrophoneMuted={toggleMicrophoneMuted}
setMicrophoneMuted={setMicrophoneMuted}
userMediaFeeds={userMediaFeeds}
activeSpeaker={activeSpeaker}
onLeave={onLeave}
toggleScreensharing={toggleScreensharing}
isScreensharing={isScreensharing}
screenshareFeeds={screenshareFeeds}
roomIdOrAlias={roomIdOrAlias}
unencryptedEventsFromUsers={unencryptedEventsFromUsers}
hideHeader={hideHeader}
matrixInfo={matrixInfo}
mediaDevices={lkState.mediaDevices}
livekitRoom={lkState.room}
/>
);
} else if (left) {

View file

@@ -21,10 +21,17 @@ import { ResizeObserver } from "@juggle/resize-observer";
import { MatrixClient } from "matrix-js-sdk/src/client";
import { RoomMember } from "matrix-js-sdk/src/models/room-member";
import { GroupCall } from "matrix-js-sdk/src/webrtc/groupCall";
import { CallFeed } from "matrix-js-sdk/src/webrtc/callFeed";
import classNames from "classnames";
import { useTranslation } from "react-i18next";
import { JoinRule } from "matrix-js-sdk/src/@types/partials";
import { Room, Track } from "livekit-client";
import {
useLiveKitRoom,
useLocalParticipant,
useParticipants,
useToken,
useTracks,
} from "@livekit/components-react";
import type { IWidgetApiRequest } from "matrix-widget-api";
import styles from "./InCallView.module.css";
@@ -50,10 +57,8 @@ import { Avatar } from "../Avatar";
import { UserMenuContainer } from "../UserMenuContainer";
import { useRageshakeRequestModal } from "../settings/submit-rageshake";
import { RageshakeRequestModal } from "./RageshakeRequestModal";
import { useShowInspector, useSpatialAudio } from "../settings/useSetting";
import { useShowInspector } from "../settings/useSetting";
import { useModalTriggerState } from "../Modal";
import { useAudioContext } from "../video-grid/useMediaStream";
import { useFullscreen } from "../video-grid/useFullscreen";
import { PosthogAnalytics } from "../PosthogAnalytics";
import { widget, ElementWidgetActions } from "../widget";
import { useJoinRule } from "./useJoinRule";
@@ -61,9 +66,9 @@ import { useUrlParams } from "../UrlParams";
import { usePrefersReducedMotion } from "../usePrefersReducedMotion";
import { ParticipantInfo } from "./useGroupCall";
import { TileDescriptor } from "../video-grid/TileDescriptor";
import { AudioSink } from "../video-grid/AudioSink";
import { useCallViewKeyboardShortcuts } from "../useCallViewKeyboardShortcuts";
import { MediaDevicesState } from "../settings/mediaDevices";
import { MatrixInfo } from "./VideoPreview";
const canScreenshare = "getDisplayMedia" in (navigator.mediaDevices ?? {});
// There is currently a bug in either Safari or our code with cloning and sending MediaStreams
@@ -75,46 +80,25 @@ interface Props {
client: MatrixClient;
groupCall: GroupCall;
participants: Map<RoomMember, Map<string, ParticipantInfo>>;
roomName: string;
avatarUrl: string;
mediaDevices: MediaDevicesState;
microphoneMuted: boolean;
localVideoMuted: boolean;
toggleLocalVideoMuted: () => void;
toggleMicrophoneMuted: () => void;
toggleScreensharing: () => void;
setMicrophoneMuted: (muted: boolean) => void;
userMediaFeeds: CallFeed[];
activeSpeaker: CallFeed | null;
onLeave: () => void;
isScreensharing: boolean;
screenshareFeeds: CallFeed[];
roomIdOrAlias: string;
unencryptedEventsFromUsers: Set<string>;
hideHeader: boolean;
matrixInfo: MatrixInfo;
mediaDevices: MediaDevicesState;
livekitRoom: Room;
}
export function InCallView({
client,
groupCall,
participants,
roomName,
avatarUrl,
mediaDevices,
microphoneMuted,
localVideoMuted,
toggleLocalVideoMuted,
toggleMicrophoneMuted,
setMicrophoneMuted,
userMediaFeeds,
activeSpeaker,
onLeave,
toggleScreensharing,
isScreensharing,
screenshareFeeds,
roomIdOrAlias,
unencryptedEventsFromUsers,
hideHeader,
matrixInfo,
mediaDevices,
livekitRoom,
}: Props) {
const { t } = useTranslation();
usePreventScroll();
@@ -132,13 +116,49 @@ export function InCallView({
[containerRef1, containerRef2]
);
const { layout, setLayout } = useVideoGridLayout(screenshareFeeds.length > 0);
const { toggleFullscreen, fullscreenParticipant } =
useFullscreen(containerRef1);
const userId = client.getUserId();
const deviceId = client.getDeviceId();
const options = useMemo(
() => ({
userInfo: {
name: matrixInfo.userName,
identity: `${userId}:${deviceId}`,
},
}),
[matrixInfo.userName, userId, deviceId]
);
const token = useToken(
"http://localhost:8080/token",
matrixInfo.roomName,
options
);
const [spatialAudio] = useSpatialAudio();
// Uses a hook to connect to the LiveKit room (the room is left on unmount) and to publish the local media tracks (the default behaviour).
useLiveKitRoom({
token,
serverUrl: "ws://localhost:7880",
room: livekitRoom,
onConnected: () => {
console.log("connected to LiveKit room");
},
onDisconnected: () => {
console.log("disconnected from LiveKit room");
},
onError: (err) => {
console.error("error connecting to LiveKit room", err);
},
});
const screenSharingTracks = useTracks(
[{ source: Track.Source.ScreenShare, withPlaceholder: false }],
{
room: livekitRoom,
}
);
const { layout, setLayout } = useVideoGridLayout(
screenSharingTracks.length > 0
);
const [audioContext, audioDestination] = useAudioContext();
const [showInspector] = useShowInspector();
const { modalState: feedbackModalState, modalProps: feedbackModalProps } =
@@ -146,11 +166,28 @@
const { hideScreensharing } = useUrlParams();
const {
isMicrophoneEnabled,
isCameraEnabled,
isScreenShareEnabled,
localParticipant,
} = useLocalParticipant({ room: livekitRoom });
const toggleMicrophone = useCallback(async () => {
await localParticipant.setMicrophoneEnabled(!isMicrophoneEnabled);
}, [localParticipant, isMicrophoneEnabled]);
const toggleCamera = useCallback(async () => {
await localParticipant.setCameraEnabled(!isCameraEnabled);
}, [localParticipant, isCameraEnabled]);
const toggleScreenSharing = useCallback(async () => {
await localParticipant.setScreenShareEnabled(!isScreenShareEnabled);
}, [localParticipant, isScreenShareEnabled]);
useCallViewKeyboardShortcuts(
!feedbackModalState.isOpen,
toggleMicrophoneMuted,
toggleLocalVideoMuted,
setMicrophoneMuted
toggleCamera,
toggleMicrophone,
async (muted) => await localParticipant.setMicrophoneEnabled(!muted)
);
useEffect(() => {
@@ -189,27 +226,33 @@
}
}, [setLayout]);
const sfuParticipants = useParticipants({
room: livekitRoom,
});
const items = useMemo(() => {
const tileDescriptors: TileDescriptor[] = [];
const localUserId = client.getUserId()!;
const localDeviceId = client.getDeviceId()!;
// One tile for each participant, to start with (we want a tile for everyone we
// think should be in the call, even if we don't have a call feed for them yet)
const tileDescriptors: TileDescriptor[] = [];
for (const [member, participantMap] of participants) {
for (const [deviceId, { connectionState, presenter }] of participantMap) {
const callFeed = userMediaFeeds.find(
(f) => f.userId === member.userId && f.deviceId === deviceId
);
const id = `${member.userId}:${deviceId}`;
const sfuParticipant = sfuParticipants.find((p) => p.identity === id);
const hasScreenShare =
sfuParticipant?.getTrack(Track.Source.ScreenShare) !== undefined;
tileDescriptors.push({
id: `${member.userId} ${deviceId}`,
id,
member,
callFeed,
focused: screenshareFeeds.length === 0 && callFeed === activeSpeaker,
isLocal: member.userId === localUserId && deviceId === localDeviceId,
focused: hasScreenShare && !sfuParticipant?.isLocal,
isLocal: member.userId == localUserId && deviceId == localDeviceId,
presenter,
connectionState,
sfuParticipant,
});
}
}
@@ -218,46 +261,17 @@
tileDescriptors.length
);
// Add the screenshares too
for (const screenshareFeed of screenshareFeeds) {
const member = screenshareFeed.getMember()!;
const connectionState = participants
.get(member)
?.get(screenshareFeed.deviceId!)?.connectionState;
// If the participant has left, their screenshare feed is stale and we
// shouldn't bother showing it
if (connectionState !== undefined) {
tileDescriptors.push({
id: screenshareFeed.id,
member,
callFeed: screenshareFeed,
focused: true,
isLocal: screenshareFeed.isLocal,
presenter: false,
connectionState,
});
}
}
return tileDescriptors;
}, [client, participants, userMediaFeeds, activeSpeaker, screenshareFeeds]);
}, [client, participants, sfuParticipants]);
const reducedControls = boundsValid && bounds.width <= 400;
const noControls = reducedControls && bounds.height <= 400;
// The maximised participant: either the participant that the user has
// manually put in fullscreen, or the focused (active) participant if the
// window is too small to show everyone
// The maximised participant: the focused (active) participant if the
// window is too small to show everyone.
const maximisedParticipant = useMemo(
() =>
fullscreenParticipant ??
(noControls
? items.find((item) => item.focused) ??
items.find((item) => item.callFeed) ??
null
: null),
[fullscreenParticipant, noControls, items]
() => (noControls ? items.find((item) => item.focused) ?? null : null),
[noControls, items]
);
const renderAvatar = useCallback(
@@ -296,12 +310,7 @@
key={maximisedParticipant.id}
item={maximisedParticipant}
getAvatar={renderAvatar}
audioContext={audioContext}
audioDestination={audioDestination}
disableSpeakingIndicator={true}
maximised={Boolean(maximisedParticipant)}
fullscreen={maximisedParticipant === fullscreenParticipant}
onFullscreen={toggleFullscreen}
/>
);
}
@@ -323,12 +332,7 @@
key={item.id}
item={item}
getAvatar={renderAvatar}
audioContext={audioContext}
audioDestination={audioDestination}
disableSpeakingIndicator={items.length < 3}
maximised={false}
fullscreen={false}
onFullscreen={toggleFullscreen}
{...rest}
/>
)}
@@ -345,26 +349,6 @@
[styles.maximised]: maximisedParticipant,
});
// If spatial audio is disabled, we render one audio tag for each participant
// (with spatial audio, all the audio goes via the Web Audio API)
// We also do this if there's a feed maximised because we only trigger spatial
// audio rendering for feeds that we're displaying, which will need to be fixed
// once we start having more participants than we can fit on a screen, but this
// is a workaround for now.
const audioElements: JSX.Element[] = [];
if (!spatialAudio || maximisedParticipant) {
for (const item of items) {
if (item.isLocal) continue; // We don't want to render own audio
audioElements.push(
<AudioSink
tileDescriptor={item}
audioOutput="AUDIO OUTPUT?"
key={item.id}
/>
);
}
}
let footer: JSX.Element | null;
if (noControls) {
@@ -372,25 +356,25 @@
} else if (reducedControls) {
footer = (
<div className={styles.footer}>
<MicButton muted={microphoneMuted} onPress={toggleMicrophoneMuted} />
<VideoButton muted={localVideoMuted} onPress={toggleLocalVideoMuted} />
<MicButton muted={!isMicrophoneEnabled} onPress={toggleMicrophone} />
<VideoButton muted={!isCameraEnabled} onPress={toggleCamera} />
<HangupButton onPress={onLeave} />
</div>
);
} else {
footer = (
<div className={styles.footer}>
<MicButton muted={microphoneMuted} onPress={toggleMicrophoneMuted} />
<VideoButton muted={localVideoMuted} onPress={toggleLocalVideoMuted} />
<MicButton muted={!isMicrophoneEnabled} onPress={toggleMicrophone} />
<VideoButton muted={!isCameraEnabled} onPress={toggleCamera} />
{canScreenshare && !hideScreensharing && !isSafari && (
<ScreenshareButton
enabled={isScreensharing}
onPress={toggleScreensharing}
enabled={isScreenShareEnabled}
onPress={toggleScreenSharing}
/>
)}
{!maximisedParticipant && (
<OverflowMenu
roomId={roomIdOrAlias}
roomId={matrixInfo.roomId}
mediaDevices={mediaDevices}
inCall
showInvite={joinRule === JoinRule.Public}
@@ -405,11 +389,13 @@
return (
<div className={containerClasses} ref={containerRef}>
<>{audioElements}</>
{!hideHeader && !maximisedParticipant && (
<Header>
<LeftNav>
<RoomHeaderInfo roomName={roomName} avatarUrl={avatarUrl} />
<RoomHeaderInfo
roomName={matrixInfo.roomName}
avatarUrl={matrixInfo.avatarUrl}
/>
<VersionMismatchWarning
users={unencryptedEventsFromUsers}
room={groupCall.room}
@@ -431,7 +417,7 @@
{rageshakeRequestModalState.isOpen && (
<RageshakeRequestModal
{...rageshakeRequestModalProps}
roomIdOrAlias={roomIdOrAlias}
roomIdOrAlias={matrixInfo.roomId}
/>
)}
</div>

View file

@@ -6,21 +6,20 @@ import { MediaDevicesState, MediaDevices } from "../settings/mediaDevices";
import { LocalMediaInfo, MediaInfo } from "./VideoPreview";
type LiveKitState = {
// The state of the media devices (changing the devices will also change them in the room).
mediaDevices: MediaDevicesState;
// The local media (audio and video) that can be referenced in e.g. a lobby view.
localMedia: LocalMediaInfo;
enterRoom: () => Promise<void>;
leaveRoom: () => Promise<void>;
// A reference to the newly constructed (but not yet entered) room for future use with the LiveKit hooks.
// TODO: Abstract this away, so that the user doesn't have to deal with the LiveKit room directly.
room: Room;
};
// Returns the React state for LiveKit's Room class.
// The logical return type is `LiveKitState`, but because this is a React hook the initialisation is
// delayed (it happens after rendering, not during it), so on the first render this function may return `undefined`.
// Shortly afterwards the state is updated to the actual `LiveKitState` value.
export function useLiveKit(
url: string,
token: string
): LiveKitState | undefined {
export function useLiveKit(): LiveKitState | undefined {
// TODO: Pass the proper parameters to configure the room (supported codecs, simulcast, adaptive streaming, etc.).
const [room] = React.useState<Room>(() => {
return new Room();
@@ -83,19 +82,11 @@
setVideoEnabled
),
},
enterRoom: async () => {
// TODO: Pass connection parameters (autosubscribe, etc.).
await room.connect(url, token);
},
leaveRoom: async () => {
await room.disconnect();
},
room,
};
setState(state);
}, [
url,
token,
mediaDevices,
audio.localTrack,
video.localTrack,

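As the comment at the top of this hook notes, it returns `undefined` until the state is initialised after the first render, so callers are expected to guard before touching the room. A minimal, hypothetical caller-side sketch (the component name and fallback are illustrative):

import React from "react";
import { useLiveKit } from "./useLiveKit";

export function LiveKitStateGate() {
  const lkState = useLiveKit();

  // First render: the LiveKit state has not been initialised yet.
  if (!lkState) {
    return null; // or a loading indicator
  }

  // From here on, mediaDevices, localMedia and room are all available.
  return <span>{lkState.room.name}</span>;
}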
View file

@@ -1,47 +0,0 @@
/*
Copyright 2022 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from "react";
import { TileDescriptor } from "./TileDescriptor";
import { useCallFeed } from "./useCallFeed";
import { useMediaStream } from "./useMediaStream";
interface Props {
tileDescriptor: TileDescriptor;
audioOutput: string;
}
// Renders an <audio> element on the page playing the given stream
// to the given output.
export const AudioSink: React.FC<Props> = ({
tileDescriptor,
audioOutput,
}: Props) => {
const { localVolume, stream } = useCallFeed(tileDescriptor.callFeed);
const audioElementRef = useMediaStream(
stream,
audioOutput,
// We don't compare the audioMuted flag of useCallFeed here, since unmuting
// depends on to-device messages which may lag behind the audio actually
// starting to flow over the stream
tileDescriptor.isLocal,
localVolume
);
return <audio ref={audioElementRef} />;
};

View file

@@ -15,7 +15,7 @@ limitations under the License.
*/
import { RoomMember } from "matrix-js-sdk";
import { CallFeed } from "matrix-js-sdk/src/webrtc/callFeed";
import { LocalParticipant, RemoteParticipant } from "livekit-client";
import { ConnectionState } from "../room/useGroupCall";
@@ -26,7 +26,7 @@ export interface TileDescriptor {
member: RoomMember;
focused: boolean;
presenter: boolean;
callFeed?: CallFeed;
isLocal?: boolean;
connectionState: ConnectionState;
sfuParticipant?: LocalParticipant | RemoteParticipant;
}

View file

@@ -15,14 +15,9 @@ limitations under the License.
*/
import React, { useState } from "react";
import { useMemo } from "react";
import { RoomMember } from "matrix-js-sdk";
import { VideoGrid, useVideoGridLayout } from "./VideoGrid";
import { VideoTile } from "./VideoTile";
import { useVideoGridLayout } from "./VideoGrid";
import { Button } from "../button";
import { ConnectionState } from "../room/useGroupCall";
import { TileDescriptor } from "./TileDescriptor";
export default {
title: "VideoGrid",
@@ -35,18 +30,6 @@ export const ParticipantsTest = () => {
const { layout, setLayout } = useVideoGridLayout(false);
const [participantCount, setParticipantCount] = useState(1);
const items: TileDescriptor[] = useMemo(
() =>
new Array(participantCount).fill(undefined).map((_, i) => ({
id: (i + 1).toString(),
member: new RoomMember("!fake:room.id", `@user${i}:fake.dummy`),
focused: false,
presenter: false,
connectionState: ConnectionState.Connected,
})),
[participantCount]
);
return (
<>
<div style={{ display: "flex", width: "100vw", height: "32px" }}>
@@ -68,26 +51,6 @@ export const ParticipantsTest = () => {
</Button>
)}
</div>
<div
style={{
display: "flex",
width: "100vw",
height: "calc(100vh - 32px)",
}}
>
<VideoGrid layout={layout} items={items}>
{({ item, ...rest }) => (
<VideoTile
key={item.id}
name={`User ${item.id}`}
disableSpeakingIndicator={items.length < 3}
connectionState={ConnectionState.Connected}
debugInfo={{ width: undefined, height: undefined }}
{...rest}
/>
)}
</VideoGrid>
</div>
</>
);
};

View file

@@ -18,134 +18,63 @@ import React, { forwardRef } from "react";
import { animated } from "@react-spring/web";
import classNames from "classnames";
import { useTranslation } from "react-i18next";
import { LocalParticipant, RemoteParticipant, Track } from "livekit-client";
import { useMediaTrack } from "@livekit/components-react";
import styles from "./VideoTile.module.css";
import { ReactComponent as MicMutedIcon } from "../icons/MicMuted.svg";
import { ReactComponent as VideoMutedIcon } from "../icons/VideoMuted.svg";
import { AudioButton, FullscreenButton } from "../button/Button";
import { ConnectionState } from "../room/useGroupCall";
import { CallFeedDebugInfo } from "./useCallFeed";
import { useShowInspector } from "../settings/useSetting";
interface Props {
name: string;
connectionState: ConnectionState;
speaking?: boolean;
audioMuted?: boolean;
videoMuted?: boolean;
screenshare?: boolean;
avatar?: JSX.Element;
mediaRef?: React.RefObject<MediaElement>;
onOptionsPress?: () => void;
localVolume?: number;
hasAudio?: boolean;
maximised?: boolean;
fullscreen?: boolean;
onFullscreen?: () => void;
className?: string;
showOptions?: boolean;
isLocal?: boolean;
disableSpeakingIndicator?: boolean;
debugInfo: CallFeedDebugInfo;
sfuParticipant: LocalParticipant | RemoteParticipant;
}
export const VideoTile = forwardRef<HTMLDivElement, Props>(
(
{
name,
connectionState,
speaking,
audioMuted,
videoMuted,
screenshare,
avatar,
mediaRef,
onOptionsPress,
localVolume,
hasAudio,
maximised,
fullscreen,
onFullscreen,
className,
showOptions,
isLocal,
// TODO: disableSpeakingIndicator is not used atm.
disableSpeakingIndicator,
debugInfo,
...rest
},
ref
) => {
const [showInspector] = useShowInspector();
({ name, avatar, maximised, className, sfuParticipant, ...rest }, ref) => {
const { t } = useTranslation();
const toolbarButtons: JSX.Element[] = [];
if (connectionState == ConnectionState.Connected && !isLocal) {
if (hasAudio) {
toolbarButtons.push(
<AudioButton
key="localVolume"
className={styles.button}
volume={localVolume}
onPress={onOptionsPress}
/>
const videoEl = React.useRef<HTMLVideoElement>(null);
const { isMuted: cameraMuted } = useMediaTrack(
Track.Source.Camera,
sfuParticipant,
{
element: videoEl,
}
);
}
if (screenshare) {
toolbarButtons.push(
<FullscreenButton
key="fullscreen"
className={styles.button}
fullscreen={fullscreen}
onPress={onFullscreen}
/>
const audioEl = React.useRef<HTMLAudioElement>(null);
const { isMuted: microphoneMuted } = useMediaTrack(
Track.Source.Microphone,
sfuParticipant,
{
element: audioEl,
}
);
}
}
let caption: string;
switch (connectionState) {
case ConnectionState.EstablishingCall:
caption = t("{{name}} (Connecting...)", { name });
break;
case ConnectionState.WaitMedia:
// not strictly true, but probably easier to understand than "Waiting for media"
caption = t("{{name}} (Waiting for video...)", { name });
break;
case ConnectionState.Connected:
caption = name;
break;
}
return (
<animated.div
className={classNames(styles.videoTile, className, {
[styles.isLocal]: isLocal,
[styles.speaking]: speaking,
[styles.muted]: audioMuted,
[styles.screenshare]: screenshare,
[styles.isLocal]: sfuParticipant.isLocal,
[styles.speaking]: sfuParticipant.isSpeaking,
[styles.muted]: microphoneMuted,
[styles.screenshare]: false,
[styles.maximised]: maximised,
})}
ref={ref}
{...rest}
>
{showInspector && (
<div className={classNames(styles.debugInfo)}>
{JSON.stringify(debugInfo)}
</div>
)}
{toolbarButtons.length > 0 && !maximised && (
<div className={classNames(styles.toolbar)}>{toolbarButtons}</div>
)}
{videoMuted && (
{cameraMuted && (
<>
<div className={styles.videoMutedOverlay} />
{avatar}
</>
)}
{!maximised &&
(screenshare ? (
(sfuParticipant.isScreenShareEnabled ? (
<div className={styles.presenterLabel}>
<span>{t("{{name}} is presenting", { name })}</span>
</div>
@@ -156,13 +85,15 @@ export const VideoTile = forwardRef<HTMLDivElement, Props>(
Mute state is currently sent over to-device messages, which
aren't quite real-time, so this is an important kludge to make
sure no one appears muted when they've clearly begun talking. */
audioMuted && !videoMuted && !speaking && <MicMutedIcon />
microphoneMuted &&
!cameraMuted &&
!sfuParticipant.isSpeaking && <MicMutedIcon />
}
{videoMuted && <VideoMutedIcon />}
<span title={caption}>{caption}</span>
{cameraMuted && <VideoMutedIcon />}
</div>
))}
<video ref={mediaRef} playsInline disablePictureInPicture />
<video ref={videoEl} />
<audio ref={audioEl} />
</animated.div>
);
}

View file

@@ -14,17 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
import { SDPStreamMetadataPurpose } from "matrix-js-sdk/src/webrtc/callEventTypes";
import React from "react";
import { useCallback, useEffect } from "react";
import { RoomMember } from "matrix-js-sdk/src/models/room-member";
import { useTranslation } from "react-i18next";
import { useCallFeed } from "./useCallFeed";
import { useSpatialMediaStream } from "./useMediaStream";
import { ConnectionState } from "../room/useGroupCall";
import { useRoomMemberName } from "./useRoomMemberName";
import { VideoTile } from "./VideoTile";
import { VideoTileSettingsModal } from "./VideoTileSettingsModal";
import { useModalTriggerState } from "../Modal";
import { TileDescriptor } from "./TileDescriptor";
interface Props {
@@ -36,12 +32,7 @@ interface Props {
width: number,
height: number
) => JSX.Element;
audioContext: AudioContext;
audioDestination: AudioNode;
disableSpeakingIndicator: boolean;
maximised: boolean;
fullscreen: boolean;
onFullscreen: (item: TileDescriptor) => void;
}
export function VideoTileContainer({
@@ -49,88 +40,37 @@ export function VideoTileContainer({
width,
height,
getAvatar,
audioContext,
audioDestination,
disableSpeakingIndicator,
maximised,
fullscreen,
onFullscreen,
...rest
}: Props) {
const {
isLocal,
audioMuted,
videoMuted,
localVolume,
hasAudio,
speaking,
stream,
purpose,
debugInfo,
} = useCallFeed(item.callFeed);
const { rawDisplayName } = useRoomMemberName(item.member);
const [tileRef, mediaRef] = useSpatialMediaStream(
stream ?? null,
audioContext,
audioDestination,
localVolume,
// The feed is muted if it's local audio (because we don't want our own audio,
// but it's a hook and we can't call it conditionally so we're stuck with it)
// or if there's a maximised feed in which case we always render audio via audio
// elements because we wire it up at the video tile container level and only one
// video tile container is displayed.
isLocal || maximised
);
const {
modalState: videoTileSettingsModalState,
modalProps: videoTileSettingsModalProps,
} = useModalTriggerState();
const onOptionsPress = () => {
videoTileSettingsModalState.open();
};
const { t } = useTranslation();
const onFullscreenCallback = useCallback(() => {
onFullscreen(item);
}, [onFullscreen, item]);
// Firefox doesn't respect the disablePictureInPicture attribute
// https://bugzilla.mozilla.org/show_bug.cgi?id=1611831
useEffect(() => {
item.callFeed?.setResolution(width, height);
}, [width, height, item.callFeed]);
useEffect(() => {
item.callFeed?.setIsVisible(true);
}, [item.callFeed]);
let caption: string;
switch (item.connectionState) {
case ConnectionState.EstablishingCall:
caption = t("{{name}} (Connecting...)", { name });
break;
case ConnectionState.WaitMedia:
// not strictly true, but probably easier to understand than "Waiting for media"
caption = t("{{name}} (Waiting for video...)", { name });
break;
case ConnectionState.Connected:
caption = rawDisplayName;
break;
}
return (
<>
{!item.sfuParticipant && <span title={caption}>{caption}</span>}
{item.sfuParticipant && (
<VideoTile
isLocal={isLocal}
speaking={speaking && !disableSpeakingIndicator}
audioMuted={audioMuted}
videoMuted={videoMuted}
screenshare={purpose === SDPStreamMetadataPurpose.Screenshare}
sfuParticipant={item.sfuParticipant}
name={rawDisplayName}
connectionState={item.connectionState}
ref={tileRef}
mediaRef={mediaRef}
avatar={getAvatar && getAvatar(item.member, width, height)}
onOptionsPress={onOptionsPress}
localVolume={localVolume}
hasAudio={hasAudio}
maximised={maximised}
fullscreen={fullscreen}
onFullscreen={onFullscreenCallback}
debugInfo={debugInfo}
{...rest}
/>
{videoTileSettingsModalState.isOpen && !maximised && item.callFeed && (
<VideoTileSettingsModal
{...videoTileSettingsModalProps}
feed={item.callFeed}
/>
)}
</>
);

View file

@@ -1,120 +0,0 @@
/*
Copyright 2022 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
.videoTileSettingsModal {
width: 700px;
height: 316px;
display: flex;
}
.content {
position: relative;
margin: 27px 34px;
height: 100%;
display: flex;
flex-direction: column;
justify-content: center;
}
.localVolumePercentage {
width: 3ch;
}
.localVolumeSlider[type="range"] {
-ms-appearance: none;
-moz-appearance: none;
-webkit-appearance: none;
appearance: none;
background-color: transparent;
--slider-color: var(--quinary-content);
--slider-height: 4px;
--thumb-color: var(--accent);
--thumb-radius: 100%;
--thumb-size: 16px;
--thumb-margin-top: -6px;
cursor: pointer;
width: 100%;
}
.localVolumeSlider[type="range"]::-moz-range-track {
-moz-appearance: none;
appearance: none;
background-color: var(--slider-color);
height: var(--slider-height);
}
.localVolumeSlider[type="range"]::-ms-track {
-ms-appearance: none;
appearance: none;
background-color: var(--slider-color);
height: var(--slider-height);
}
.localVolumeSlider[type="range"]::-webkit-slider-runnable-track {
-webkit-appearance: none;
appearance: none;
background-color: var(--slider-color);
height: var(--slider-height);
}
.localVolumeSlider[type="range"]::-moz-range-thumb {
-moz-appearance: none;
appearance: none;
height: var(--thumb-size);
width: var(--thumb-size);
margin-top: var(--thumb-margin-top);
border-radius: var(--thumb-radius);
background: var(--thumb-color);
}
.localVolumeSlider[type="range"]::-ms-thumb {
-ms-appearance: none;
appearance: none;
height: var(--thumb-size);
width: var(--thumb-size);
margin-top: var(--thumb-margin-top);
border-radius: var(--thumb-radius);
background: var(--thumb-color);
}
.localVolumeSlider[type="range"]::-webkit-slider-thumb {
-webkit-appearance: none;
appearance: none;
height: var(--thumb-size);
width: var(--thumb-size);
margin-top: var(--thumb-margin-top);
border-radius: var(--thumb-radius);
background: var(--thumb-color);
}
.localVolumeSlider[type="range"]::-moz-range-progress {
-moz-appearance: none;
appearance: none;
height: var(--slider-height);
background: var(--thumb-color);
}
.localVolumeSlider[type="range"]::-ms-fill-lower {
-moz-appearance: none;
appearance: none;
height: var(--slider-height);
background: var(--thumb-color);
}

View file

@@ -1,82 +0,0 @@
/*
Copyright 2022 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React, { ChangeEvent, useState } from "react";
import { CallFeed } from "matrix-js-sdk/src/webrtc/callFeed";
import { useTranslation } from "react-i18next";
import { FieldRow } from "../input/Input";
import { Modal } from "../Modal";
import styles from "./VideoTileSettingsModal.module.css";
import { VolumeIcon } from "../button/VolumeIcon";
interface LocalVolumeProps {
feed: CallFeed;
}
const LocalVolume: React.FC<LocalVolumeProps> = ({
feed,
}: LocalVolumeProps) => {
const [localVolume, setLocalVolume] = useState<number>(feed.getLocalVolume());
const onLocalVolumeChanged = (event: ChangeEvent<HTMLInputElement>) => {
const value: number = +event.target.value;
setLocalVolume(value);
feed.setLocalVolume(value);
};
return (
<>
<FieldRow>
<VolumeIcon volume={localVolume} />
<input
className={styles.localVolumeSlider}
type="range"
min="0"
max="1"
step="0.01"
value={localVolume}
onChange={onLocalVolumeChanged}
/>
</FieldRow>
</>
);
};
// TODO: Extend ModalProps
interface Props {
feed: CallFeed;
onClose: () => void;
}
export const VideoTileSettingsModal = ({ feed, onClose, ...rest }: Props) => {
const { t } = useTranslation();
return (
<Modal
className={styles.videoTileSettingsModal}
title={t("Local volume")}
isDismissable
mobileFullScreen
onClose={onClose}
{...rest}
>
<div className={styles.content}>
<LocalVolume feed={feed} />
</div>
</Modal>
);
};

View file

@@ -1,79 +0,0 @@
/*
Copyright 2022 New Vector Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { logger } from "matrix-js-sdk/src/logger";
import { useCallback, useEffect, useState } from "react";
import { useEventTarget } from "../useEvents";
import { TileDescriptor } from "./TileDescriptor";
import { useCallFeed } from "./useCallFeed";
export function useFullscreen(ref: React.RefObject<HTMLElement>): {
toggleFullscreen: (participant: TileDescriptor) => void;
fullscreenParticipant: TileDescriptor | null;
} {
const [fullscreenParticipant, setFullscreenParticipant] =
useState<TileDescriptor | null>(null);
const { disposed } = useCallFeed(fullscreenParticipant?.callFeed);
const toggleFullscreen = useCallback(
(tileDes: TileDescriptor) => {
if (fullscreenParticipant) {
document.exitFullscreen();
setFullscreenParticipant(null);
} else {
try {
if (ref.current.requestFullscreen) {
ref.current.requestFullscreen();
} else if (ref.current.webkitRequestFullscreen) {
ref.current.webkitRequestFullscreen();
} else {
logger.error("No available fullscreen API!");
}
setFullscreenParticipant(tileDes);
} catch (error) {
console.warn("Failed to fullscreen:", error);
}
}
},
[fullscreenParticipant, setFullscreenParticipant, ref]
);
const onFullscreenChanged = useCallback(() => {
if (!document.fullscreenElement && !document.webkitFullscreenElement) {
setFullscreenParticipant(null);
}
}, [setFullscreenParticipant]);
useEventTarget(ref.current, "fullscreenchange", onFullscreenChanged);
useEventTarget(ref.current, "webkitfullscreenchange", onFullscreenChanged);
useEffect(() => {
if (disposed) {
if (document.exitFullscreen) {
document.exitFullscreen();
} else if (document.webkitExitFullscreen) {
document.webkitExitFullscreen();
} else {
logger.error("No available fullscreen API!");
}
setFullscreenParticipant(null);
}
}, [disposed]);
return { toggleFullscreen, fullscreenParticipant };
}