element-web/src/MediaDeviceHandler.ts
/*
Copyright 2024 New Vector Ltd.
Copyright 2021 Šimon Brandner <simon.bra.ag@gmail.com>
Copyright 2017 Michael Telatynski <7t3chguy@gmail.com>
SPDX-License-Identifier: AGPL-3.0-only OR GPL-3.0-only
Please see LICENSE files in the repository root for full details.
*/

import EventEmitter from "events";
import { logger } from "matrix-js-sdk/src/logger";

import SettingsStore from "./settings/SettingsStore";
import { SettingLevel } from "./settings/SettingLevel";
import { MatrixClientPeg } from "./MatrixClientPeg";
import { _t } from "./languageHandler";

// XXX: MediaDeviceKind is a union type, so we make our own enum
export enum MediaDeviceKindEnum {
    AudioOutput = "audiooutput",
    AudioInput = "audioinput",
    VideoInput = "videoinput",
}

export type IMediaDevices = Record<MediaDeviceKindEnum, Array<MediaDeviceInfo>>;

export enum MediaDeviceHandlerEvent {
    AudioOutputChanged = "audio_output_changed",
}

export default class MediaDeviceHandler extends EventEmitter {
    private static internalInstance?: MediaDeviceHandler;

    public static get instance(): MediaDeviceHandler {
        if (!MediaDeviceHandler.internalInstance) {
            MediaDeviceHandler.internalInstance = new MediaDeviceHandler();
        }
        return MediaDeviceHandler.internalInstance;
    }

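    /**
     * Returns whether the browser reports at least one media device with a
     * non-empty label. Device labels are only populated once media permissions
     * have been granted, so a `true` result implies permissions were granted
     * at some point.
     */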
    public static async hasAnyLabeledDevices(): Promise<boolean> {
        const devices = await navigator.mediaDevices.enumerateDevices();
        return devices.some((d) => Boolean(d.label));
    }

    /**
     * Gets the available audio input/output and video input devices
     * from the browser: a thin wrapper around mediaDevices.enumerateDevices()
     * that also groups the results by device type. Note that this requires
     * user media permissions and an active stream, otherwise you'll get blank
     * device labels.
     *
     * Once the Permissions API
     * (https://developer.mozilla.org/en-US/docs/Web/API/Permissions_API)
     * is ready for primetime, it might help make this simpler.
     *
     * @return Promise<IMediaDevices> The available media devices
     */
    public static async getDevices(): Promise<IMediaDevices | undefined> {
        try {
            const devices = await navigator.mediaDevices.enumerateDevices();
            const output: Record<MediaDeviceKindEnum, MediaDeviceInfo[]> = {
                [MediaDeviceKindEnum.AudioOutput]: [],
                [MediaDeviceKindEnum.AudioInput]: [],
                [MediaDeviceKindEnum.VideoInput]: [],
            };

            devices.forEach((device) => output[device.kind].push(device));
            return output;
        } catch (error) {
            logger.warn("Unable to refresh WebRTC Devices: ", error);
        }
    }

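    /**
     * Ensures the given list of devices (of a single kind) contains a sensible
     * default entry and returns the deviceId that should be selected by default:
     * "default" if the browser already exposes such a device, otherwise "" for
     * the placeholder entry this function prepends.
     */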
    public static getDefaultDevice = (devices: Array<Partial<MediaDeviceInfo>>): string => {
        // Note we check for a device with deviceId 'default' but add a placeholder with
        // deviceId == the empty string: this is because Chrome gives us a device with
        // deviceId 'default', so that is what we look for, not the placeholder we add ourselves.
        if (!devices.some((i) => i.deviceId === "default")) {
            devices.unshift({ deviceId: "", label: _t("voip|default_device") });
            return "";
        } else {
            return "default";
        }
    };

    /**
     * Retrieves devices from the SettingsStore and tells the js-sdk to use them
     */
    public static async loadDevices(): Promise<void> {
        const audioDeviceId = SettingsStore.getValue("webrtc_audioinput");
        const videoDeviceId = SettingsStore.getValue("webrtc_videoinput");

        await MatrixClientPeg.safeGet().getMediaHandler().setAudioInput(audioDeviceId);
        await MatrixClientPeg.safeGet().getMediaHandler().setVideoInput(videoDeviceId);
        await MediaDeviceHandler.updateAudioSettings();
    }

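    /**
     * Pushes the current audio processing settings (auto gain control, echo
     * cancellation, noise suppression) from the SettingsStore to the js-sdk
     * media handler.
     */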
    private static async updateAudioSettings(): Promise<void> {
        await MatrixClientPeg.safeGet().getMediaHandler().setAudioSettings({
            autoGainControl: MediaDeviceHandler.getAudioAutoGainControl(),
            echoCancellation: MediaDeviceHandler.getAudioEchoCancellation(),
            noiseSuppression: MediaDeviceHandler.getAudioNoiseSuppression(),
        });
    }

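    /**
     * Persists the chosen audio output device at the device level and notifies
     * listeners via MediaDeviceHandlerEvent.AudioOutputChanged.
     * @param {string} deviceId the deviceId of the new audio output device
     */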
    public setAudioOutput(deviceId: string): void {
        SettingsStore.setValue("webrtc_audiooutput", null, SettingLevel.DEVICE, deviceId);
        this.emit(MediaDeviceHandlerEvent.AudioOutputChanged, deviceId);
    }

    /**
     * This will not change the device used by a call that is already in progress;
     * the call needs to be ended and started again for the change to take effect.
     * @param {string} deviceId the deviceId of the new audio input device
     */
    public async setAudioInput(deviceId: string): Promise<void> {
        SettingsStore.setValue("webrtc_audioinput", null, SettingLevel.DEVICE, deviceId);
        return MatrixClientPeg.safeGet().getMediaHandler().setAudioInput(deviceId);
    }

    /**
     * This will not change the device used by a call that is already in progress;
     * the call needs to be ended and started again for the change to take effect.
     * @param {string} deviceId the deviceId of the new video input device
     */
    public async setVideoInput(deviceId: string): Promise<void> {
        SettingsStore.setValue("webrtc_videoinput", null, SettingLevel.DEVICE, deviceId);
        return MatrixClientPeg.safeGet().getMediaHandler().setVideoInput(deviceId);
    }

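    /**
     * Sets the device of the given kind, delegating to setAudioOutput,
     * setAudioInput or setVideoInput as appropriate.
     * @param {string} deviceId the deviceId of the new device
     * @param {MediaDeviceKindEnum} kind the kind of device being set
     */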
    public async setDevice(deviceId: string, kind: MediaDeviceKindEnum): Promise<void> {
        switch (kind) {
            case MediaDeviceKindEnum.AudioOutput:
                this.setAudioOutput(deviceId);
                break;
            case MediaDeviceKindEnum.AudioInput:
                await this.setAudioInput(deviceId);
                break;
            case MediaDeviceKindEnum.VideoInput:
                await this.setVideoInput(deviceId);
                break;
        }
    }

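    /**
     * Persists the auto gain control preference at the device level and
     * re-applies the audio settings to the js-sdk media handler.
     * @param {boolean} value whether auto gain control should be enabled
     */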
    public static async setAudioAutoGainControl(value: boolean): Promise<void> {
        await SettingsStore.setValue("webrtc_audio_autoGainControl", null, SettingLevel.DEVICE, value);
        await MediaDeviceHandler.updateAudioSettings();
    }

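    /**
     * Persists the echo cancellation preference at the device level and
     * re-applies the audio settings to the js-sdk media handler.
     * @param {boolean} value whether echo cancellation should be enabled
     */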
    public static async setAudioEchoCancellation(value: boolean): Promise<void> {
        await SettingsStore.setValue("webrtc_audio_echoCancellation", null, SettingLevel.DEVICE, value);
        await MediaDeviceHandler.updateAudioSettings();
    }

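    /**
     * Persists the noise suppression preference at the device level and
     * re-applies the audio settings to the js-sdk media handler.
     * @param {boolean} value whether noise suppression should be enabled
     */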
    public static async setAudioNoiseSuppression(value: boolean): Promise<void> {
        await SettingsStore.setValue("webrtc_audio_noiseSuppression", null, SettingLevel.DEVICE, value);
        await MediaDeviceHandler.updateAudioSettings();
    }

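    /**
     * Returns the deviceId of the configured audio output device, as stored
     * in the device-level settings.
     */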
    public static getAudioOutput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audiooutput");
    }

    public static getAudioInput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_audioinput");
    }

    public static getVideoInput(): string {
        return SettingsStore.getValueAt(SettingLevel.DEVICE, "webrtc_videoinput");
    }

    public static getAudioAutoGainControl(): boolean {
        return SettingsStore.getValue("webrtc_audio_autoGainControl");
    }

    public static getAudioEchoCancellation(): boolean {
        return SettingsStore.getValue("webrtc_audio_echoCancellation");
    }

    public static getAudioNoiseSuppression(): boolean {
        return SettingsStore.getValue("webrtc_audio_noiseSuppression");
    }

    /**
     * Returns the currently set deviceId for a given device kind
     * @param {MediaDeviceKindEnum} kind the kind of device whose deviceId should be returned
     * @returns {string} the deviceId
     */
    public static getDevice(kind: MediaDeviceKindEnum): string {
        switch (kind) {
            case MediaDeviceKindEnum.AudioOutput:
                return this.getAudioOutput();
            case MediaDeviceKindEnum.AudioInput:
                return this.getAudioInput();
            case MediaDeviceKindEnum.VideoInput:
                return this.getVideoInput();
        }
    }

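    /**
     * Whether calls should start with the microphone muted, backed by the
     * device-level "audioInputMuted" setting.
     */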
    public static get startWithAudioMuted(): boolean {
        return SettingsStore.getValue("audioInputMuted");
    }

    public static set startWithAudioMuted(value: boolean) {
        SettingsStore.setValue("audioInputMuted", null, SettingLevel.DEVICE, value);
    }

    public static get startWithVideoMuted(): boolean {
        return SettingsStore.getValue("videoInputMuted");
    }

    public static set startWithVideoMuted(value: boolean) {
        SettingsStore.setValue("videoInputMuted", null, SettingLevel.DEVICE, value);
    }
}
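
/*
Illustrative usage sketch (not part of this module). A minimal example of how a
settings UI might consume this handler, assuming it runs after media permissions
have been granted so that device labels are populated; the function name
pickFirstMicrophone is hypothetical:

    import MediaDeviceHandler, { MediaDeviceKindEnum, MediaDeviceHandlerEvent } from "./MediaDeviceHandler";

    async function pickFirstMicrophone(): Promise<void> {
        const devices = await MediaDeviceHandler.getDevices();
        if (!devices) return; // enumeration failed, e.g. no WebRTC support

        const mics = devices[MediaDeviceKindEnum.AudioInput];
        // Determine which id should be treated as "default" (prepends a placeholder if needed)
        const defaultId = MediaDeviceHandler.getDefaultDevice(mics);

        // Persist the choice and hand it to the js-sdk media handler
        await MediaDeviceHandler.instance.setDevice(mics[0]?.deviceId ?? defaultId, MediaDeviceKindEnum.AudioInput);
    }

    // React to output device changes made elsewhere in the app
    MediaDeviceHandler.instance.on(MediaDeviceHandlerEvent.AudioOutputChanged, (deviceId: string) => {
        console.log("audio output changed to", deviceId);
    });
*/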