Add ability to choose calls audio backend.

This commit is contained in:
John Preston 2021-01-07 19:27:11 +04:00
parent b23e4fa491
commit e11efe483e
8 changed files with 127 additions and 41 deletions

View file

@ -780,7 +780,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
sendSignalingData(bytes);
});
},
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
};
if (Logs::DebugEnabled()) {
auto callLogFolder = cWorkingDir() + qsl("DebugLogs");

View file

@ -582,7 +582,8 @@ void GroupCall::createAndStartController() {
},
.initialInputDeviceId = _audioInputId.toStdString(),
.initialOutputDeviceId = _audioOutputId.toStdString(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
};
if (Logs::DebugEnabled()) {
auto callLogFolder = cWorkingDir() + qsl("DebugLogs");

View file

@ -480,6 +480,7 @@ void GroupCallSettingsBox(
// Means we finished showing the box.
crl::on_main(box, [=] {
state->micTester = std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId());
state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
});

View file

@ -13,12 +13,14 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "window/themes/window_theme.h"
#include "window/section_widget.h"
#include "base/platform/base_platform_info.h"
#include "webrtc/webrtc_create_adm.h"
#include "facades.h"
namespace Core {
Settings::Settings()
: _sendSubmitWay(Ui::InputSubmitSettings::Enter)
: _callAudioBackend(Webrtc::Backend::OpenAL)
, _sendSubmitWay(Ui::InputSubmitSettings::Enter)
, _floatPlayerColumn(Window::Column::Second)
, _floatPlayerCorner(RectPart::TopRight)
, _dialogsWidthRatio(DefaultDialogsWidthRatio()) {
@ -112,7 +114,8 @@ QByteArray Settings::serialize() const {
<< qint32(_ipRevealWarning ? 1 : 0)
<< qint32(_groupCallPushToTalk ? 1 : 0)
<< _groupCallPushToTalkShortcut
<< qint64(_groupCallPushToTalkDelay);
<< qint64(_groupCallPushToTalkDelay)
<< qint32(_callAudioBackend);
}
return result;
}
@ -183,6 +186,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
qint32 groupCallPushToTalk = _groupCallPushToTalk ? 1 : 0;
QByteArray groupCallPushToTalkShortcut = _groupCallPushToTalkShortcut;
qint64 groupCallPushToTalkDelay = _groupCallPushToTalkDelay;
qint32 callAudioBackend = static_cast<qint32>(_callAudioBackend);
stream >> themesAccentColors;
if (!stream.atEnd()) {
@ -275,6 +279,9 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
>> groupCallPushToTalkShortcut
>> groupCallPushToTalkDelay;
}
if (!stream.atEnd()) {
stream >> callAudioBackend;
}
if (stream.status() != QDataStream::Ok) {
LOG(("App Error: "
"Bad data for Core::Settings::constructFromSerialized()"));
@ -369,6 +376,12 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
_groupCallPushToTalk = (groupCallPushToTalk == 1);
_groupCallPushToTalkShortcut = groupCallPushToTalkShortcut;
_groupCallPushToTalkDelay = groupCallPushToTalkDelay;
auto uncheckedBackend = static_cast<Webrtc::Backend>(callAudioBackend);
switch (uncheckedBackend) {
case Webrtc::Backend::OpenAL:
case Webrtc::Backend::ADM:
case Webrtc::Backend::ADM2: _callAudioBackend = uncheckedBackend; break;
}
}
bool Settings::chatWide() const {

View file

@ -21,6 +21,10 @@ namespace Window {
enum class Column;
} // namespace Window
namespace Webrtc {
enum class Backend;
} // namespace Webrtc
namespace Core {
class Settings final {
@ -217,6 +221,12 @@ public:
void setCallAudioDuckingEnabled(bool value) {
_callAudioDuckingEnabled = value;
}
[[nodiscard]] Webrtc::Backend callAudioBackend() const {
return _callAudioBackend;
}
void setCallAudioBackend(Webrtc::Backend backend) {
_callAudioBackend = backend;
}
[[nodiscard]] bool groupCallPushToTalk() const {
return _groupCallPushToTalk;
}
@ -531,13 +541,14 @@ private:
int _callOutputVolume = 100;
int _callInputVolume = 100;
bool _callAudioDuckingEnabled = true;
Webrtc::Backend _callAudioBackend = Webrtc::Backend();
bool _groupCallPushToTalk = false;
QByteArray _groupCallPushToTalkShortcut;
crl::time _groupCallPushToTalkDelay = 20;
Window::Theme::AccentColors _themesAccentColors;
bool _lastSeenWarningSeen = false;
Ui::SendFilesWay _sendFilesWay;
Ui::InputSubmitSettings _sendSubmitWay;
Ui::SendFilesWay _sendFilesWay = Ui::SendFilesWay();
Ui::InputSubmitSettings _sendSubmitWay = Ui::InputSubmitSettings();
base::flat_map<QString, QString> _soundOverrides;
bool _exeLaunchWarning = true;
bool _ipRevealWarning = true;
@ -553,8 +564,8 @@ private:
rpl::variable<bool> _autoDownloadDictionaries = true;
rpl::variable<bool> _mainMenuAccountsShown = true;
bool _tabbedSelectorSectionEnabled = false; // per-window
Window::Column _floatPlayerColumn; // per-window
RectPart _floatPlayerCorner; // per-window
Window::Column _floatPlayerColumn = Window::Column(); // per-window
RectPart _floatPlayerCorner = RectPart(); // per-window
bool _thirdSectionInfoEnabled = true; // per-window
rpl::event_stream<bool> _thirdSectionInfoEnabledValue; // per-window
int _thirdSectionExtendedBy = -1; // per-window

View file

@ -30,11 +30,18 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_audio_input_tester.h"
#include "webrtc/webrtc_create_adm.h" // Webrtc::Backend.
#include "tgcalls/VideoCaptureInterface.h"
#include "facades.h"
#include "app.h" // App::restart().
#include "styles/style_layers.h"
namespace Settings {
namespace {
using namespace Webrtc;
} // namespace
Calls::Calls(
QWidget *parent,
@ -58,7 +65,7 @@ void Calls::setupContent() {
const auto content = Ui::CreateChild<Ui::VerticalLayout>(this);
const auto &settings = Core::App().settings();
const auto cameras = Webrtc::GetVideoInputList();
const auto cameras = GetVideoInputList();
if (!cameras.empty()) {
const auto hasCall = (Core::App().calls().currentCall() != nullptr);
@ -66,16 +73,16 @@ void Calls::setupContent() {
const auto capturer = capturerOwner.get();
content->lifetime().add([owner = std::move(capturerOwner)]{});
const auto track = content->lifetime().make_state<Webrtc::VideoTrack>(
const auto track = content->lifetime().make_state<VideoTrack>(
(hasCall
? Webrtc::VideoState::Inactive
: Webrtc::VideoState::Active));
? VideoState::Inactive
: VideoState::Active));
const auto currentCameraName = [&] {
const auto i = ranges::find(
cameras,
settings.callVideoInputDeviceId(),
&Webrtc::VideoInput::id);
&VideoInput::id);
return (i != end(cameras))
? i->name
: tr::lng_settings_call_device_default(tr::now);
@ -93,15 +100,15 @@ void Calls::setupContent() {
),
st::settingsButton
)->addClickHandler([=] {
const auto &devices = Webrtc::GetVideoInputList();
const auto &devices = GetVideoInputList();
const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::VideoInput::name)
devices | ranges::view::transform(&VideoInput::name)
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callVideoInputDeviceId(),
&Webrtc::VideoInput::id);
&VideoInput::id);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
: 0;
@ -159,11 +166,11 @@ void Calls::setupContent() {
Core::App().calls().currentCallValue(
) | rpl::start_with_next([=](::Calls::Call *value) {
if (value) {
track->setState(Webrtc::VideoState::Inactive);
track->setState(VideoState::Inactive);
bubbleWrap->resize(bubbleWrap->width(), 0);
} else {
capturer->setPreferredAspectRatio(0.);
track->setState(Webrtc::VideoState::Active);
track->setState(VideoState::Active);
capturer->setOutput(track->sink());
}
}, content->lifetime());
@ -252,6 +259,22 @@ void Calls::setupContent() {
// }, content->lifetime());
//#endif // Q_OS_MAC && !OS_MAC_STORE
const auto backend = [&]() -> QString {
using namespace Webrtc;
switch (settings.callAudioBackend()) {
case Backend::OpenAL: return "OpenAL";
case Backend::ADM: return "WebRTC ADM";
case Backend::ADM2: return "WebRTC ADM2";
}
Unexpected("Value in backend.");
}();
AddButton(
content,
rpl::single("Call audio backend: " + backend),
st::settingsButton
)->addClickHandler([] {
Ui::show(ChooseAudioBackendBox());
});
AddButton(
content,
tr::lng_settings_call_open_system_prefs(),
@ -300,27 +323,30 @@ void Calls::requestPermissionAndStartTestingMicrophone() {
void Calls::startTestingMicrophone() {
_levelUpdateTimer.callEach(kMicTestUpdateInterval);
_micTester = std::make_unique<Webrtc::AudioInputTester>(
_micTester = std::make_unique<AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId());
}
QString CurrentAudioOutputName() {
const auto list = Webrtc::GetAudioOutputList();
const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto i = ranges::find(
list,
Core::App().settings().callOutputDeviceId(),
&Webrtc::AudioOutput::id);
settings.callOutputDeviceId(),
&AudioOutput::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
}
QString CurrentAudioInputName() {
const auto list = Webrtc::GetAudioInputList();
const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto i = ranges::find(
list,
Core::App().settings().callInputDeviceId(),
&Webrtc::AudioInput::id);
settings.callInputDeviceId(),
&AudioInput::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
@ -330,21 +356,22 @@ object_ptr<SingleChoiceBox> ChooseAudioOutputBox(
Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st,
const style::Radio *radioSt) {
const auto &devices = Webrtc::GetAudioOutputList();
const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::AudioOutput::name)
list | ranges::view::transform(&AudioOutput::name)
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callOutputDeviceId(),
&Webrtc::AudioOutput::id);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
list,
settings.callOutputDeviceId(),
&AudioOutput::id);
const auto currentOption = (i != end(list))
? int(i - begin(list) + 1)
: 0;
const auto save = [=](int option) {
const auto deviceId = option
? devices[option - 1].id
? list[option - 1].id
: "default";
Core::App().calls().setCurrentAudioDevice(false, deviceId);
chosen(deviceId, options[option]);
@ -362,21 +389,22 @@ object_ptr<SingleChoiceBox> ChooseAudioInputBox(
Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st,
const style::Radio *radioSt) {
const auto devices = Webrtc::GetAudioInputList();
const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::AudioInput::name)
list | ranges::view::transform(&AudioInput::name)
) | ranges::to_vector;
const auto i = ranges::find(
devices,
list,
Core::App().settings().callInputDeviceId(),
&Webrtc::AudioInput::id);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
&AudioInput::id);
const auto currentOption = (i != end(list))
? int(i - begin(list) + 1)
: 0;
const auto save = [=](int option) {
const auto deviceId = option
? devices[option - 1].id
? list[option - 1].id
: "default";
Core::App().calls().setCurrentAudioDevice(true, deviceId);
chosen(deviceId, options[option]);
@ -390,5 +418,33 @@ object_ptr<SingleChoiceBox> ChooseAudioInputBox(
radioSt);
}
// Builds a single-choice box letting the user pick the audio backend
// used for calls. Saving the choice persists the settings and restarts
// the application, since the backend cannot be switched at runtime.
object_ptr<SingleChoiceBox> ChooseAudioBackendBox(
		const style::Checkbox *st,
		const style::Radio *radioSt) {
	const auto &settings = Core::App().settings();
	// Labels must stay in the same order as the Webrtc::Backend enum
	// (OpenAL, ADM, ADM2) because the chosen option index is cast
	// straight to the enum in `save` below. Spelled "WebRTC ADM" to
	// match the backend name shown on the settings button.
	const auto options = std::vector<QString>{
		"OpenAL",
		"WebRTC ADM",
#ifdef Q_OS_WIN
		"WebRTC ADM2",
#endif // Q_OS_WIN
	};
	// On non-Windows builds ADM2 is not listed, so a persisted ADM2
	// value would index past `options` — fall back to the first entry.
	const auto backendIndex = static_cast<int>(settings.callAudioBackend());
	const auto currentOption = (backendIndex < int(options.size()))
		? backendIndex
		: 0;
	const auto save = [=](int option) {
		Core::App().settings().setCallAudioBackend(
			static_cast<Webrtc::Backend>(option));
		Core::App().saveSettings();
		App::restart();
	};
	return Box<SingleChoiceBox>(
		rpl::single<QString>("Calls audio backend"),
		options,
		currentOption,
		save,
		st,
		radioSt);
}
} // namespace Settings

View file

@ -69,6 +69,9 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
[[nodiscard]] object_ptr<SingleChoiceBox> ChooseAudioBackendBox(
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
} // namespace Settings

@ -1 +1 @@
Subproject commit bada95202ae45a650d76d9572b20fc9cb03365ad
Subproject commit 4dd8c26bd1748a677df34785e62e031785e93e23