Add ability to choose calls audio backend.

This commit is contained in:
John Preston 2021-01-07 19:27:11 +04:00
parent b23e4fa491
commit e11efe483e
8 changed files with 127 additions and 41 deletions

View file

@ -780,7 +780,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
sendSignalingData(bytes); sendSignalingData(bytes);
}); });
}, },
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(), .createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
}; };
if (Logs::DebugEnabled()) { if (Logs::DebugEnabled()) {
auto callLogFolder = cWorkingDir() + qsl("DebugLogs"); auto callLogFolder = cWorkingDir() + qsl("DebugLogs");

View file

@ -582,7 +582,8 @@ void GroupCall::createAndStartController() {
}, },
.initialInputDeviceId = _audioInputId.toStdString(), .initialInputDeviceId = _audioInputId.toStdString(),
.initialOutputDeviceId = _audioOutputId.toStdString(), .initialOutputDeviceId = _audioOutputId.toStdString(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(), .createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
}; };
if (Logs::DebugEnabled()) { if (Logs::DebugEnabled()) {
auto callLogFolder = cWorkingDir() + qsl("DebugLogs"); auto callLogFolder = cWorkingDir() + qsl("DebugLogs");

View file

@ -480,6 +480,7 @@ void GroupCallSettingsBox(
// Means we finished showing the box. // Means we finished showing the box.
crl::on_main(box, [=] { crl::on_main(box, [=] {
state->micTester = std::make_unique<Webrtc::AudioInputTester>( state->micTester = std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId()); Core::App().settings().callInputDeviceId());
state->levelUpdateTimer.callEach(kMicTestUpdateInterval); state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
}); });

View file

@ -13,12 +13,14 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "window/themes/window_theme.h" #include "window/themes/window_theme.h"
#include "window/section_widget.h" #include "window/section_widget.h"
#include "base/platform/base_platform_info.h" #include "base/platform/base_platform_info.h"
#include "webrtc/webrtc_create_adm.h"
#include "facades.h" #include "facades.h"
namespace Core { namespace Core {
Settings::Settings() Settings::Settings()
: _sendSubmitWay(Ui::InputSubmitSettings::Enter) : _callAudioBackend(Webrtc::Backend::OpenAL)
, _sendSubmitWay(Ui::InputSubmitSettings::Enter)
, _floatPlayerColumn(Window::Column::Second) , _floatPlayerColumn(Window::Column::Second)
, _floatPlayerCorner(RectPart::TopRight) , _floatPlayerCorner(RectPart::TopRight)
, _dialogsWidthRatio(DefaultDialogsWidthRatio()) { , _dialogsWidthRatio(DefaultDialogsWidthRatio()) {
@ -112,7 +114,8 @@ QByteArray Settings::serialize() const {
<< qint32(_ipRevealWarning ? 1 : 0) << qint32(_ipRevealWarning ? 1 : 0)
<< qint32(_groupCallPushToTalk ? 1 : 0) << qint32(_groupCallPushToTalk ? 1 : 0)
<< _groupCallPushToTalkShortcut << _groupCallPushToTalkShortcut
<< qint64(_groupCallPushToTalkDelay); << qint64(_groupCallPushToTalkDelay)
<< qint32(_callAudioBackend);
} }
return result; return result;
} }
@ -183,6 +186,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
qint32 groupCallPushToTalk = _groupCallPushToTalk ? 1 : 0; qint32 groupCallPushToTalk = _groupCallPushToTalk ? 1 : 0;
QByteArray groupCallPushToTalkShortcut = _groupCallPushToTalkShortcut; QByteArray groupCallPushToTalkShortcut = _groupCallPushToTalkShortcut;
qint64 groupCallPushToTalkDelay = _groupCallPushToTalkDelay; qint64 groupCallPushToTalkDelay = _groupCallPushToTalkDelay;
qint32 callAudioBackend = static_cast<qint32>(_callAudioBackend);
stream >> themesAccentColors; stream >> themesAccentColors;
if (!stream.atEnd()) { if (!stream.atEnd()) {
@ -275,6 +279,9 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
>> groupCallPushToTalkShortcut >> groupCallPushToTalkShortcut
>> groupCallPushToTalkDelay; >> groupCallPushToTalkDelay;
} }
if (!stream.atEnd()) {
stream >> callAudioBackend;
}
if (stream.status() != QDataStream::Ok) { if (stream.status() != QDataStream::Ok) {
LOG(("App Error: " LOG(("App Error: "
"Bad data for Core::Settings::constructFromSerialized()")); "Bad data for Core::Settings::constructFromSerialized()"));
@ -369,6 +376,12 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
_groupCallPushToTalk = (groupCallPushToTalk == 1); _groupCallPushToTalk = (groupCallPushToTalk == 1);
_groupCallPushToTalkShortcut = groupCallPushToTalkShortcut; _groupCallPushToTalkShortcut = groupCallPushToTalkShortcut;
_groupCallPushToTalkDelay = groupCallPushToTalkDelay; _groupCallPushToTalkDelay = groupCallPushToTalkDelay;
auto uncheckedBackend = static_cast<Webrtc::Backend>(callAudioBackend);
switch (uncheckedBackend) {
case Webrtc::Backend::OpenAL:
case Webrtc::Backend::ADM:
case Webrtc::Backend::ADM2: _callAudioBackend = uncheckedBackend; break;
}
} }
bool Settings::chatWide() const { bool Settings::chatWide() const {

View file

@ -21,6 +21,10 @@ namespace Window {
enum class Column; enum class Column;
} // namespace Window } // namespace Window
namespace Webrtc {
enum class Backend;
} // namespace Webrtc
namespace Core { namespace Core {
class Settings final { class Settings final {
@ -217,6 +221,12 @@ public:
void setCallAudioDuckingEnabled(bool value) { void setCallAudioDuckingEnabled(bool value) {
_callAudioDuckingEnabled = value; _callAudioDuckingEnabled = value;
} }
[[nodiscard]] Webrtc::Backend callAudioBackend() const {
return _callAudioBackend;
}
void setCallAudioBackend(Webrtc::Backend backend) {
_callAudioBackend = backend;
}
[[nodiscard]] bool groupCallPushToTalk() const { [[nodiscard]] bool groupCallPushToTalk() const {
return _groupCallPushToTalk; return _groupCallPushToTalk;
} }
@ -531,13 +541,14 @@ private:
int _callOutputVolume = 100; int _callOutputVolume = 100;
int _callInputVolume = 100; int _callInputVolume = 100;
bool _callAudioDuckingEnabled = true; bool _callAudioDuckingEnabled = true;
Webrtc::Backend _callAudioBackend = Webrtc::Backend();
bool _groupCallPushToTalk = false; bool _groupCallPushToTalk = false;
QByteArray _groupCallPushToTalkShortcut; QByteArray _groupCallPushToTalkShortcut;
crl::time _groupCallPushToTalkDelay = 20; crl::time _groupCallPushToTalkDelay = 20;
Window::Theme::AccentColors _themesAccentColors; Window::Theme::AccentColors _themesAccentColors;
bool _lastSeenWarningSeen = false; bool _lastSeenWarningSeen = false;
Ui::SendFilesWay _sendFilesWay; Ui::SendFilesWay _sendFilesWay = Ui::SendFilesWay();
Ui::InputSubmitSettings _sendSubmitWay; Ui::InputSubmitSettings _sendSubmitWay = Ui::InputSubmitSettings();
base::flat_map<QString, QString> _soundOverrides; base::flat_map<QString, QString> _soundOverrides;
bool _exeLaunchWarning = true; bool _exeLaunchWarning = true;
bool _ipRevealWarning = true; bool _ipRevealWarning = true;
@ -553,8 +564,8 @@ private:
rpl::variable<bool> _autoDownloadDictionaries = true; rpl::variable<bool> _autoDownloadDictionaries = true;
rpl::variable<bool> _mainMenuAccountsShown = true; rpl::variable<bool> _mainMenuAccountsShown = true;
bool _tabbedSelectorSectionEnabled = false; // per-window bool _tabbedSelectorSectionEnabled = false; // per-window
Window::Column _floatPlayerColumn; // per-window Window::Column _floatPlayerColumn = Window::Column(); // per-window
RectPart _floatPlayerCorner; // per-window RectPart _floatPlayerCorner = RectPart(); // per-window
bool _thirdSectionInfoEnabled = true; // per-window bool _thirdSectionInfoEnabled = true; // per-window
rpl::event_stream<bool> _thirdSectionInfoEnabledValue; // per-window rpl::event_stream<bool> _thirdSectionInfoEnabledValue; // per-window
int _thirdSectionExtendedBy = -1; // per-window int _thirdSectionExtendedBy = -1; // per-window

View file

@ -30,11 +30,18 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "webrtc/webrtc_media_devices.h" #include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_video_track.h" #include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_audio_input_tester.h" #include "webrtc/webrtc_audio_input_tester.h"
#include "webrtc/webrtc_create_adm.h" // Webrtc::Backend.
#include "tgcalls/VideoCaptureInterface.h" #include "tgcalls/VideoCaptureInterface.h"
#include "facades.h" #include "facades.h"
#include "app.h" // App::restart().
#include "styles/style_layers.h" #include "styles/style_layers.h"
namespace Settings { namespace Settings {
namespace {
using namespace Webrtc;
} // namespace
Calls::Calls( Calls::Calls(
QWidget *parent, QWidget *parent,
@ -58,7 +65,7 @@ void Calls::setupContent() {
const auto content = Ui::CreateChild<Ui::VerticalLayout>(this); const auto content = Ui::CreateChild<Ui::VerticalLayout>(this);
const auto &settings = Core::App().settings(); const auto &settings = Core::App().settings();
const auto cameras = Webrtc::GetVideoInputList(); const auto cameras = GetVideoInputList();
if (!cameras.empty()) { if (!cameras.empty()) {
const auto hasCall = (Core::App().calls().currentCall() != nullptr); const auto hasCall = (Core::App().calls().currentCall() != nullptr);
@ -66,16 +73,16 @@ void Calls::setupContent() {
const auto capturer = capturerOwner.get(); const auto capturer = capturerOwner.get();
content->lifetime().add([owner = std::move(capturerOwner)]{}); content->lifetime().add([owner = std::move(capturerOwner)]{});
const auto track = content->lifetime().make_state<Webrtc::VideoTrack>( const auto track = content->lifetime().make_state<VideoTrack>(
(hasCall (hasCall
? Webrtc::VideoState::Inactive ? VideoState::Inactive
: Webrtc::VideoState::Active)); : VideoState::Active));
const auto currentCameraName = [&] { const auto currentCameraName = [&] {
const auto i = ranges::find( const auto i = ranges::find(
cameras, cameras,
settings.callVideoInputDeviceId(), settings.callVideoInputDeviceId(),
&Webrtc::VideoInput::id); &VideoInput::id);
return (i != end(cameras)) return (i != end(cameras))
? i->name ? i->name
: tr::lng_settings_call_device_default(tr::now); : tr::lng_settings_call_device_default(tr::now);
@ -93,15 +100,15 @@ void Calls::setupContent() {
), ),
st::settingsButton st::settingsButton
)->addClickHandler([=] { )->addClickHandler([=] {
const auto &devices = Webrtc::GetVideoInputList(); const auto &devices = GetVideoInputList();
const auto options = ranges::view::concat( const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)), ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::VideoInput::name) devices | ranges::view::transform(&VideoInput::name)
) | ranges::to_vector; ) | ranges::to_vector;
const auto i = ranges::find( const auto i = ranges::find(
devices, devices,
Core::App().settings().callVideoInputDeviceId(), Core::App().settings().callVideoInputDeviceId(),
&Webrtc::VideoInput::id); &VideoInput::id);
const auto currentOption = (i != end(devices)) const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1) ? int(i - begin(devices) + 1)
: 0; : 0;
@ -159,11 +166,11 @@ void Calls::setupContent() {
Core::App().calls().currentCallValue( Core::App().calls().currentCallValue(
) | rpl::start_with_next([=](::Calls::Call *value) { ) | rpl::start_with_next([=](::Calls::Call *value) {
if (value) { if (value) {
track->setState(Webrtc::VideoState::Inactive); track->setState(VideoState::Inactive);
bubbleWrap->resize(bubbleWrap->width(), 0); bubbleWrap->resize(bubbleWrap->width(), 0);
} else { } else {
capturer->setPreferredAspectRatio(0.); capturer->setPreferredAspectRatio(0.);
track->setState(Webrtc::VideoState::Active); track->setState(VideoState::Active);
capturer->setOutput(track->sink()); capturer->setOutput(track->sink());
} }
}, content->lifetime()); }, content->lifetime());
@ -252,6 +259,22 @@ void Calls::setupContent() {
// }, content->lifetime()); // }, content->lifetime());
//#endif // Q_OS_MAC && !OS_MAC_STORE //#endif // Q_OS_MAC && !OS_MAC_STORE
const auto backend = [&]() -> QString {
using namespace Webrtc;
switch (settings.callAudioBackend()) {
case Backend::OpenAL: return "OpenAL";
case Backend::ADM: return "WebRTC ADM";
case Backend::ADM2: return "WebRTC ADM2";
}
Unexpected("Value in backend.");
}();
AddButton(
content,
rpl::single("Call audio backend: " + backend),
st::settingsButton
)->addClickHandler([] {
Ui::show(ChooseAudioBackendBox());
});
AddButton( AddButton(
content, content,
tr::lng_settings_call_open_system_prefs(), tr::lng_settings_call_open_system_prefs(),
@ -300,27 +323,30 @@ void Calls::requestPermissionAndStartTestingMicrophone() {
void Calls::startTestingMicrophone() { void Calls::startTestingMicrophone() {
_levelUpdateTimer.callEach(kMicTestUpdateInterval); _levelUpdateTimer.callEach(kMicTestUpdateInterval);
_micTester = std::make_unique<Webrtc::AudioInputTester>( _micTester = std::make_unique<AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId()); Core::App().settings().callInputDeviceId());
} }
QString CurrentAudioOutputName() { QString CurrentAudioOutputName() {
const auto list = Webrtc::GetAudioOutputList(); const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto i = ranges::find( const auto i = ranges::find(
list, list,
Core::App().settings().callOutputDeviceId(), settings.callOutputDeviceId(),
&Webrtc::AudioOutput::id); &AudioOutput::id);
return (i != end(list)) return (i != end(list))
? i->name ? i->name
: tr::lng_settings_call_device_default(tr::now); : tr::lng_settings_call_device_default(tr::now);
} }
QString CurrentAudioInputName() { QString CurrentAudioInputName() {
const auto list = Webrtc::GetAudioInputList(); const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto i = ranges::find( const auto i = ranges::find(
list, list,
Core::App().settings().callInputDeviceId(), settings.callInputDeviceId(),
&Webrtc::AudioInput::id); &AudioInput::id);
return (i != end(list)) return (i != end(list))
? i->name ? i->name
: tr::lng_settings_call_device_default(tr::now); : tr::lng_settings_call_device_default(tr::now);
@ -330,21 +356,22 @@ object_ptr<SingleChoiceBox> ChooseAudioOutputBox(
Fn<void(QString id, QString name)> chosen, Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st, const style::Checkbox *st,
const style::Radio *radioSt) { const style::Radio *radioSt) {
const auto &devices = Webrtc::GetAudioOutputList(); const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto options = ranges::view::concat( const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)), ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::AudioOutput::name) list | ranges::view::transform(&AudioOutput::name)
) | ranges::to_vector; ) | ranges::to_vector;
const auto i = ranges::find( const auto i = ranges::find(
devices, list,
Core::App().settings().callOutputDeviceId(), settings.callOutputDeviceId(),
&Webrtc::AudioOutput::id); &AudioOutput::id);
const auto currentOption = (i != end(devices)) const auto currentOption = (i != end(list))
? int(i - begin(devices) + 1) ? int(i - begin(list) + 1)
: 0; : 0;
const auto save = [=](int option) { const auto save = [=](int option) {
const auto deviceId = option const auto deviceId = option
? devices[option - 1].id ? list[option - 1].id
: "default"; : "default";
Core::App().calls().setCurrentAudioDevice(false, deviceId); Core::App().calls().setCurrentAudioDevice(false, deviceId);
chosen(deviceId, options[option]); chosen(deviceId, options[option]);
@ -362,21 +389,22 @@ object_ptr<SingleChoiceBox> ChooseAudioInputBox(
Fn<void(QString id, QString name)> chosen, Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st, const style::Checkbox *st,
const style::Radio *radioSt) { const style::Radio *radioSt) {
const auto devices = Webrtc::GetAudioInputList(); const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto options = ranges::view::concat( const auto options = ranges::view::concat(
ranges::view::single(tr::lng_settings_call_device_default(tr::now)), ranges::view::single(tr::lng_settings_call_device_default(tr::now)),
devices | ranges::view::transform(&Webrtc::AudioInput::name) list | ranges::view::transform(&AudioInput::name)
) | ranges::to_vector; ) | ranges::to_vector;
const auto i = ranges::find( const auto i = ranges::find(
devices, list,
Core::App().settings().callInputDeviceId(), Core::App().settings().callInputDeviceId(),
&Webrtc::AudioInput::id); &AudioInput::id);
const auto currentOption = (i != end(devices)) const auto currentOption = (i != end(list))
? int(i - begin(devices) + 1) ? int(i - begin(list) + 1)
: 0; : 0;
const auto save = [=](int option) { const auto save = [=](int option) {
const auto deviceId = option const auto deviceId = option
? devices[option - 1].id ? list[option - 1].id
: "default"; : "default";
Core::App().calls().setCurrentAudioDevice(true, deviceId); Core::App().calls().setCurrentAudioDevice(true, deviceId);
chosen(deviceId, options[option]); chosen(deviceId, options[option]);
@ -390,5 +418,33 @@ object_ptr<SingleChoiceBox> ChooseAudioInputBox(
radioSt); radioSt);
} }
// Shows a single-choice box for picking the calls audio backend
// (OpenAL / WebRTC ADM / WebRTC ADM2 on Windows). Saving the choice
// persists settings and restarts the application, since the backend
// is only read at call-controller creation time.
//
// st/radioSt: optional style overrides forwarded to SingleChoiceBox.
object_ptr<SingleChoiceBox> ChooseAudioBackendBox(
		const style::Checkbox *st,
		const style::Radio *radioSt) {
	const auto &settings = Core::App().settings();
	// Labels match the ones rendered on the settings button
	// ("Call audio backend: ...") — keep capitalization in sync.
	const auto options = std::vector<QString>{
		"OpenAL",
		"WebRTC ADM",
#ifdef Q_OS_WIN
		"WebRTC ADM2",
#endif // Q_OS_WIN
	};
	// The stored backend may be unavailable on this platform
	// (e.g. ADM2 saved on Windows, settings migrated elsewhere),
	// so guard against indexing past the options list.
	const auto backendIndex = static_cast<int>(settings.callAudioBackend());
	const auto currentOption = (backendIndex >= 0
		&& backendIndex < int(options.size()))
		? backendIndex
		: 0;
	const auto save = [=](int option) {
		// Option index maps directly onto Webrtc::Backend values.
		Core::App().settings().setCallAudioBackend(
			static_cast<Webrtc::Backend>(option));
		Core::App().saveSettings();
		App::restart();
	};
	return Box<SingleChoiceBox>(
		rpl::single<QString>("Calls audio backend"),
		options,
		currentOption,
		save,
		st,
		radioSt);
}
} // namespace Settings } // namespace Settings

View file

@ -69,6 +69,9 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
Fn<void(QString id, QString name)> chosen, Fn<void(QString id, QString name)> chosen,
const style::Checkbox *st = nullptr, const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr); const style::Radio *radioSt = nullptr);
[[nodiscard]] object_ptr<SingleChoiceBox> ChooseAudioBackendBox(
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
} // namespace Settings } // namespace Settings

@ -1 +1 @@
Subproject commit bada95202ae45a650d76d9572b20fc9cb03365ad Subproject commit 4dd8c26bd1748a677df34785e62e031785e93e23