mirror of https://github.com/AyuGram/AyuGramDesktop.git
synced 2025-06-05 06:33:57 +02:00

Fully migrate to Webrtc::Environment.

parent 9a6ab3b0f2
commit 104ba4db7c

21 changed files with 329 additions and 528 deletions
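The pattern repeated throughout the hunks below: instead of selecting a Webrtc::Backend up front, Call and GroupCall hand a saveSetDeviceIdCallback into Webrtc::AudioDeviceModuleCreator(...), store the received setter as _setDeviceIdCallback, and route later playback/capture device-id changes through it before calling _instance->setAudio*Device(). A minimal self-contained C++ sketch of that hand-off follows (illustrative only; FakeCall and the simplified signatures are not part of this commit):

// Sketch of the "save the device-id setter, replay initial ids, route later
// changes through it" pattern; not tdesktop code.
#include <functional>
#include <iostream>
#include <string>

enum class DeviceType { Playback, Capture };
using SetDeviceIdCallback = std::function<void(DeviceType, std::string)>;

struct FakeCall {
	SetDeviceIdCallback setDeviceIdCallback; // analogue of _setDeviceIdCallback

	// Analogue of saveSetDeviceIdCallback passed to the ADM creator.
	void saveSetDeviceIdCallback(
			SetDeviceIdCallback cb,
			std::string playbackInitial,
			std::string captureInitial) {
		cb(DeviceType::Playback, playbackInitial);
		cb(DeviceType::Capture, captureInitial);
		setDeviceIdCallback = std::move(cb); // later changes go through it
	}

	// Analogue of the _playbackDeviceId.changes() handler.
	void onPlaybackDeviceChanged(const std::string &id) {
		if (setDeviceIdCallback) {
			setDeviceIdCallback(DeviceType::Playback, id);
		}
	}
};

int main() {
	FakeCall call;
	call.saveSetDeviceIdCallback(
		[](DeviceType, std::string id) { std::cout << "use " << id << '\n'; },
		"default",
		"default");
	call.onPlaybackDeviceChanged("headset");
}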
@@ -1235,8 +1235,6 @@ PRIVATE
     platform/mac/touchbar/mac_touchbar_manager.mm
     platform/mac/touchbar/mac_touchbar_media_view.h
     platform/mac/touchbar/mac_touchbar_media_view.mm
-    platform/win/audio_win.cpp
-    platform/win/audio_win.h
     platform/win/file_utilities_win.cpp
     platform/win/file_utilities_win.h
     platform/win/launcher_win.cpp
@@ -1260,7 +1258,6 @@ PRIVATE
     platform/win/windows_autostart_task.h
     platform/win/windows_toast_activator.cpp
    platform/win/windows_toast_activator.h
-    platform/platform_audio.h
     platform/platform_file_utilities.h
     platform/platform_launcher.h
     platform/platform_integration.cpp
@@ -25,8 +25,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/audio/media_audio_track.h"
 #include "base/platform/base_platform_info.h"
 #include "calls/calls_panel.h"
+#include "webrtc/webrtc_environment.h"
 #include "webrtc/webrtc_video_track.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "webrtc/webrtc_create_adm.h"
 #include "data/data_user.h"
 #include "data/data_session.h"
@@ -429,30 +429,37 @@ void Call::setMuted(bool mute) {
 
 void Call::setupMediaDevices() {
 	_playbackDeviceId.changes() | rpl::filter([=] {
-		return _instance != nullptr;
+		return _instance && _setDeviceIdCallback;
 	}) | rpl::start_with_next([=](const QString &deviceId) {
+		_setDeviceIdCallback(
+			Webrtc::DeviceType::Playback,
+			deviceId);
 		_instance->setAudioOutputDevice(deviceId.toStdString());
 	}, _lifetime);
 
 	_captureDeviceId.changes() | rpl::filter([=] {
-		return _instance != nullptr;
+		return _instance && _setDeviceIdCallback;
 	}) | rpl::start_with_next([=](const QString &deviceId) {
+		_setDeviceIdCallback(
+			Webrtc::DeviceType::Capture,
+			deviceId);
 		_instance->setAudioInputDevice(deviceId.toStdString());
 	}, _lifetime);
 }
 
 void Call::setupOutgoingVideo() {
-	static const auto hasDevices = [] {
-		return !Webrtc::GetVideoInputList().empty();
+	const auto cameraId = [] {
+		return Core::App().mediaDevices().defaultId(
+			Webrtc::DeviceType::Camera);
 	};
 	const auto started = _videoOutgoing->state();
-	if (!hasDevices()) {
+	if (cameraId().isEmpty()) {
 		_videoOutgoing->setState(Webrtc::VideoState::Inactive);
 	}
 	_videoOutgoing->stateValue(
 	) | rpl::start_with_next([=](Webrtc::VideoState state) {
 		if (state != Webrtc::VideoState::Inactive
-			&& !hasDevices()
+			&& cameraId().isEmpty()
 			&& !_videoCaptureIsScreencast) {
 			_errors.fire({ ErrorType::NoCamera });
 			_videoOutgoing->setState(Webrtc::VideoState::Inactive);
@@ -892,6 +899,33 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
 	const auto versionString = version.toStdString();
 	const auto &settings = Core::App().settings();
 	const auto weak = base::make_weak(this);
+
+	_setDeviceIdCallback = nullptr;
+	const auto playbackDeviceIdInitial = _playbackDeviceId.current();
+	const auto captureDeviceIdInitial = _captureDeviceId.current();
+	const auto saveSetDeviceIdCallback = [=](
+			Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
+		setDeviceIdCallback(
+			Webrtc::DeviceType::Playback,
+			playbackDeviceIdInitial);
+		setDeviceIdCallback(
+			Webrtc::DeviceType::Capture,
+			captureDeviceIdInitial);
+		crl::on_main(weak, [=] {
+			_setDeviceIdCallback = std::move(setDeviceIdCallback);
+			const auto playback = _playbackDeviceId.current();
+			if (_instance && playback != playbackDeviceIdInitial) {
+				_setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
+				_instance->setAudioOutputDevice(playback.toStdString());
+			}
+			const auto capture = _captureDeviceId.current();
+			if (_instance && capture != captureDeviceIdInitial) {
+				_setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
+				_instance->setAudioInputDevice(capture.toStdString());
+			}
+		});
+	};
+
 	tgcalls::Descriptor descriptor = {
 		.version = versionString,
 		.config = tgcalls::Config{
@@ -910,8 +944,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
 			std::move(encryptionKeyValue),
 			(_type == Type::Outgoing)),
 		.mediaDevicesConfig = tgcalls::MediaDevicesConfig{
-			.audioInputId = _captureDeviceId.current().toStdString(),
-			.audioOutputId = _playbackDeviceId.current().toStdString(),
+			.audioInputId = captureDeviceIdInitial.toStdString(),
+			.audioOutputId = playbackDeviceIdInitial.toStdString(),
 			.inputVolume = 1.f,//settings.callInputVolume() / 100.f,
 			.outputVolume = 1.f,//settings.callOutputVolume() / 100.f,
 		},
@@ -942,7 +976,7 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
 			});
 		},
 		.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
-			settings.callAudioBackend()),
+			saveSetDeviceIdCallback),
 	};
 	if (Logs::DebugEnabled()) {
 		const auto callLogFolder = cWorkingDir() + u"DebugLogs"_q;
@@ -271,6 +271,7 @@ private:
 	base::DelayedCallTimer _finishByTimeoutTimer;
 	base::Timer _discardByTimeoutTimer;
 
+	Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
 	Webrtc::DeviceId _playbackDeviceId;
 	Webrtc::DeviceId _captureDeviceId;
 	Webrtc::DeviceId _cameraDeviceId;
@@ -48,8 +48,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/power_save_blocker.h"
 #include "media/streaming/media_streaming_utility.h"
 #include "window/main_window.h"
+#include "webrtc/webrtc_environment.h"
 #include "webrtc/webrtc_video_track.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "styles/style_calls.h"
 #include "styles/style_chat.h"
 
@@ -238,13 +238,14 @@ void Panel::initControls() {
 		}
 	});
 	_screencast->entity()->setClickedCallback([=] {
+		const auto env = &Core::App().mediaDevices();
 		if (!_call) {
 			return;
-		} else if (!Webrtc::DesktopCaptureAllowed()) {
+		} else if (!env->desktopCaptureAllowed()) {
 			if (auto box = Group::ScreenSharingPrivacyRequestBox()) {
 				_layerBg->showBox(std::move(box));
 			}
-		} else if (const auto source = Webrtc::UniqueDesktopCaptureSource()) {
+		} else if (const auto source = env->uniqueDesktopCaptureSource()) {
 			if (_call->isSharingScreen()) {
 				_call->toggleScreenSharing(std::nullopt);
 			} else {
@@ -2065,14 +2065,16 @@ void GroupCall::applyOtherParticipantUpdate(
 
 void GroupCall::setupMediaDevices() {
 	_playbackDeviceId.changes() | rpl::filter([=] {
-		return _instance != nullptr;
+		return _instance && _setDeviceIdCallback;
 	}) | rpl::start_with_next([=](const QString &deviceId) {
+		_setDeviceIdCallback(Webrtc::DeviceType::Playback, deviceId);
 		_instance->setAudioOutputDevice(deviceId.toStdString());
 	}, _lifetime);
 
 	_captureDeviceId.changes() | rpl::filter([=] {
-		return _instance != nullptr;
+		return _instance && _setDeviceIdCallback;
 	}) | rpl::start_with_next([=](const QString &deviceId) {
+		_setDeviceIdCallback(Webrtc::DeviceType::Capture, deviceId);
 		_instance->setAudioInputDevice(deviceId.toStdString());
 	}, _lifetime);
 
@@ -2338,6 +2340,31 @@ bool GroupCall::tryCreateController() {
 
 	const auto weak = base::make_weak(&_instanceGuard);
 	const auto myLevel = std::make_shared<tgcalls::GroupLevelValue>();
+	const auto playbackDeviceIdInitial = _playbackDeviceId.current();
+	const auto captureDeviceIdInitial = _captureDeviceId.current();
+	const auto saveSetDeviceIdCallback = [=](
+			Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
+		setDeviceIdCallback(
+			Webrtc::DeviceType::Playback,
+			playbackDeviceIdInitial);
+		setDeviceIdCallback(
+			Webrtc::DeviceType::Capture,
+			captureDeviceIdInitial);
+		crl::on_main(weak, [=] {
+			_setDeviceIdCallback = std::move(setDeviceIdCallback);
+			const auto playback = _playbackDeviceId.current();
+			if (_instance && playback != playbackDeviceIdInitial) {
+				_setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
+				_instance->setAudioOutputDevice(playback.toStdString());
+			}
+			const auto capture = _captureDeviceId.current();
+			if (_instance && capture != captureDeviceIdInitial) {
+				_setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
+				_instance->setAudioInputDevice(capture.toStdString());
+			}
+		});
+	};
+
 	tgcalls::GroupInstanceDescriptor descriptor = {
 		.threads = tgcalls::StaticThreads::getThreads(),
 		.config = tgcalls::GroupConfig{
@@ -2360,10 +2387,10 @@ bool GroupCall::tryCreateController() {
 			}
 			crl::on_main(weak, [=] { audioLevelsUpdated(data); });
 		},
-		.initialInputDeviceId = _captureDeviceId.current().toStdString(),
-		.initialOutputDeviceId = _playbackDeviceId.current().toStdString(),
+		.initialInputDeviceId = captureDeviceIdInitial.toStdString(),
+		.initialOutputDeviceId = playbackDeviceIdInitial.toStdString(),
 		.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
-			settings.callAudioBackend()),
+			saveSetDeviceIdCallback),
 		.videoCapture = _cameraCapture,
 		.requestCurrentTime = [=, call = base::make_weak(this)](
 				std::function<void(int64_t)> done) {
@@ -667,6 +667,7 @@ private:
 
 	crl::time _lastSendProgressUpdate = 0;
 
+	Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
 	Webrtc::DeviceId _playbackDeviceId;
 	Webrtc::DeviceId _captureDeviceId;
 	Webrtc::DeviceId _cameraDeviceId;
@@ -54,8 +54,8 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/power_save_blocker.h"
 #include "apiwrap.h" // api().kick.
 #include "api/api_chat_participants.h" // api().kick.
+#include "webrtc/webrtc_environment.h"
 #include "webrtc/webrtc_video_track.h"
-#include "webrtc/webrtc_media_devices.h" // UniqueDesktopCaptureSource.
 #include "webrtc/webrtc_audio_input_tester.h"
 #include "styles/style_calls.h"
 #include "styles/style_layers.h"
@@ -1374,9 +1374,10 @@ void Panel::chooseShareScreenSource() {
 		return;
 	}
 	const auto choose = [=] {
-		if (!Webrtc::DesktopCaptureAllowed()) {
+		const auto env = &Core::App().mediaDevices();
+		if (!env->desktopCaptureAllowed()) {
 			screenSharingPrivacyRequest();
-		} else if (const auto source = Webrtc::UniqueDesktopCaptureSource()) {
+		} else if (const auto source = env->uniqueDesktopCaptureSource()) {
 			if (_call->isSharingScreen()) {
 				_call->toggleScreenSharing(std::nullopt);
 			} else {
@@ -2003,7 +2004,8 @@ void Panel::trackControlOver(not_null<Ui::RpWidget*> control, bool over) {
 }
 
 void Panel::showStickedTooltip() {
-	static const auto kHasCamera = !Webrtc::GetVideoInputList().empty();
+	static const auto kHasCamera = !Core::App().mediaDevices().defaultId(
+		Webrtc::DeviceType::Camera).isEmpty();
 	const auto callReady = (_call->state() == State::Joined
 		|| _call->state() == State::Connecting);
 	if (!(_stickedTooltipsShown & StickedTooltip::Camera)
@@ -42,7 +42,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "core/application.h"
 #include "core/core_settings.h"
 #include "webrtc/webrtc_audio_input_tester.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "settings/settings_calls.h"
 #include "main/main_session.h"
 #include "apiwrap.h"
@@ -250,6 +249,7 @@ void SettingsBox(
 	const auto weakBox = Ui::MakeWeak(box);
 
 	struct State {
+		std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
 		std::unique_ptr<Webrtc::AudioInputTester> micTester;
 		Ui::LevelMeter *micTestLevel = nullptr;
 		float micLevel = 0.;
@@ -327,9 +327,6 @@ void SettingsBox(
 			crl::guard(box, [=](const QString &id) {
 				Core::App().settings().setCallCaptureDeviceId(id);
 				Core::App().saveSettingsDelayed();
-				if (state->micTester) {
-					state->micTester->setDeviceId(id);
-				}
 			}),
 			&st::groupCallCheckbox,
 			&st::groupCallRadio));
@@ -773,9 +770,14 @@ void SettingsBox(
 	box->setShowFinishedCallback([=] {
 		// Means we finished showing the box.
 		crl::on_main(box, [=] {
+			state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
+				&Core::App().mediaDevices(),
+				Webrtc::DeviceType::Capture,
+				Webrtc::DeviceIdValueWithFallback(
+					Core::App().settings().callCaptureDeviceIdValue(),
+					Core::App().settings().captureDeviceIdValue()));
 			state->micTester = std::make_unique<Webrtc::AudioInputTester>(
-				Core::App().settings().callAudioBackend(),
-				Core::App().settings().callCaptureDeviceId());
+				state->computedDeviceId->value());
 			state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
 		});
 	});
@@ -884,8 +886,9 @@ MicLevelTester::MicLevelTester(Fn<void()> show)
 : _timer([=] { check(); })
 , _tester(
 	std::make_unique<Webrtc::AudioInputTester>(
-		Core::App().settings().callAudioBackend(),
-		Core::App().settings().callCaptureDeviceId())) {
+		Webrtc::DeviceIdValueWithFallback(
+			Core::App().settings().callCaptureDeviceIdValue(),
+			Core::App().settings().captureDeviceIdValue()))) {
 	_timer.callEach(kMicrophoneTooltipCheckInterval);
 }
 
@@ -431,7 +431,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 	qint32 groupCallPushToTalk = _groupCallPushToTalk ? 1 : 0;
 	QByteArray groupCallPushToTalkShortcut = _groupCallPushToTalkShortcut;
 	qint64 groupCallPushToTalkDelay = _groupCallPushToTalkDelay;
-	qint32 callAudioBackend = 0;
+	qint32 legacyCallAudioBackend = 0;
 	qint32 disableCallsLegacy = 0;
 	QByteArray windowPosition;
 	std::vector<RecentEmojiPreload> recentEmojiPreload;
@@ -565,7 +565,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 			>> groupCallPushToTalkDelay;
 	}
 	if (!stream.atEnd()) {
-		stream >> callAudioBackend;
+		stream >> legacyCallAudioBackend;
 	}
 	if (!stream.atEnd()) {
 		stream >> disableCallsLegacy;
@@ -991,10 +991,6 @@ void Settings::setTabbedReplacedWithInfo(bool enabled) {
 	}
 }
 
-Webrtc::Backend Settings::callAudioBackend() const {
-	return Webrtc::Backend::OpenAL;
-}
-
 void Settings::setDialogsWidthRatio(float64 ratio) {
 	_dialogsWidthRatio = ratio;
 }
@@ -29,10 +29,6 @@ namespace Window {
 enum class Column;
 } // namespace Window
 
-namespace Webrtc {
-enum class Backend;
-} // namespace Webrtc
-
 namespace Calls::Group {
 enum class StickedTooltip;
 } // namespace Calls::Group
@@ -343,7 +339,6 @@ public:
 	void setCallAudioDuckingEnabled(bool value) {
 		_callAudioDuckingEnabled = value;
 	}
-	[[nodiscard]] Webrtc::Backend callAudioBackend() const;
 	[[nodiscard]] bool disableCallsLegacy() const {
 		return _disableCallsLegacy;
 	}
@@ -14,11 +14,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/media_common.h"
 #include "media/streaming/media_streaming_utility.h"
 #include "webrtc/webrtc_environment.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "data/data_document.h"
 #include "data/data_file_origin.h"
 #include "data/data_session.h"
-#include "platform/platform_audio.h"
 #include "core/application.h"
 #include "core/core_settings.h"
 #include "main/main_session.h"
@@ -73,57 +71,6 @@ bool PlaybackErrorHappened() {
 	return false;
 }
 
-void EnumeratePlaybackDevices() {
-	auto deviceNames = QStringList();
-	auto devices = [&] {
-		if (alcIsExtensionPresent(nullptr, "ALC_ENUMERATE_ALL_EXT")) {
-			return alcGetString(nullptr, alcGetEnumValue(nullptr, "ALC_ALL_DEVICES_SPECIFIER"));
-		} else {
-			return alcGetString(nullptr, ALC_DEVICE_SPECIFIER);
-		}
-	}();
-	Assert(devices != nullptr);
-	while (*devices != 0) {
-		auto deviceName8Bit = QByteArray(devices);
-		auto deviceName = QString::fromUtf8(deviceName8Bit);
-		deviceNames.append(deviceName);
-		devices += deviceName8Bit.size() + 1;
-	}
-	LOG(("Audio Playback Devices: %1").arg(deviceNames.join(';')));
-
-	auto device = [&] {
-		if (alcIsExtensionPresent(nullptr, "ALC_ENUMERATE_ALL_EXT")) {
-			return alcGetString(nullptr, alcGetEnumValue(nullptr, "ALC_DEFAULT_ALL_DEVICES_SPECIFIER"));
-		} else {
-			return alcGetString(nullptr, ALC_DEFAULT_DEVICE_SPECIFIER);
-		}
-	}();
-	if (device) {
-		LOG(("Audio Playback Default Device: %1").arg(QString::fromUtf8(device)));
-	} else {
-		LOG(("Audio Playback Default Device: (null)"));
-	}
-}
-
-void EnumerateCaptureDevices() {
-	auto deviceNames = QStringList();
-	auto devices = alcGetString(nullptr, ALC_CAPTURE_DEVICE_SPECIFIER);
-	Assert(devices != nullptr);
-	while (*devices != 0) {
-		auto deviceName8Bit = QByteArray(devices);
-		auto deviceName = QString::fromUtf8(deviceName8Bit);
-		deviceNames.append(deviceName);
-		devices += deviceName8Bit.size() + 1;
-	}
-	LOG(("Audio Capture Devices: %1").arg(deviceNames.join(';')));
-
-	if (auto device = alcGetString(nullptr, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {
-		LOG(("Audio Capture Default Device: %1").arg(QString::fromUtf8(device)));
-	} else {
-		LOG(("Audio Capture Default Device: (null)"));
-	}
-}
-
 // Thread: Any. Must be locked: AudioMutex.
 void DestroyPlaybackDevice() {
 	if (AudioContext) {
@@ -142,7 +89,7 @@ void DestroyPlaybackDevice() {
 bool CreatePlaybackDevice() {
 	if (AudioDevice) return true;
 
-	const auto id = Current().deviceId().toStdString();
+	const auto id = Current().playbackDeviceId().toStdString();
 	AudioDevice = alcOpenDevice(id.c_str());
 	if (!AudioDevice) {
 		LOG(("Audio Error: Could not create default playback device, refreshing.."));
@@ -193,25 +140,14 @@ void Start(not_null<Instance*> instance) {
 	qRegisterMetaType<AudioMsgId>();
 	qRegisterMetaType<VoiceWaveform>();
 
-	if (!Webrtc::InitPipewireStubs()) {
-		LOG(("Audio Info: Failed to load pipewire 0.3 stubs."));
-	}
-
-	auto loglevel = getenv("ALSOFT_LOGLEVEL");
+	const auto loglevel = getenv("ALSOFT_LOGLEVEL");
 	LOG(("OpenAL Logging Level: %1").arg(loglevel ? loglevel : "(not set)"));
 
-	EnumeratePlaybackDevices();
-	EnumerateCaptureDevices();
-
 	MixerInstance = new Player::Mixer(instance);
-
-	//Platform::Audio::Init();
 }
 
 // Thread: Main.
 void Finish(not_null<Instance*> instance) {
-	//Platform::Audio::DeInit();
-
 	// MixerInstance variable should be modified under AudioMutex protection.
 	// So it is modified in the ~Mixer() destructor after all tracks are cleared.
 	delete MixerInstance;
@@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 
 #include "media/audio/media_audio_capture_common.h"
 #include "media/audio/media_audio_ffmpeg_loader.h"
+#include "media/audio/media_audio_track.h"
 #include "ffmpeg/ffmpeg_utility.h"
 #include "base/timer.h"
 
@@ -84,7 +85,7 @@ public:
 	Inner(QThread *thread);
 	~Inner();
 
-	void start(Fn<void(Update)> updated, Fn<void()> error);
+	void start(QString id, Fn<void(Update)> updated, Fn<void()> error);
 	void stop(Fn<void(Result&&)> callback = nullptr);
 	void pause(bool value, Fn<void(Result&&)> callback);
 
@@ -129,8 +130,9 @@ Instance::Instance() : _inner(std::make_unique<Inner>(&_thread)) {
 
 void Instance::start() {
 	_updates.fire_done();
+	const auto id = Audio::Current().captureDeviceId();
 	InvokeQueued(_inner.get(), [=] {
-		_inner->start([=](Update update) {
+		_inner->start(id, [=](Update update) {
 			crl::on_main(this, [=] {
 				_updates.fire_copy(update);
 			});
@@ -292,7 +294,10 @@ void Instance::Inner::fail() {
 	}
 }
 
-void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
+void Instance::Inner::start(
+		QString id,
+		Fn<void(Update)> updated,
+		Fn<void()> error) {
 	_updated = std::move(updated);
 	_error = std::move(error);
 	if (_paused) {
@@ -300,7 +305,12 @@ void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
 	}
 
 	// Start OpenAL Capture
-	d->device = alcCaptureOpenDevice(nullptr, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5);
+	const auto utf = id.toStdString();
+	d->device = alcCaptureOpenDevice(
+		utf.c_str(),
+		kCaptureFrequency,
+		AL_FORMAT_MONO16,
+		kCaptureFrequency / 5);
 	if (!d->device) {
 		LOG(("Audio Error: capture device not present!"));
 		fail();
@@ -248,7 +248,12 @@ Instance::Instance()
 	&Core::App().mediaDevices(),
 	Webrtc::DeviceType::Playback,
 	Webrtc::DeviceIdOrDefault(
-		Core::App().settings().playbackDeviceIdValue())) {
+		Core::App().settings().playbackDeviceIdValue()))
+, _captureDeviceId(
+	&Core::App().mediaDevices(),
+	Webrtc::DeviceType::Capture,
+	Webrtc::DeviceIdOrDefault(
+		Core::App().settings().captureDeviceIdValue())) {
 	_updateTimer.setCallback([this] {
 		auto hasActive = false;
 		for (auto track : _tracks) {
@@ -273,10 +278,14 @@ Instance::Instance()
 	}, _lifetime);
 }
 
-QString Instance::deviceId() const {
+QString Instance::playbackDeviceId() const {
 	return _playbackDeviceId.current();
 }
 
+QString Instance::captureDeviceId() const {
+	return _captureDeviceId.current();
+}
+
 std::unique_ptr<Track> Instance::createTrack() {
 	return std::make_unique<Track>(this);
 }
@@ -95,7 +95,8 @@ public:
 	// Thread: Main.
 	Instance();
 
-	[[nodiscard]] QString deviceId() const;
+	[[nodiscard]] QString playbackDeviceId() const;
+	[[nodiscard]] QString captureDeviceId() const;
 
 	[[nodiscard]] std::unique_ptr<Track> createTrack();
 
@@ -119,6 +120,7 @@ private:
 private:
 	std::set<Track*> _tracks;
 	Webrtc::DeviceId _playbackDeviceId;
+	Webrtc::DeviceId _captureDeviceId;
 
 	base::Timer _updateTimer;
 
@@ -1,36 +0,0 @@
-/*
-This file is part of Telegram Desktop,
-the official desktop application for the Telegram messaging service.
-
-For license and copyright information please follow this link:
-https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
-*/
-#pragma once
-
-namespace Platform {
-namespace Audio {
-
-void Init();
-
-void DeInit();
-
-} // namespace Audio
-} // namespace Platform
-
-// Platform dependent implementations.
-
-#if defined Q_OS_WINRT || defined Q_OS_WIN
-#include "platform/win/audio_win.h"
-#else // Q_OS_WINRT || Q_OS_WIN
-namespace Platform {
-namespace Audio {
-
-inline void Init() {
-}
-
-inline void DeInit() {
-}
-
-} // namespace Audio
-} // namespace Platform
-#endif // Q_OS_WINRT || Q_OS_WIN
@@ -1,180 +0,0 @@
-/*
-This file is part of Telegram Desktop,
-the official desktop application for the Telegram messaging service.
-
-For license and copyright information please follow this link:
-https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
-*/
-#include "platform/win/audio_win.h"
-
-#include "platform/win/windows_dlls.h"
-#include "media/audio/media_audio.h"
-
-#include <initguid.h>
-#include <mmdeviceapi.h>
-#include <audioclient.h>
-
-#include <wrl/client.h>
-
-using namespace Microsoft::WRL;
-
-namespace Platform {
-namespace Audio {
-namespace {
-
-// Inspired by Chromium.
-class DeviceListener : public IMMNotificationClient {
-public:
-	DeviceListener() = default;
-	DeviceListener(const DeviceListener &other) = delete;
-	DeviceListener &operator=(const DeviceListener &other) = delete;
-	virtual ~DeviceListener() = default;
-
-private:
-	// IMMNotificationClient implementation.
-	STDMETHOD_(ULONG, AddRef)() override {
-		return 1;
-	}
-	STDMETHOD_(ULONG, Release)() override {
-		return 1;
-	}
-	STDMETHOD(QueryInterface)(REFIID iid, void** object) override;
-	STDMETHOD(OnPropertyValueChanged)(LPCWSTR device_id, const PROPERTYKEY key) override;
-	STDMETHOD(OnDeviceAdded)(LPCWSTR device_id) override {
-		return S_OK;
-	}
-	STDMETHOD(OnDeviceRemoved)(LPCWSTR device_id) override {
-		return S_OK;
-	}
-	STDMETHOD(OnDeviceStateChanged)(LPCWSTR device_id, DWORD new_state) override;
-	STDMETHOD(OnDefaultDeviceChanged)(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) override;
-
-};
-
-STDMETHODIMP DeviceListener::QueryInterface(REFIID iid, void** object) {
-	if (iid == IID_IUnknown || iid == __uuidof(IMMNotificationClient)) {
-		*object = static_cast<IMMNotificationClient*>(this);
-		return S_OK;
-	}
-
-	*object = NULL;
-	return E_NOINTERFACE;
-}
-
-STDMETHODIMP DeviceListener::OnPropertyValueChanged(LPCWSTR device_id, const PROPERTYKEY key) {
-	auto deviceName = device_id ? '"' + QString::fromWCharArray(device_id) + '"' : QString("nullptr");
-
-	constexpr auto kKeyBufferSize = 1024;
-	WCHAR keyBuffer[kKeyBufferSize] = { 0 };
-	auto hr = Dlls::PSStringFromPropertyKey ? Dlls::PSStringFromPropertyKey(key, keyBuffer, kKeyBufferSize) : E_FAIL;
-	auto keyName = Dlls::PSStringFromPropertyKey ? (SUCCEEDED(hr) ? '"' + QString::fromWCharArray(keyBuffer) + '"' : QString("unknown")) : QString("unsupported");
-
-	// BAD GUID { 0xD4EF3098, 0xC967, 0x4A4E, { 0xB2, 0x19, 0xAC, 0xB6, 0xDA, 0x1D, 0xC3, 0x73 } };
-	// BAD GUID { 0x3DE556E2, 0xE087, 0x4721, { 0xBE, 0x97, 0xEC, 0x16, 0x2D, 0x54, 0x81, 0xF8 } };
-
-	// VERY BAD GUID { 0x91F1336D, 0xC37C, 0x4C48, { 0xAD, 0xEB, 0x92, 0x17, 0x2F, 0xA8, 0x7E, 0xEB } };
-	// It is fired somewhere from CloseAudioPlaybackDevice() causing deadlock on AudioMutex.
-
-	// Sometimes unknown value change events come very frequently, like each 0.5 seconds.
-	// So we will handle only special value change events from mmdeviceapi.h
-	//
-	// We have logs of PKEY_AudioEndpoint_Disable_SysFx property change 3-5 times each second.
-	// So for now we disable PKEY_AudioEndpoint and both PKEY_AudioUnknown changes handling
-	//.
-	// constexpr GUID pkey_AudioEndpoint = { 0x1da5d803, 0xd492, 0x4edd, { 0x8c, 0x23, 0xe0, 0xc0, 0xff, 0xee, 0x7f, 0x0e } };
-	constexpr GUID pkey_AudioEngine_Device = { 0xf19f064d, 0x82c, 0x4e27, { 0xbc, 0x73, 0x68, 0x82, 0xa1, 0xbb, 0x8e, 0x4c } };
-	constexpr GUID pkey_AudioEngine_OEM = { 0xe4870e26, 0x3cc5, 0x4cd2, { 0xba, 0x46, 0xca, 0xa, 0x9a, 0x70, 0xed, 0x4 } };
-	// constexpr GUID pkey_AudioUnknown1 = { 0x3d6e1656, 0x2e50, 0x4c4c, { 0x8d, 0x85, 0xd0, 0xac, 0xae, 0x3c, 0x6c, 0x68 } };
-	// constexpr GUID pkey_AudioUnknown2 = { 0x624f56de, 0xfd24, 0x473e, { 0x81, 0x4a, 0xde, 0x40, 0xaa, 0xca, 0xed, 0x16 } };
-	if (false
-		// || key.fmtid == pkey_AudioEndpoint
-		|| key.fmtid == pkey_AudioEngine_Device
-		|| key.fmtid == pkey_AudioEngine_OEM
-		// || key.fmtid == pkey_AudioUnknown1
-		// || key.fmtid == pkey_AudioUnknown2
-		|| false) {
-		LOG(("Audio Info: OnPropertyValueChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(keyName));
-		Media::Audio::ScheduleDetachFromDeviceSafe();
-	} else {
-		DEBUG_LOG(("Audio Info: OnPropertyValueChanged(%1, %2) unknown, skipping.").arg(deviceName).arg(keyName));
-	}
-	return S_OK;
-}
-
-STDMETHODIMP DeviceListener::OnDeviceStateChanged(LPCWSTR device_id, DWORD new_state) {
-	auto deviceName = device_id ? '"' + QString::fromWCharArray(device_id) + '"' : QString("nullptr");
-	LOG(("Audio Info: OnDeviceStateChanged(%1, %2) scheduling detach from audio device.").arg(deviceName).arg(new_state));
-	Media::Audio::ScheduleDetachFromDeviceSafe();
-	return S_OK;
-}
-
-STDMETHODIMP DeviceListener::OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
-	// Only listen for console and communication device changes.
-	if ((role != eConsole && role != eCommunications) || (flow != eRender && flow != eCapture)) {
-		LOG(("Audio Info: skipping OnDefaultDeviceChanged() flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
-		return S_OK;
-	}
-
-	LOG(("Audio Info: OnDefaultDeviceChanged() scheduling detach from audio device, flow %1, role %2, new_default_device_id: %3").arg(flow).arg(role).arg(new_default_device_id ? '"' + QString::fromWCharArray(new_default_device_id) + '"' : QString("nullptr")));
-	Media::Audio::ScheduleDetachFromDeviceSafe();
-
-	return S_OK;
-}
-
-auto WasCoInitialized = false;
-ComPtr<IMMDeviceEnumerator> Enumerator;
-
-DeviceListener *Listener = nullptr;
-
-} // namespace
-
-void Init() {
-	auto hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
-	if (FAILED(hr)) {
-		Enumerator.Reset();
-
-		if (hr == CO_E_NOTINITIALIZED) {
-			LOG(("Audio Info: CoCreateInstance fails with CO_E_NOTINITIALIZED"));
-			hr = CoInitialize(nullptr);
-			if (SUCCEEDED(hr)) {
-				WasCoInitialized = true;
-				hr = CoCreateInstance(CLSID_MMDeviceEnumerator, nullptr, CLSCTX_INPROC_SERVER, IID_PPV_ARGS(&Enumerator));
-				if (FAILED(hr)) {
-					Enumerator.Reset();
-
-					LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
-					return;
-				}
-			}
-		} else {
-			LOG(("Audio Error: could not CoCreateInstance of MMDeviceEnumerator, HRESULT: %1").arg(hr));
-			return;
-		}
-	}
-
-	Listener = new DeviceListener();
-	hr = Enumerator->RegisterEndpointNotificationCallback(Listener);
-	if (FAILED(hr)) {
-		LOG(("Audio Error: RegisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
-		delete base::take(Listener);
-	}
-}
-
-void DeInit() {
-	if (Enumerator) {
-		if (Listener) {
-			auto hr = Enumerator->UnregisterEndpointNotificationCallback(Listener);
-			if (FAILED(hr)) {
-				LOG(("Audio Error: UnregisterEndpointNotificationCallback failed, HRESULT: %1").arg(hr));
-			}
-			delete base::take(Listener);
-		}
-		Enumerator.Reset();
-	}
-	if (WasCoInitialized) {
-		CoUninitialize();
-	}
-}
-
-} // namespace Audio
-} // namespace Platform
@@ -1,18 +0,0 @@
-/*
-This file is part of Telegram Desktop,
-the official desktop application for the Telegram messaging service.
-
-For license and copyright information please follow this link:
-https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
-*/
-#pragma once
-
-namespace Platform {
-namespace Audio {
-
-void Init();
-void DeInit();
-
-} // namespace Audio
-} // namespace Platform
-
@@ -30,7 +30,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "apiwrap.h"
 #include "api/api_authorizations.h"
 #include "webrtc/webrtc_environment.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "webrtc/webrtc_video_track.h"
 #include "webrtc/webrtc_audio_input_tester.h"
 #include "webrtc/webrtc_create_adm.h" // Webrtc::Backend.
@@ -46,12 +45,15 @@ using namespace Webrtc;
 		DeviceType type,
 		rpl::producer<QString> id) {
 	return std::move(id) | rpl::map([type](const QString &id) {
-		const auto list = Core::App().mediaDevices().devices(type);
-		const auto i = ranges::find(list, id, &DeviceInfo::id);
-		return (i != end(list))
-			? i->name
-			: tr::lng_settings_call_device_default(tr::now);
-	});
+		return Core::App().mediaDevices().devicesValue(
+			type
+		) | rpl::map([id](const std::vector<DeviceInfo> &list) {
+			const auto i = ranges::find(list, id, &DeviceInfo::id);
+			return (i != end(list) && !i->inactive)
+				? i->name
+				: tr::lng_settings_call_device_default(tr::now);
+		});
+	}) | rpl::flatten_latest();
 }
 
 } // namespace
@@ -82,10 +84,6 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
 
 	const auto hasCall = (Core::App().calls().currentCall() != nullptr);
 
-	const auto cameraNameStream = lifetime.make_state<
-		rpl::event_stream<QString>
-	>();
-
 	auto capturerOwner = lifetime.make_state<
 		std::shared_ptr<tgcalls::VideoCaptureInterface>
 	>();
@@ -95,62 +93,30 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
 		? VideoState::Inactive
 		: VideoState::Active));
 
-	const auto currentCameraName = [&] {
-		const auto cameras = GetVideoInputList();
-		const auto i = ranges::find(
-			cameras,
-			Core::App().settings().cameraDeviceId(),
-			&VideoInput::id);
-		return (i != end(cameras))
-			? i->name
-			: tr::lng_settings_call_device_default(tr::now);
-	}();
-
+	const auto deviceId = lifetime.make_state<rpl::variable<QString>>(
+		Core::App().settings().cameraDeviceId());
+	auto resolvedId = rpl::deferred([=] {
+		return DeviceIdOrDefault(deviceId->value());
+	});
 	AddButtonWithLabel(
 		content,
 		tr::lng_settings_call_input_device(),
-		rpl::single(
-			currentCameraName
-		) | rpl::then(
-			cameraNameStream->events()
-		),
+		CameraDeviceNameValue(rpl::duplicate(resolvedId)),
 		st::settingsButtonNoIcon
 	)->addClickHandler([=] {
-		const auto &devices = GetVideoInputList();
-		const auto options = ranges::views::concat(
-			ranges::views::single(
-				tr::lng_settings_call_device_default(tr::now)),
-			devices | ranges::views::transform(&VideoInput::name)
-		) | ranges::to_vector;
-		const auto i = ranges::find(
-			devices,
-			Core::App().settings().cameraDeviceId(),
-			&VideoInput::id);
-		const auto currentOption = (i != end(devices))
-			? int(i - begin(devices) + 1)
-			: 0;
-		const auto save = crl::guard(content, [=](int option) {
-			cameraNameStream->fire_copy(options[option]);
-			const auto deviceId = option
-				? devices[option - 1].id
-				: kDefaultDeviceId;
-			if (saveToSettings) {
-				Core::App().settings().setCameraDeviceId(deviceId);
-				Core::App().saveSettingsDelayed();
-			}
-			if (*capturerOwner) {
-				(*capturerOwner)->switchToDevice(
-					deviceId.toStdString(),
-					false);
-			}
-		});
-		show->showBox(Box([=](not_null<Ui::GenericBox*> box) {
-			SingleChoiceBox(box, {
-				.title = tr::lng_settings_call_camera(),
-				.options = options,
-				.initialSelection = currentOption,
-				.callback = save,
-			});
+		show->show(ChooseCameraDeviceBox(
+			rpl::duplicate(resolvedId),
+			[=](const QString &id) {
+				*deviceId = id;
+				if (saveToSettings) {
+					Core::App().settings().setCameraDeviceId(id);
+					Core::App().saveSettingsDelayed();
+				}
+				if (*capturerOwner) {
+					(*capturerOwner)->switchToDevice(
+						id.toStdString(),
+						false);
+				}
 		}));
 	});
 	const auto bubbleWrap = content->add(object_ptr<Ui::RpWidget>(content));
@@ -221,9 +187,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
 }
 
 void Calls::sectionSaveChanges(FnMut<void()> done) {
-	if (_micTester) {
-		_micTester.reset();
-	}
+	_testingMicrophone = false;
 	done();
 }
 
@@ -234,61 +198,25 @@ void Calls::setupContent() {
 	Ui::AddSkip(content);
 	Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_output());
 
-	const auto playbackIdWithFallback = [=] {
-		return DeviceIdOrDefault(settings->playbackDeviceIdValue());
-	};
-	AddButtonWithLabel(
+	initPlaybackButton(
 		content,
 		tr::lng_settings_call_output_device(),
-		PlaybackDeviceNameValue(playbackIdWithFallback()),
-		st::settingsButtonNoIcon
-	)->addClickHandler([=] {
-		_controller->show(ChoosePlaybackDeviceBox(
-			playbackIdWithFallback(),
-			crl::guard(this, [=](const QString &id) {
-				settings->setPlaybackDeviceId(id);
-				Core::App().saveSettingsDelayed();
-			})));
-	});
+		rpl::deferred([=] {
+			return DeviceIdOrDefault(settings->playbackDeviceIdValue());
+		}),
+		[=](const QString &id) { settings->setPlaybackDeviceId(id); });
 
 	Ui::AddSkip(content);
 	Ui::AddDivider(content);
 	Ui::AddSkip(content);
 	Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_input());
-	const auto captureIdWithFallback = [=] {
-		return DeviceIdOrDefault(settings->captureDeviceIdValue());
-	};
-	AddButtonWithLabel(
+	initCaptureButton(
 		content,
 		tr::lng_settings_call_input_device(),
-		CaptureDeviceNameValue(captureIdWithFallback()),
-		st::settingsButtonNoIcon
-	)->addClickHandler([=] {
-		_controller->show(ChooseCaptureDeviceBox(
-			captureIdWithFallback(),
-			crl::guard(this, [=](const QString &id) {
-				settings->setCaptureDeviceId(id);
-				Core::App().saveSettingsDelayed();
-				if (_micTester) {
-					_micTester->setDeviceId(id);
-				}
-			})));
-	});
-
-	_micTestLevel = content->add(
-		object_ptr<Ui::LevelMeter>(
-			content,
-			st::defaultLevelMeter),
-		st::settingsLevelMeterPadding);
-	_micTestLevel->resize(QSize(0, st::defaultLevelMeter.height));
-
-	_levelUpdateTimer.setCallback([=] {
-		const auto was = _micLevel;
-		_micLevel = _micTester->getAndResetLevel();
-		_micLevelAnimation.start([=] {
-			_micTestLevel->setValue(_micLevelAnimation.value(_micLevel));
-		}, was, _micLevel, kMicTestAnimationDuration);
-	});
+		rpl::deferred([=] {
+			return DeviceIdOrDefault(settings->captureDeviceIdValue());
+		}),
+		[=](const QString &id) { settings->setCaptureDeviceId(id); });
 
 	Ui::AddSkip(content);
 	Ui::AddDivider(content);
@@ -329,50 +257,30 @@ void Calls::setupContent() {
 		content,
 		object_ptr<Ui::VerticalLayout>(content)));
 	const auto calls = different->entity();
-	const auto callPlaybackIdWithFallback = [=] {
-		return DeviceIdValueWithFallback(
-			settings->callPlaybackDeviceIdValue(),
-			settings->playbackDeviceIdValue());
-	};
-	AddButtonWithLabel(
+	initPlaybackButton(
 		calls,
 		tr::lng_group_call_speakers(),
-		PlaybackDeviceNameValue(callPlaybackIdWithFallback()),
-		st::settingsButtonNoIcon
-	)->addClickHandler([=] {
-		_controller->show(ChoosePlaybackDeviceBox(
-			callPlaybackIdWithFallback(),
-			crl::guard(this, [=](const QString &id) {
-				settings->setCallPlaybackDeviceId(orDefault(id));
-				Core::App().saveSettingsDelayed();
-			})));
-	});
-	const auto callCaptureIdWithFallback = [=] {
-		return DeviceIdValueWithFallback(
-			settings->callCaptureDeviceIdValue(),
-			settings->captureDeviceIdValue());
-	};
-	AddButtonWithLabel(
+		rpl::deferred([=] {
+			return DeviceIdValueWithFallback(
+				settings->callPlaybackDeviceIdValue(),
+				settings->playbackDeviceIdValue());
+		}),
+		[=](const QString &id) { settings->setCallPlaybackDeviceId(id); });
+	initCaptureButton(
 		calls,
 		tr::lng_group_call_microphone(),
-		CaptureDeviceNameValue(callCaptureIdWithFallback()),
-		st::settingsButtonNoIcon
-	)->addClickHandler([=] {
-		_controller->show(ChooseCaptureDeviceBox(
-			callCaptureIdWithFallback(),
-			crl::guard(this, [=](const QString &id) {
-				settings->setCallCaptureDeviceId(orDefault(id));
-				Core::App().saveSettingsDelayed();
-				//if (_micTester) {
-				//	_micTester->setDeviceId(id);
-				//}
-			})));
-	});
+		rpl::deferred([=] {
+			return DeviceIdValueWithFallback(
+				settings->callCaptureDeviceIdValue(),
+				settings->captureDeviceIdValue());
+		}),
+		[=](const QString &id) { settings->setCallCaptureDeviceId(id); });
 	different->toggleOn(same->toggledValue() | rpl::map(!rpl::mappers::_1));
 	Ui::AddSkip(content);
 	Ui::AddDivider(content);
 
-	if (!GetVideoInputList().empty()) {
+	if (!Core::App().mediaDevices().defaultId(
+			Webrtc::DeviceType::Camera).isEmpty()) {
 		Ui::AddSkip(content);
 		Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
 		AddCameraSubsection(_controller->uiShow(), content, true);
@ -416,18 +324,98 @@ void Calls::setupContent() {
 	Ui::ResizeFitChild(this, content);
 }
 
+void Calls::initPlaybackButton(
+		not_null<Ui::VerticalLayout*> container,
+		rpl::producer<QString> text,
+		rpl::producer<QString> resolvedId,
+		Fn<void(QString)> set) {
+	AddButtonWithLabel(
+		container,
+		tr::lng_settings_call_output_device(),
+		PlaybackDeviceNameValue(rpl::duplicate(resolvedId)),
+		st::settingsButtonNoIcon
+	)->addClickHandler([=] {
+		_controller->show(ChoosePlaybackDeviceBox(
+			rpl::duplicate(resolvedId),
+			[=](const QString &id) {
+				set(id);
+				Core::App().saveSettingsDelayed();
+			}));
+	});
+}
+
+void Calls::initCaptureButton(
+		not_null<Ui::VerticalLayout*> container,
+		rpl::producer<QString> text,
+		rpl::producer<QString> resolvedId,
+		Fn<void(QString)> set) {
+	AddButtonWithLabel(
+		container,
+		tr::lng_settings_call_input_device(),
+		CaptureDeviceNameValue(rpl::duplicate(resolvedId)),
+		st::settingsButtonNoIcon
+	)->addClickHandler([=] {
+		_controller->show(ChooseCaptureDeviceBox(
+			rpl::duplicate(resolvedId),
+			[=](const QString &id) {
+				set(id);
+				Core::App().saveSettingsDelayed();
+			}));
+	});
+
+	struct LevelState {
+		std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
+		std::unique_ptr<Webrtc::AudioInputTester> tester;
+		base::Timer timer;
+		Ui::Animations::Simple animation;
+		float level = 0.;
+	};
+	const auto level = container->add(
+		object_ptr<Ui::LevelMeter>(
+			container,
+			st::defaultLevelMeter),
+		st::settingsLevelMeterPadding);
+	const auto state = level->lifetime().make_state<LevelState>();
+	level->resize(QSize(0, st::defaultLevelMeter.height));
+
+	state->timer.setCallback([=] {
+		const auto was = state->level;
+		state->level = state->tester->getAndResetLevel();
+		state->animation.start([=] {
+			level->setValue(state->animation.value(state->level));
+		}, was, state->level, kMicTestAnimationDuration);
+	});
+	_testingMicrophone.value() | rpl::start_with_next([=](bool testing) {
+		if (testing) {
+			state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
+				&Core::App().mediaDevices(),
+				Webrtc::DeviceType::Capture,
+				rpl::duplicate(resolvedId));
+			state->tester = std::make_unique<AudioInputTester>(
+				state->computedDeviceId->value());
+			state->timer.callEach(kMicTestUpdateInterval);
+		} else {
+			state->timer.cancel();
+			state->animation.stop();
+			state->tester = nullptr;
+			state->computedDeviceId = nullptr;
+		}
+	}, level->lifetime());
+}
+
 void Calls::requestPermissionAndStartTestingMicrophone() {
 	using namespace ::Platform;
 	const auto status = GetPermissionStatus(
 		PermissionType::Microphone);
 	if (status == PermissionStatus::Granted) {
-		startTestingMicrophone();
+		_testingMicrophone = true;
 	} else if (status == PermissionStatus::CanRequest) {
 		const auto startTestingChecked = crl::guard(this, [=](
 				PermissionStatus status) {
 			if (status == PermissionStatus::Granted) {
 				crl::on_main(crl::guard(this, [=] {
-					startTestingMicrophone();
+					_testingMicrophone = true;
 				}));
 			}
 		});
@ -448,13 +436,6 @@ void Calls::requestPermissionAndStartTestingMicrophone() {
 	}
 }
 
-void Calls::startTestingMicrophone() {
-	_levelUpdateTimer.callEach(kMicTestUpdateInterval);
-	_micTester = std::make_unique<AudioInputTester>(
-		Core::App().settings().callAudioBackend(),
-		Core::App().settings().callCaptureDeviceId());
-}
-
 rpl::producer<QString> PlaybackDeviceNameValue(rpl::producer<QString> id) {
 	return DeviceNameValue(DeviceType::Playback, std::move(id));
 }
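The dedicated startTestingMicrophone() helper and its _micTester / _levelUpdateTimer members are gone; the microphone test is now a reactive side effect of the _testingMicrophone flag inside initCaptureButton(). For reference, the part of the lifecycle that builds the tester when the flag turns on, with brief comments (all names come from the hunks above; nothing here is new API surface):

	// Resolve the effective capture device through the media-devices
	// environment; the producer is the per-call id with global fallback.
	auto computed = std::make_unique<Webrtc::DeviceId>(
		&Core::App().mediaDevices(),
		Webrtc::DeviceType::Capture,
		rpl::duplicate(resolvedId));
	// The tester follows that resolved id and reports input levels,
	// polled through getAndResetLevel() every kMicTestUpdateInterval.
	auto tester = std::make_unique<Webrtc::AudioInputTester>(
		computed->value());

Setting _testingMicrophone back to false cancels the timer and drops both objects, as the else-branch above shows.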
@ -463,7 +444,12 @@ rpl::producer<QString> CaptureDeviceNameValue(rpl::producer<QString> id) {
 	return DeviceNameValue(DeviceType::Capture, std::move(id));
 }
 
-void ChooseAudioDeviceBox(
+rpl::producer<QString> CameraDeviceNameValue(
+		rpl::producer<QString> id) {
+	return DeviceNameValue(DeviceType::Camera, std::move(id));
+}
+
+void ChooseMediaDeviceBox(
 		not_null<Ui::GenericBox*> box,
 		rpl::producer<QString> title,
 		rpl::producer<std::vector<DeviceInfo>> devicesValue,
@ -495,6 +481,14 @@ void ChooseAudioDeviceBox(
 	const auto state = box->lifetime().make_state<State>();
 	state->currentId = std::move(currentId);
+
+	const auto choose = [=](const QString &id) {
+		const auto weak = Ui::MakeWeak(box);
+		chosen(id);
+		if (weak) {
+			box->closeBox();
+		}
+	};
 
 	const auto group = std::make_shared<Ui::RadiobuttonGroup>();
 	const auto fake = std::make_shared<Ui::RadiobuttonGroup>(0);
 	const auto buttons = layout->add(object_ptr<Ui::VerticalLayout>(layout));
@ -513,6 +507,12 @@ void ChooseAudioDeviceBox(
 			*st,
 			*radioSt),
 		margins);
+	def->clicks(
+	) | rpl::filter([=] {
+		return !group->value();
+	}) | rpl::start_with_next([=] {
+		choose(kDefaultDeviceId);
+	}, def->lifetime());
 	const auto showUnavailable = [=](QString text) {
 		AddSkip(other);
 		AddSubsectionTitle(other, tr::lng_settings_devices_inactive());
@ -572,14 +572,6 @@ void ChooseAudioDeviceBox(
 		}
 	};
-
-	const auto choose = [=](const QString &id) {
-		const auto weak = Ui::MakeWeak(box);
-		chosen(id);
-		if (weak) {
-			box->closeBox();
-		}
-	};
 
 	std::move(
 		devicesValue
 	) | rpl::start_with_next([=](std::vector<DeviceInfo> &&list) {
@ -615,7 +607,7 @@ void ChooseAudioDeviceBox(
 		button->finishAnimating();
 		button->clicks(
 		) | rpl::filter([=] {
-			return (current == id);
+			return (group->value() == index);
 		}) | rpl::start_with_next([=] {
 			choose(id);
 		}, button->lifetime());
@ -656,7 +648,7 @@ object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
 		const style::Checkbox *st,
 		const style::Radio *radioSt) {
 	return Box(
-		ChooseAudioDeviceBox,
+		ChooseMediaDeviceBox,
 		tr::lng_settings_call_output_device(),
 		Core::App().mediaDevices().devicesValue(DeviceType::Playback),
 		std::move(currentId),
@ -671,7 +663,7 @@ object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
 		const style::Checkbox *st,
 		const style::Radio *radioSt) {
 	return Box(
-		ChooseAudioDeviceBox,
+		ChooseMediaDeviceBox,
 		tr::lng_settings_call_input_device(),
 		Core::App().mediaDevices().devicesValue(DeviceType::Capture),
 		std::move(currentId),
@ -680,5 +672,20 @@ object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
 		radioSt);
 }
 
+object_ptr<Ui::GenericBox> ChooseCameraDeviceBox(
+		rpl::producer<QString> currentId,
+		Fn<void(QString id)> chosen,
+		const style::Checkbox *st,
+		const style::Radio *radioSt) {
+	return Box(
+		ChooseMediaDeviceBox,
+		tr::lng_settings_call_device_default(),
+		Core::App().mediaDevices().devicesValue(DeviceType::Camera),
+		std::move(currentId),
+		std::move(chosen),
+		st,
+		radioSt);
+}
+
 } // namespace Settings
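ChooseCameraDeviceBox completes the trio next to the playback and capture pickers above. A hypothetical call site mirroring how the audio boxes are shown earlier in this file; cameraDeviceIdValue() and setCameraDeviceId() are assumed settings accessors used only for illustration:

	// Sketch only: the id producer and the setter below are assumptions,
	// the box factory and show() pattern come from the hunks above.
	_controller->show(ChooseCameraDeviceBox(
		Core::App().settings().cameraDeviceIdValue(),      // assumed producer
		crl::guard(this, [=](const QString &id) {
			Core::App().settings().setCameraDeviceId(id);  // assumed setter
			Core::App().saveSettingsDelayed();
		})));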
@ -50,15 +50,21 @@ public:
 private:
 	void setupContent();
 	void requestPermissionAndStartTestingMicrophone();
-	void startTestingMicrophone();
+	void initPlaybackButton(
+		not_null<Ui::VerticalLayout*> container,
+		rpl::producer<QString> text,
+		rpl::producer<QString> resolvedId,
+		Fn<void(QString)> set);
+	void initCaptureButton(
+		not_null<Ui::VerticalLayout*> container,
+		rpl::producer<QString> text,
+		rpl::producer<QString> resolvedId,
+		Fn<void(QString)> set);
 
 	const not_null<Window::SessionController*> _controller;
 	rpl::event_stream<QString> _cameraNameStream;
-	std::unique_ptr<Webrtc::AudioInputTester> _micTester;
-	Ui::LevelMeter *_micTestLevel = nullptr;
-	float _micLevel = 0.;
-	Ui::Animations::Simple _micLevelAnimation;
-	base::Timer _levelUpdateTimer;
+	rpl::variable<bool> _testingMicrophone;
 
 };
@ -69,6 +75,8 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
 	rpl::producer<QString> id);
 [[nodiscard]] rpl::producer<QString> CaptureDeviceNameValue(
 	rpl::producer<QString> id);
+[[nodiscard]] rpl::producer<QString> CameraDeviceNameValue(
+	rpl::producer<QString> id);
 [[nodiscard]] object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
 	rpl::producer<QString> currentId,
 	Fn<void(QString id)> chosen,
@ -79,6 +87,11 @@ inline constexpr auto kMicTestAnimationDuration = crl::time(200);
 	Fn<void(QString id)> chosen,
 	const style::Checkbox *st = nullptr,
 	const style::Radio *radioSt = nullptr);
+[[nodiscard]] object_ptr<Ui::GenericBox> ChooseCameraDeviceBox(
+	rpl::producer<QString> currentId,
+	Fn<void(QString id)> chosen,
+	const style::Checkbox *st = nullptr,
+	const style::Radio *radioSt = nullptr);
 
 } // namespace Settings
@ -34,7 +34,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/streaming/media_streaming_player.h"
 #include "media/streaming/media_streaming_document.h"
 #include "settings/settings_calls.h" // Calls::AddCameraSubsection.
-#include "webrtc/webrtc_media_devices.h" // Webrtc::GetVideoInputList.
+#include "webrtc/webrtc_environment.h"
 #include "webrtc/webrtc_video_track.h"
 #include "ui/widgets/popup_menu.h"
 #include "window/window_controller.h"
@ -53,7 +53,8 @@ namespace {
 
 [[nodiscard]] bool IsCameraAvailable() {
 	return (Core::App().calls().currentCall() == nullptr)
-		&& !Webrtc::GetVideoInputList().empty();
+		&& !Core::App().mediaDevices().defaultId(
+			Webrtc::DeviceType::Camera).isEmpty();
 }
 
 void CameraBox(
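With the include swapped to webrtc_environment.h, camera availability is read from the media-devices environment instead of enumerating the video input list. The same check can gate any camera-dependent UI; a sketch built only from the calls visible in this hunk (the local name hasCamera is illustrative):

	// True when the environment reports a default camera device id.
	const auto hasCamera = !Core::App().mediaDevices().defaultId(
		Webrtc::DeviceType::Camera).isEmpty();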
@ -1 +1 @@
-Subproject commit b78e51ad98cd5bf70e916becae0b13496b9f6aca
+Subproject commit 222ecc82441dd9d80cbd642bb9d89a59caa12944