Mirror of https://github.com/AyuGram/AyuGramDesktop.git, synced 2025-04-13 04:37:11 +02:00
Improve things for OpenAL devices management.
Commit 2f40a44b5c (parent 0945e04f6b)
13 changed files with 140 additions and 92 deletions
@@ -430,20 +430,20 @@ void Call::setMuted(bool mute) {
 void Call::setupMediaDevices() {
     _playbackDeviceId.changes() | rpl::filter([=] {
         return _instance && _setDeviceIdCallback;
-    }) | rpl::start_with_next([=](const QString &deviceId) {
-        _setDeviceIdCallback(
-            Webrtc::DeviceType::Playback,
-            deviceId);
-        _instance->setAudioOutputDevice(deviceId.toStdString());
+    }) | rpl::start_with_next([=](const Webrtc::DeviceResolvedId &deviceId) {
+        _setDeviceIdCallback(deviceId);
+
+        // Value doesn't matter here, just trigger reading of the new value.
+        _instance->setAudioOutputDevice(deviceId.value.toStdString());
     }, _lifetime);

     _captureDeviceId.changes() | rpl::filter([=] {
         return _instance && _setDeviceIdCallback;
-    }) | rpl::start_with_next([=](const QString &deviceId) {
-        _setDeviceIdCallback(
-            Webrtc::DeviceType::Capture,
-            deviceId);
-        _instance->setAudioInputDevice(deviceId.toStdString());
+    }) | rpl::start_with_next([=](const Webrtc::DeviceResolvedId &deviceId) {
+        _setDeviceIdCallback(deviceId);
+
+        // Value doesn't matter here, just trigger reading of the new value.
+        _instance->setAudioInputDevice(deviceId.value.toStdString());
     }, _lifetime);
 }
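Note: the definition of Webrtc::DeviceResolvedId is not part of this diff. From the way it is used above and below (.type, .value, .isDefault(), equality comparison), one plausible shape is roughly the sketch that follows. This is an assumption for orientation only, not the actual lib_webrtc declaration.

    // Assumed shape only - inferred from usage in this commit, not copied from lib_webrtc.
    #include <QtCore/QString>

    namespace Webrtc {

    enum class DeviceType {
        Playback,
        Capture,
        // (other device types omitted)
    };

    struct DeviceResolvedId {
        DeviceType type = DeviceType::Playback;
        QString value; // concrete id of the resolved device

        // Assumption: an empty value means "use the system default device".
        [[nodiscard]] bool isDefault() const {
            return value.isEmpty();
        }

        friend bool operator==(
                const DeviceResolvedId &a,
                const DeviceResolvedId &b) {
            return (a.type == b.type) && (a.value == b.value);
        }
    };

    } // namespace Webrtc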
@@ -497,10 +497,11 @@ void Call::setupOutgoingVideo() {
     _cameraDeviceId.changes(
     ) | rpl::filter([=] {
         return !_videoCaptureIsScreencast;
-    }) | rpl::start_with_next([=](QString deviceId) {
-        _videoCaptureDeviceId = deviceId;
+    }) | rpl::start_with_next([=](Webrtc::DeviceResolvedId deviceId) {
+        const auto &id = deviceId.value;
+        _videoCaptureDeviceId = id;
         if (_videoCapture) {
-            _videoCapture->switchToDevice(deviceId.toStdString(), false);
+            _videoCapture->switchToDevice(id.toStdString(), false);
             if (_instance) {
                 _instance->sendVideoDeviceUpdated();
             }
@@ -904,24 +905,25 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
     const auto playbackDeviceIdInitial = _playbackDeviceId.current();
     const auto captureDeviceIdInitial = _captureDeviceId.current();
     const auto saveSetDeviceIdCallback = [=](
-            Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
-        setDeviceIdCallback(
-            Webrtc::DeviceType::Playback,
-            playbackDeviceIdInitial);
-        setDeviceIdCallback(
-            Webrtc::DeviceType::Capture,
-            captureDeviceIdInitial);
+            Fn<void(Webrtc::DeviceResolvedId)> setDeviceIdCallback) {
+        setDeviceIdCallback(playbackDeviceIdInitial);
+        setDeviceIdCallback(captureDeviceIdInitial);
         crl::on_main(weak, [=] {
             _setDeviceIdCallback = std::move(setDeviceIdCallback);
             const auto playback = _playbackDeviceId.current();
             if (_instance && playback != playbackDeviceIdInitial) {
-                _setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
-                _instance->setAudioOutputDevice(playback.toStdString());
+                _setDeviceIdCallback(playback);
+
+                // Value doesn't matter here, just trigger reading of the...
+                _instance->setAudioOutputDevice(
+                    playback.value.toStdString());
             }
             const auto capture = _captureDeviceId.current();
             if (_instance && capture != captureDeviceIdInitial) {
-                _setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
-                _instance->setAudioInputDevice(capture.toStdString());
+                _setDeviceIdCallback(capture);
+
+                // Value doesn't matter here, just trigger reading of the...
+                _instance->setAudioInputDevice(capture.value.toStdString());
             }
         });
     };
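Note: the re-check after crl::on_main above guards a small race: the audio module is created off the main thread, so the playback or capture id can change between reading the *Initial values and the moment the setter finally lands in _setDeviceIdCallback. A minimal, self-contained illustration of that hand-off pattern follows (illustrative names only; plain std::string and std::function stand in for the real DeviceResolvedId, Fn and crl::on_main machinery).

    // Minimal sketch of the "seed, store, re-apply if changed" hand-off.
    #include <cstdio>
    #include <functional>
    #include <string>
    #include <utility>

    struct CallState {
        std::string currentPlaybackId;                 // owned by the "main thread"
        std::function<void(std::string)> setDeviceId;  // bound once the module exists

        void adoptSetter(
                std::function<void(std::string)> setter,
                const std::string &playbackInitial) {
            setter(playbackInitial);                   // seed the module immediately
            // ...later, back on the main thread:
            setDeviceId = std::move(setter);
            if (currentPlaybackId != playbackInitial) {
                // The device changed while the module was being created,
                // so re-apply the current id to avoid losing the switch.
                setDeviceId(currentPlaybackId);
            }
        }
    };

    int main() {
        CallState call;
        call.currentPlaybackId = "speakers-1";
        const auto initial = call.currentPlaybackId;

        // Simulate the user switching devices during module creation.
        call.currentPlaybackId = "headset-2";

        call.adoptSetter(
            [](std::string id) { std::printf("module now uses: %s\n", id.c_str()); },
            initial);
        return 0;
    }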
@@ -944,8 +946,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
             std::move(encryptionKeyValue),
             (_type == Type::Outgoing)),
         .mediaDevicesConfig = tgcalls::MediaDevicesConfig{
-            .audioInputId = captureDeviceIdInitial.toStdString(),
-            .audioOutputId = playbackDeviceIdInitial.toStdString(),
+            .audioInputId = captureDeviceIdInitial.value.toStdString(),
+            .audioOutputId = playbackDeviceIdInitial.value.toStdString(),
             .inputVolume = 1.f,//settings.callInputVolume() / 100.f,
             .outputVolume = 1.f,//settings.callOutputVolume() / 100.f,
         },
@@ -1223,7 +1225,7 @@ void Call::toggleCameraSharing(bool enabled) {
     }
     _delegate->callRequestPermissionsOrFail(crl::guard(this, [=] {
         toggleScreenSharing(std::nullopt);
-        _videoCaptureDeviceId = _cameraDeviceId.current();
+        _videoCaptureDeviceId = _cameraDeviceId.current().value;
         if (_videoCapture) {
             _videoCapture->switchToDevice(
                 _videoCaptureDeviceId.toStdString(),
@@ -12,7 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/bytes.h"
 #include "mtproto/sender.h"
 #include "mtproto/mtproto_auth_key.h"
-#include "webrtc/webrtc_device_id.h"
+#include "webrtc/webrtc_device_resolver.h"

 namespace Media {
 namespace Audio {
@@ -271,10 +271,10 @@ private:
     base::DelayedCallTimer _finishByTimeoutTimer;
     base::Timer _discardByTimeoutTimer;

-    Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
-    Webrtc::DeviceId _playbackDeviceId;
-    Webrtc::DeviceId _captureDeviceId;
-    Webrtc::DeviceId _cameraDeviceId;
+    Fn<void(Webrtc::DeviceResolvedId)> _setDeviceIdCallback;
+    Webrtc::DeviceResolver _playbackDeviceId;
+    Webrtc::DeviceResolver _captureDeviceId;
+    Webrtc::DeviceResolver _cameraDeviceId;

     rpl::variable<bool> _muted = false;
@@ -2066,22 +2066,26 @@ void GroupCall::applyOtherParticipantUpdate(
 void GroupCall::setupMediaDevices() {
     _playbackDeviceId.changes() | rpl::filter([=] {
         return _instance && _setDeviceIdCallback;
-    }) | rpl::start_with_next([=](const QString &deviceId) {
-        _setDeviceIdCallback(Webrtc::DeviceType::Playback, deviceId);
-        _instance->setAudioOutputDevice(deviceId.toStdString());
+    }) | rpl::start_with_next([=](const Webrtc::DeviceResolvedId &deviceId) {
+        _setDeviceIdCallback(deviceId);
+
+        // Value doesn't matter here, just trigger reading of the new value.
+        _instance->setAudioOutputDevice(deviceId.value.toStdString());
     }, _lifetime);

     _captureDeviceId.changes() | rpl::filter([=] {
         return _instance && _setDeviceIdCallback;
-    }) | rpl::start_with_next([=](const QString &deviceId) {
-        _setDeviceIdCallback(Webrtc::DeviceType::Capture, deviceId);
-        _instance->setAudioInputDevice(deviceId.toStdString());
+    }) | rpl::start_with_next([=](const Webrtc::DeviceResolvedId &deviceId) {
+        _setDeviceIdCallback(deviceId);
+
+        // Value doesn't matter here, just trigger reading of the new value.
+        _instance->setAudioInputDevice(deviceId.value.toStdString());
     }, _lifetime);

     _cameraDeviceId.changes() | rpl::filter([=] {
         return _cameraCapture != nullptr;
-    }) | rpl::start_with_next([=](const QString &deviceId) {
-        _cameraCapture->switchToDevice(deviceId.toStdString(), false);
+    }) | rpl::start_with_next([=](const Webrtc::DeviceResolvedId &deviceId) {
+        _cameraCapture->switchToDevice(deviceId.value.toStdString(), false);
     }, _lifetime);
 }
@@ -2119,7 +2123,7 @@ bool GroupCall::emitShareCameraError() {
         return emitError(Error::DisabledNoCamera);
     } else if (mutedByAdmin()) {
         return emitError(Error::MutedNoCamera);
-    } else if (_cameraDeviceId.current().isEmpty()) {
+    } else if (_cameraDeviceId.current().value.isEmpty()) {
         return emitError(Error::NoCamera);
     }
     return false;
@@ -2128,7 +2132,7 @@ bool GroupCall::emitShareCameraError() {
 void GroupCall::emitShareCameraError(Error error) {
     _cameraState = Webrtc::VideoState::Inactive;
     if (error == Error::CameraFailed
-        && _cameraDeviceId.current().isEmpty()) {
+        && _cameraDeviceId.current().value.isEmpty()) {
         error = Error::NoCamera;
     }
     _errors.fire_copy(error);
@@ -2182,7 +2186,7 @@ void GroupCall::setupOutgoingVideo() {
         return;
     } else if (!_cameraCapture) {
         _cameraCapture = _delegate->groupCallGetVideoCapture(
-            _cameraDeviceId.current());
+            _cameraDeviceId.current().value);
         if (!_cameraCapture) {
             return emitShareCameraError(Error::CameraFailed);
         }
@@ -2194,7 +2198,7 @@ void GroupCall::setupOutgoingVideo() {
         });
     } else {
         _cameraCapture->switchToDevice(
-            _cameraDeviceId.current().toStdString(),
+            _cameraDeviceId.current().value.toStdString(),
             false);
     }
     if (_instance) {
@@ -2343,24 +2347,25 @@ bool GroupCall::tryCreateController() {
     const auto playbackDeviceIdInitial = _playbackDeviceId.current();
     const auto captureDeviceIdInitial = _captureDeviceId.current();
     const auto saveSetDeviceIdCallback = [=](
-            Fn<void(Webrtc::DeviceType, QString)> setDeviceIdCallback) {
-        setDeviceIdCallback(
-            Webrtc::DeviceType::Playback,
-            playbackDeviceIdInitial);
-        setDeviceIdCallback(
-            Webrtc::DeviceType::Capture,
-            captureDeviceIdInitial);
+            Fn<void(Webrtc::DeviceResolvedId)> setDeviceIdCallback) {
+        setDeviceIdCallback(playbackDeviceIdInitial);
+        setDeviceIdCallback(captureDeviceIdInitial);
         crl::on_main(weak, [=] {
             _setDeviceIdCallback = std::move(setDeviceIdCallback);
             const auto playback = _playbackDeviceId.current();
             if (_instance && playback != playbackDeviceIdInitial) {
-                _setDeviceIdCallback(Webrtc::DeviceType::Playback, playback);
-                _instance->setAudioOutputDevice(playback.toStdString());
+                _setDeviceIdCallback(playback);
+
+                // Value doesn't matter here, just trigger reading of the...
+                _instance->setAudioOutputDevice(
+                    playback.value.toStdString());
             }
             const auto capture = _captureDeviceId.current();
             if (_instance && capture != captureDeviceIdInitial) {
-                _setDeviceIdCallback(Webrtc::DeviceType::Capture, capture);
-                _instance->setAudioInputDevice(capture.toStdString());
+                _setDeviceIdCallback(capture);
+
+                // Value doesn't matter here, just trigger reading of the...
+                _instance->setAudioInputDevice(capture.value.toStdString());
             }
         });
     };
@@ -2387,8 +2392,8 @@ bool GroupCall::tryCreateController() {
         }
         crl::on_main(weak, [=] { audioLevelsUpdated(data); });
     },
-    .initialInputDeviceId = captureDeviceIdInitial.toStdString(),
-    .initialOutputDeviceId = playbackDeviceIdInitial.toStdString(),
+    .initialInputDeviceId = captureDeviceIdInitial.value.toStdString(),
+    .initialOutputDeviceId = playbackDeviceIdInitial.value.toStdString(),
     .createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
         saveSetDeviceIdCallback),
     .videoCapture = _cameraCapture,
@@ -12,7 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/bytes.h"
 #include "mtproto/sender.h"
 #include "mtproto/mtproto_auth_key.h"
-#include "webrtc/webrtc_device_id.h"
+#include "webrtc/webrtc_device_resolver.h"

 class History;
@@ -667,10 +667,10 @@ private:

     crl::time _lastSendProgressUpdate = 0;

-    Fn<void(Webrtc::DeviceType, QString)> _setDeviceIdCallback;
-    Webrtc::DeviceId _playbackDeviceId;
-    Webrtc::DeviceId _captureDeviceId;
-    Webrtc::DeviceId _cameraDeviceId;
+    Fn<void(Webrtc::DeviceResolvedId)> _setDeviceIdCallback;
+    Webrtc::DeviceResolver _playbackDeviceId;
+    Webrtc::DeviceResolver _captureDeviceId;
+    Webrtc::DeviceResolver _cameraDeviceId;

     std::shared_ptr<GlobalShortcutManager> _shortcutManager;
     std::shared_ptr<GlobalShortcutValue> _pushToTalk;
@@ -42,6 +42,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "core/application.h"
 #include "core/core_settings.h"
 #include "webrtc/webrtc_audio_input_tester.h"
+#include "webrtc/webrtc_device_resolver.h"
 #include "settings/settings_calls.h"
 #include "main/main_session.h"
 #include "apiwrap.h"
@@ -249,7 +250,7 @@ void SettingsBox(
     const auto weakBox = Ui::MakeWeak(box);

     struct State {
-        std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
+        std::unique_ptr<Webrtc::DeviceResolver> deviceId;
         std::unique_ptr<Webrtc::AudioInputTester> micTester;
         Ui::LevelMeter *micTestLevel = nullptr;
         float micLevel = 0.;
@@ -770,14 +771,14 @@ void SettingsBox(
     box->setShowFinishedCallback([=] {
         // Means we finished showing the box.
         crl::on_main(box, [=] {
-            state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
+            state->deviceId = std::make_unique<Webrtc::DeviceResolver>(
                 &Core::App().mediaDevices(),
                 Webrtc::DeviceType::Capture,
                 Webrtc::DeviceIdValueWithFallback(
                     Core::App().settings().callCaptureDeviceIdValue(),
                     Core::App().settings().captureDeviceIdValue()));
             state->micTester = std::make_unique<Webrtc::AudioInputTester>(
-                state->computedDeviceId->value());
+                state->deviceId->value());
             state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
         });
     });
@@ -884,11 +885,13 @@ std::pair<Fn<void()>, rpl::lifetime> ShareInviteLinkAction(
 MicLevelTester::MicLevelTester(Fn<void()> show)
 : _show(std::move(show))
 , _timer([=] { check(); })
-, _tester(
-    std::make_unique<Webrtc::AudioInputTester>(
-        Webrtc::DeviceIdValueWithFallback(
-            Core::App().settings().callCaptureDeviceIdValue(),
-            Core::App().settings().captureDeviceIdValue()))) {
+, _deviceId(std::make_unique<Webrtc::DeviceResolver>(
+    &Core::App().mediaDevices(),
+    Webrtc::DeviceType::Capture,
+    Webrtc::DeviceIdValueWithFallback(
+        Core::App().settings().callCaptureDeviceIdValue(),
+        Core::App().settings().captureDeviceIdValue())))
+, _tester(std::make_unique<Webrtc::AudioInputTester>(_deviceId->value())) {
     _timer.callEach(kMicrophoneTooltipCheckInterval);
 }
@@ -11,6 +11,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL

 namespace Webrtc {
 class AudioInputTester;
+class DeviceResolver;
 } // namespace Webrtc

 namespace Calls {
@@ -38,6 +39,7 @@ private:

     Fn<void()> _show;
     base::Timer _timer;
+    std::unique_ptr<Webrtc::DeviceResolver> _deviceId;
     std::unique_ptr<Webrtc::AudioInputTester> _tester;
     int _loudCount = 0;
     int _quietCount = 0;
@@ -39,6 +39,9 @@ constexpr auto kWaveformCounterBufferSize = 256 * 1024;
 QMutex AudioMutex;
 ALCdevice *AudioDevice = nullptr;
 ALCcontext *AudioContext = nullptr;
+Webrtc::DeviceResolvedId AudioDeviceLastUsedId{
+    .type = Webrtc::DeviceType::Playback
+};

 auto VolumeMultiplierAll = 1.;
 auto VolumeMultiplierSong = 1.;
@@ -89,8 +92,12 @@ void DestroyPlaybackDevice() {
 bool CreatePlaybackDevice() {
     if (AudioDevice) return true;

-    const auto id = Current().playbackDeviceId().toStdString();
-    AudioDevice = alcOpenDevice(id.c_str());
+    AudioDeviceLastUsedId = Current().playbackDeviceId();
+
+    const auto id = AudioDeviceLastUsedId.isDefault()
+        ? std::string()
+        : AudioDeviceLastUsedId.value.toStdString();
+    AudioDevice = alcOpenDevice(id.empty() ? nullptr : id.c_str());
     if (!AudioDevice) {
         LOG(("Audio Error: Could not create default playback device, refreshing.."));
         crl::on_main([] {
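Note: the empty-string/nullptr handling above leans on a standard OpenAL convention: alcOpenDevice(nullptr) opens the system default playback device, while a non-null name selects a specific one. A minimal standalone sketch of that convention (links against OpenAL; not the application's code):

    #include <AL/alc.h>
    #include <cstdio>
    #include <string>

    int main() {
        const std::string id = ""; // pretend this came from a resolved playback id
        // Empty/"default" id -> pass nullptr so OpenAL picks the default device.
        ALCdevice *device = alcOpenDevice(id.empty() ? nullptr : id.c_str());
        if (!device) {
            std::puts("Could not open playback device.");
            return 1;
        }
        ALCcontext *context = alcCreateContext(device, nullptr);
        if (!context || !alcMakeContextCurrent(context)) {
            std::puts("Could not create or activate the context.");
            if (context) alcDestroyContext(context);
            alcCloseDevice(device);
            return 1;
        }
        // ...create sources and buffers, play audio here...
        alcMakeContextCurrent(nullptr);
        alcDestroyContext(context);
        alcCloseDevice(device);
        return 0;
    }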
@@ -1380,6 +1387,20 @@ void DetachFromDevice(not_null<Audio::Instance*> instance) {
     }
 }

+bool DetachIfDeviceChanged(
+        not_null<Audio::Instance*> instance,
+        const Webrtc::DeviceResolvedId &nowDeviceId) {
+    QMutexLocker lock(&AudioMutex);
+    if (AudioDeviceLastUsedId == nowDeviceId) {
+        return false;
+    }
+    Audio::ClosePlaybackDevice(instance);
+    if (mixer()) {
+        mixer()->reattachIfNeeded();
+    }
+    return true;
+}
+
 } // namespace internal

 } // namespace Player
@@ -30,6 +30,10 @@ struct TimePoint;
 } // namespace Streaming
 } // namespace Media

+namespace Webrtc {
+struct DeviceResolvedId;
+} // namespace Webrtc
+
 namespace Media {
 namespace Audio {
@@ -378,6 +382,9 @@ bool CheckAudioDeviceConnected();

 // Thread: Main. Locks: AudioMutex.
 void DetachFromDevice(not_null<Audio::Instance*> instance);
+bool DetachIfDeviceChanged(
+    not_null<Audio::Instance*> instance,
+    const Webrtc::DeviceResolvedId &nowDeviceId);

 // Thread: Any.
 QMutex *audioPlayerMutex();
@@ -85,7 +85,10 @@ public:
     Inner(QThread *thread);
     ~Inner();

-    void start(QString id, Fn<void(Update)> updated, Fn<void()> error);
+    void start(
+        Webrtc::DeviceResolvedId id,
+        Fn<void(Update)> updated,
+        Fn<void()> error);
     void stop(Fn<void(Result&&)> callback = nullptr);
     void pause(bool value, Fn<void(Result&&)> callback);
@@ -295,7 +298,7 @@ void Instance::Inner::fail() {
 }

 void Instance::Inner::start(
-        QString id,
+        Webrtc::DeviceResolvedId id,
         Fn<void(Update)> updated,
         Fn<void()> error) {
     _updated = std::move(updated);
@@ -305,9 +308,9 @@ void Instance::Inner::start(
     }

     // Start OpenAL Capture
-    const auto utf = id.toStdString();
+    const auto utf = id.isDefault() ? std::string() : id.value.toStdString();
     d->device = alcCaptureOpenDevice(
-        utf.c_str(),
+        utf.empty() ? nullptr : utf.c_str(),
         kCaptureFrequency,
         AL_FORMAT_MONO16,
         kCaptureFrequency / 5);
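Note: capture follows the same convention: alcCaptureOpenDevice(nullptr, ...) opens the default capture device. A standalone sketch (48000 Hz and a one-fifth-second buffer are assumed stand-ins for kCaptureFrequency; not the application's code):

    #include <AL/al.h>
    #include <AL/alc.h>
    #include <cstdio>
    #include <vector>

    int main() {
        constexpr ALCuint kFrequency = 48000;
        ALCdevice *device = alcCaptureOpenDevice(
            nullptr,          // nullptr -> default capture device
            kFrequency,
            AL_FORMAT_MONO16,
            kFrequency / 5);  // driver-side buffer of roughly 200 ms
        if (!device) {
            std::puts("Could not open capture device.");
            return 1;
        }
        alcCaptureStart(device);

        // Poll once for whatever has been captured so far;
        // a real recorder would loop and append to its own buffer.
        ALCint available = 0;
        alcGetIntegerv(device, ALC_CAPTURE_SAMPLES, 1, &available);
        std::vector<ALshort> samples(available > 0 ? available : 0);
        if (available > 0) {
            alcCaptureSamples(device, samples.data(), available);
        }
        std::printf("Captured %d mono samples.\n", available);

        alcCaptureStop(device);
        alcCaptureCloseDevice(device);
        return 0;
    }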
@@ -272,17 +272,19 @@ Instance::Instance()
         Player::internal::DetachFromDevice(this);
     });

-    _playbackDeviceId.changes() | rpl::start_with_next([=] {
-        _detachFromDeviceForce = false;
-        Player::internal::DetachFromDevice(this);
+    _playbackDeviceId.changes(
+    ) | rpl::start_with_next([=](Webrtc::DeviceResolvedId id) {
+        if (Player::internal::DetachIfDeviceChanged(this, id)) {
+            _detachFromDeviceForce = false;
+        }
     }, _lifetime);
 }

-QString Instance::playbackDeviceId() const {
-    return _playbackDeviceId.current();
+Webrtc::DeviceResolvedId Instance::playbackDeviceId() const {
+    return _playbackDeviceId.threadSafeCurrent();
 }

-QString Instance::captureDeviceId() const {
+Webrtc::DeviceResolvedId Instance::captureDeviceId() const {
     return _captureDeviceId.current();
 }
@@ -9,7 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL

 #include "base/timer.h"
 #include "base/bytes.h"
-#include "webrtc/webrtc_device_id.h"
+#include "webrtc/webrtc_device_resolver.h"

 namespace Core {
 class FileLocation;
@@ -95,8 +95,11 @@ public:
     // Thread: Main.
     Instance();

-    [[nodiscard]] QString playbackDeviceId() const;
-    [[nodiscard]] QString captureDeviceId() const;
+    // Thread: Any. Must be locked: AudioMutex.
+    [[nodiscard]] Webrtc::DeviceResolvedId playbackDeviceId() const;
+
+    // Thread: Main.
+    [[nodiscard]] Webrtc::DeviceResolvedId captureDeviceId() const;

     [[nodiscard]] std::unique_ptr<Track> createTrack();
@@ -119,8 +122,8 @@ private:

 private:
     std::set<Track*> _tracks;
-    Webrtc::DeviceId _playbackDeviceId;
-    Webrtc::DeviceId _captureDeviceId;
+    Webrtc::DeviceResolver _playbackDeviceId;
+    Webrtc::DeviceResolver _captureDeviceId;

     base::Timer _updateTimer;
@@ -365,7 +365,7 @@ void Calls::initCaptureButton(
     });

     struct LevelState {
-        std::unique_ptr<Webrtc::DeviceId> computedDeviceId;
+        std::unique_ptr<Webrtc::DeviceResolver> deviceId;
         std::unique_ptr<Webrtc::AudioInputTester> tester;
         base::Timer timer;
         Ui::Animations::Simple animation;
@@ -388,18 +388,18 @@ void Calls::initCaptureButton(
     });
     _testingMicrophone.value() | rpl::start_with_next([=](bool testing) {
         if (testing) {
-            state->computedDeviceId = std::make_unique<Webrtc::DeviceId>(
+            state->deviceId = std::make_unique<Webrtc::DeviceResolver>(
                 &Core::App().mediaDevices(),
                 Webrtc::DeviceType::Capture,
                 rpl::duplicate(resolvedId));
             state->tester = std::make_unique<AudioInputTester>(
-                state->computedDeviceId->value());
+                state->deviceId->value());
             state->timer.callEach(kMicTestUpdateInterval);
         } else {
             state->timer.cancel();
             state->animation.stop();
             state->tester = nullptr;
-            state->computedDeviceId = nullptr;
+            state->deviceId = nullptr;
         }
     }, level->lifetime());
 }
@@ -1 +1 @@
-Subproject commit 16b8f6ee0a1b4a1852266f1b3fc727f6a82c3716
+Subproject commit 72b1aa0405e14beef0b596c9bc748eb8905a7ef8