Implement media devices tracking on Windows.

This commit is contained in:
John Preston 2024-01-22 20:40:55 +04:00
parent 30e694420a
commit d3778f92d2
18 changed files with 551 additions and 354 deletions

View file

@ -215,6 +215,22 @@ Call::Call(
, _api(&_user->session().mtp())
, _type(type)
, _discardByTimeoutTimer([=] { hangup(); })
, _playbackDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Playback,
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callPlaybackDeviceIdValue(),
Core::App().settings().playbackDeviceIdValue()))
, _captureDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Capture,
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callCaptureDeviceIdValue(),
Core::App().settings().captureDeviceIdValue()))
, _cameraDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Camera,
Core::App().settings().cameraDeviceIdValue())
, _videoIncoming(
std::make_unique<Webrtc::VideoTrack>(
StartVideoState(video)))
@ -228,6 +244,7 @@ Call::Call(
_discardByTimeoutTimer.callOnce(config.callRingTimeoutMs);
startWaitingTrack();
}
setupMediaDevices();
setupOutgoingVideo();
}
@ -410,6 +427,20 @@ void Call::setMuted(bool mute) {
}
}
// Subscribes the call to playback/capture device-id changes so that a
// device switch made while the call is live is applied immediately.
// Subscriptions live in _lifetime, so they end with the Call object.
void Call::setupMediaDevices() {
// Apply playback (speaker) device changes to the running tgcalls
// instance; changes arriving before _instance exists are dropped —
// the instance is created later with the then-current() ids.
_playbackDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
}) | rpl::start_with_next([=](const QString &deviceId) {
_instance->setAudioOutputDevice(deviceId.toStdString());
}, _lifetime);
// Same for the capture (microphone) device.
_captureDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
}) | rpl::start_with_next([=](const QString &deviceId) {
_instance->setAudioInputDevice(deviceId.toStdString());
}, _lifetime);
}
void Call::setupOutgoingVideo() {
static const auto hasDevices = [] {
return !Webrtc::GetVideoInputList().empty();
@ -455,6 +486,19 @@ void Call::setupOutgoingVideo() {
}
}
}, _lifetime);
_cameraDeviceId.changes(
) | rpl::filter([=] {
return !_videoCaptureIsScreencast;
}) | rpl::start_with_next([=](QString deviceId) {
_videoCaptureDeviceId = deviceId;
if (_videoCapture) {
_videoCapture->switchToDevice(deviceId.toStdString(), false);
if (_instance) {
_instance->sendVideoDeviceUpdated();
}
}
}, _lifetime);
}
not_null<Webrtc::VideoTrack*> Call::videoIncoming() const {
@ -866,8 +910,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
std::move(encryptionKeyValue),
(_type == Type::Outgoing)),
.mediaDevicesConfig = tgcalls::MediaDevicesConfig{
.audioInputId = settings.callInputDeviceId().toStdString(),
.audioOutputId = settings.callOutputDeviceId().toStdString(),
.audioInputId = _captureDeviceId.current().toStdString(),
.audioOutputId = _playbackDeviceId.current().toStdString(),
.inputVolume = 1.f,//settings.callInputVolume() / 100.f,
.outputVolume = 1.f,//settings.callOutputVolume() / 100.f,
},
@ -1096,29 +1140,6 @@ void Call::setState(State state) {
}
}
// Routes a user-picked audio device id to the running call instance.
// `input == true` targets the microphone, otherwise the speaker.
// No-op while the tgcalls instance has not been created yet.
void Call::setCurrentAudioDevice(bool input, const QString &deviceId) {
	if (!_instance) {
		return;
	}
	const auto raw = deviceId.toStdString();
	if (input) {
		_instance->setAudioInputDevice(raw);
	} else {
		_instance->setAudioOutputDevice(raw);
	}
}
// Switches the outgoing video to another camera device. Remembers the
// id even before a capturer exists; ignored entirely while the video
// source is a screencast instead of a camera.
void Call::setCurrentCameraDevice(const QString &deviceId) {
	if (_videoCaptureIsScreencast) {
		return;
	}
	_videoCaptureDeviceId = deviceId;
	if (!_videoCapture) {
		return;
	}
	_videoCapture->switchToDevice(deviceId.toStdString(), false);
	if (_instance) {
		_instance->sendVideoDeviceUpdated();
	}
}
//void Call::setAudioVolume(bool input, float level) {
// if (_instance) {
// if (input) {
@ -1168,10 +1189,11 @@ void Call::toggleCameraSharing(bool enabled) {
}
_delegate->callRequestPermissionsOrFail(crl::guard(this, [=] {
toggleScreenSharing(std::nullopt);
const auto deviceId = Core::App().settings().callVideoInputDeviceId();
_videoCaptureDeviceId = deviceId;
_videoCaptureDeviceId = _cameraDeviceId.current();
if (_videoCapture) {
_videoCapture->switchToDevice(deviceId.toStdString(), false);
_videoCapture->switchToDevice(
_videoCaptureDeviceId.toStdString(),
false);
if (_instance) {
_instance->sendVideoDeviceUpdated();
}

View file

@ -12,6 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/bytes.h"
#include "mtproto/sender.h"
#include "mtproto/mtproto_auth_key.h"
#include "webrtc/webrtc_device_id.h"
namespace Media {
namespace Audio {
@ -190,11 +191,9 @@ public:
QString getDebugLog() const;
void setCurrentAudioDevice(bool input, const QString &deviceId);
//void setAudioVolume(bool input, float level);
void setAudioDuckingEnabled(bool enabled);
void setCurrentCameraDevice(const QString &deviceId);
[[nodiscard]] QString videoDeviceId() const {
return _videoCaptureDeviceId;
}
@ -250,6 +249,7 @@ private:
void setSignalBarCount(int count);
void destroyController();
void setupMediaDevices();
void setupOutgoingVideo();
void updateRemoteMediaState(
tgcalls::AudioState audio,
@ -271,6 +271,10 @@ private:
base::DelayedCallTimer _finishByTimeoutTimer;
base::Timer _discardByTimeoutTimer;
Webrtc::DeviceId _playbackDeviceId;
Webrtc::DeviceId _captureDeviceId;
Webrtc::DeviceId _cameraDeviceId;
rpl::variable<bool> _muted = false;
DhConfig _dhConfig;

View file

@ -522,20 +522,6 @@ void Instance::showInfoPanel(not_null<GroupCall*> call) {
}
}
// Persists the chosen audio device id in settings (scheduling a
// delayed save) and forwards the switch to whichever call — one-on-one
// or group — is currently active, if any.
void Instance::setCurrentAudioDevice(bool input, const QString &deviceId) {
	auto &settings = Core::App().settings();
	if (input) {
		settings.setCallInputDeviceId(deviceId);
	} else {
		settings.setCallOutputDeviceId(deviceId);
	}
	Core::App().saveSettingsDelayed();
	const auto call = currentCall();
	if (call) {
		call->setCurrentAudioDevice(input, deviceId);
		return;
	}
	if (const auto group = currentGroupCall()) {
		group->setCurrentAudioDevice(input, deviceId);
	}
}
FnMut<void()> Instance::addAsyncWaiter() {
auto semaphore = std::make_unique<crl::semaphore>();
const auto raw = semaphore.get();
@ -846,7 +832,7 @@ std::shared_ptr<tgcalls::VideoCaptureInterface> Instance::getVideoCapture(
if (deviceId) {
result->switchToDevice(
(deviceId->isEmpty()
? Core::App().settings().callVideoInputDeviceId()
? Core::App().settings().cameraDeviceId()
: *deviceId).toStdString(),
isScreenCapture);
}
@ -854,7 +840,7 @@ std::shared_ptr<tgcalls::VideoCaptureInterface> Instance::getVideoCapture(
}
const auto startDeviceId = (deviceId && !deviceId->isEmpty())
? *deviceId
: Core::App().settings().callVideoInputDeviceId();
: Core::App().settings().cameraDeviceId();
auto result = std::shared_ptr<tgcalls::VideoCaptureInterface>(
tgcalls::VideoCaptureInterface::Create(
tgcalls::StaticThreads::getThreads(),

View file

@ -103,8 +103,6 @@ public:
-> std::shared_ptr<tgcalls::VideoCaptureInterface>;
void requestPermissionsOrFail(Fn<void()> onSuccess, bool video = true);
void setCurrentAudioDevice(bool input, const QString &deviceId);
[[nodiscard]] FnMut<void()> addAsyncWaiter();
[[nodiscard]] bool isSharingScreen() const;

View file

@ -29,7 +29,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/global_shortcuts.h"
#include "base/random.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_create_adm.h"
#include <tgcalls/group/GroupInstanceCustomImpl.h>
@ -52,14 +51,6 @@ constexpr auto kFixSpeakingLargeVideoDuration = 3 * crl::time(1000);
constexpr auto kFullAsMediumsCount = 4; // 1 Full is like 4 Mediums.
constexpr auto kMaxMediumQualities = 16; // 4 Fulls or 16 Mediums.
// Builds the media-devices tracker, seeded with the capture, playback
// and camera device ids currently stored in application settings.
[[nodiscard]] std::unique_ptr<Webrtc::MediaDevices> CreateMediaDevices() {
	const auto &settings = Core::App().settings();
	const auto capture = settings.callInputDeviceId();
	const auto playback = settings.callOutputDeviceId();
	const auto camera = settings.callVideoInputDeviceId();
	return Webrtc::CreateMediaDevices(capture, playback, camera);
}
[[nodiscard]] const Data::GroupCallParticipant *LookupParticipant(
not_null<PeerData*> peer,
CallId id,
@ -590,12 +581,27 @@ GroupCall::GroupCall(
, _scheduleDate(info.scheduleDate)
, _lastSpokeCheckTimer([=] { checkLastSpoke(); })
, _checkJoinedTimer([=] { checkJoined(); })
, _playbackDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Playback,
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callPlaybackDeviceIdValue(),
Core::App().settings().playbackDeviceIdValue()))
, _captureDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Capture,
Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callCaptureDeviceIdValue(),
Core::App().settings().captureDeviceIdValue()))
, _cameraDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Camera,
Webrtc::DeviceIdOrDefault(Core::App().settings().cameraDeviceIdValue()))
, _pushToTalkCancelTimer([=] { pushToTalkCancel(); })
, _connectingSoundTimer([=] { playConnectingSoundOnce(); })
, _listenersHidden(info.rtmp)
, _rtmp(info.rtmp)
, _rtmpVolume(Group::kDefaultVolume)
, _mediaDevices(CreateMediaDevices()) {
, _rtmpVolume(Group::kDefaultVolume) {
_muted.value(
) | rpl::combine_previous(
) | rpl::start_with_next([=](MuteState previous, MuteState state) {
@ -2058,28 +2064,22 @@ void GroupCall::applyOtherParticipantUpdate(
}
void GroupCall::setupMediaDevices() {
_mediaDevices->audioInputId(
) | rpl::start_with_next([=](QString id) {
_audioInputId = id;
if (_instance) {
_instance->setAudioInputDevice(id.toStdString());
}
_playbackDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
}) | rpl::start_with_next([=](const QString &deviceId) {
_instance->setAudioOutputDevice(deviceId.toStdString());
}, _lifetime);
_mediaDevices->audioOutputId(
) | rpl::start_with_next([=](QString id) {
_audioOutputId = id;
if (_instance) {
_instance->setAudioOutputDevice(id.toStdString());
}
_captureDeviceId.changes() | rpl::filter([=] {
return _instance != nullptr;
}) | rpl::start_with_next([=](const QString &deviceId) {
_instance->setAudioInputDevice(deviceId.toStdString());
}, _lifetime);
_mediaDevices->videoInputId(
) | rpl::start_with_next([=](QString id) {
_cameraInputId = id;
if (_cameraCapture) {
_cameraCapture->switchToDevice(id.toStdString(), false);
}
_cameraDeviceId.changes() | rpl::filter([=] {
return _cameraCapture != nullptr;
}) | rpl::start_with_next([=](const QString &deviceId) {
_cameraCapture->switchToDevice(deviceId.toStdString(), false);
}, _lifetime);
}
@ -2117,7 +2117,7 @@ bool GroupCall::emitShareCameraError() {
return emitError(Error::DisabledNoCamera);
} else if (mutedByAdmin()) {
return emitError(Error::MutedNoCamera);
} else if (Webrtc::GetVideoInputList().empty()) {
} else if (_cameraDeviceId.current().isEmpty()) {
return emitError(Error::NoCamera);
}
return false;
@ -2126,7 +2126,7 @@ bool GroupCall::emitShareCameraError() {
void GroupCall::emitShareCameraError(Error error) {
_cameraState = Webrtc::VideoState::Inactive;
if (error == Error::CameraFailed
&& Webrtc::GetVideoInputList().empty()) {
&& _cameraDeviceId.current().isEmpty()) {
error = Error::NoCamera;
}
_errors.fire_copy(error);
@ -2180,7 +2180,7 @@ void GroupCall::setupOutgoingVideo() {
return;
} else if (!_cameraCapture) {
_cameraCapture = _delegate->groupCallGetVideoCapture(
_cameraInputId);
_cameraDeviceId.current());
if (!_cameraCapture) {
return emitShareCameraError(Error::CameraFailed);
}
@ -2192,7 +2192,7 @@ void GroupCall::setupOutgoingVideo() {
});
} else {
_cameraCapture->switchToDevice(
_cameraInputId.toStdString(),
_cameraDeviceId.current().toStdString(),
false);
}
if (_instance) {
@ -2360,8 +2360,8 @@ bool GroupCall::tryCreateController() {
}
crl::on_main(weak, [=] { audioLevelsUpdated(data); });
},
.initialInputDeviceId = _audioInputId.toStdString(),
.initialOutputDeviceId = _audioOutputId.toStdString(),
.initialInputDeviceId = _captureDeviceId.current().toStdString(),
.initialOutputDeviceId = _playbackDeviceId.current().toStdString(),
.createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
settings.callAudioBackend()),
.videoCapture = _cameraCapture,
@ -3290,14 +3290,6 @@ void GroupCall::requestVideoQuality(
updateRequestedVideoChannelsDelayed();
}
// Forwards an audio device switch to the media-devices tracker:
// microphone when `input` is set, speaker otherwise.
void GroupCall::setCurrentAudioDevice(bool input, const QString &deviceId) {
	auto &devices = *_mediaDevices;
	if (input) {
		devices.switchToAudioInput(deviceId);
	} else {
		devices.switchToAudioOutput(deviceId);
	}
}
void GroupCall::toggleMute(const Group::MuteRequest &data) {
if (_rtmp) {
_rtmpVolume = data.mute ? 0 : Group::kDefaultVolume;

View file

@ -12,6 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/bytes.h"
#include "mtproto/sender.h"
#include "mtproto/mtproto_auth_key.h"
#include "webrtc/webrtc_device_id.h"
class History;
@ -381,7 +382,6 @@ public:
return _videoIsWorking.value();
}
void setCurrentAudioDevice(bool input, const QString &deviceId);
[[nodiscard]] bool isSharingScreen() const;
[[nodiscard]] rpl::producer<bool> isSharingScreenValue() const;
[[nodiscard]] bool isScreenPaused() const;
@ -667,6 +667,10 @@ private:
crl::time _lastSendProgressUpdate = 0;
Webrtc::DeviceId _playbackDeviceId;
Webrtc::DeviceId _captureDeviceId;
Webrtc::DeviceId _cameraDeviceId;
std::shared_ptr<GlobalShortcutManager> _shortcutManager;
std::shared_ptr<GlobalShortcutValue> _pushToTalk;
base::Timer _pushToTalkCancelTimer;
@ -677,11 +681,6 @@ private:
bool _reloadedStaleCall = false;
int _rtmpVolume = 0;
std::unique_ptr<Webrtc::MediaDevices> _mediaDevices;
QString _audioInputId;
QString _audioOutputId;
QString _cameraInputId;
rpl::lifetime _lifetime;
};

View file

@ -250,8 +250,6 @@ void SettingsBox(
const auto weakBox = Ui::MakeWeak(box);
struct State {
rpl::event_stream<QString> outputNameStream;
rpl::event_stream<QString> inputNameStream;
std::unique_ptr<Webrtc::AudioInputTester> micTester;
Ui::LevelMeter *micTestLevel = nullptr;
float micLevel = 0.;
@ -295,42 +293,46 @@ void SettingsBox(
Ui::AddSkip(layout);
}
auto playbackIdWithFallback = Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callPlaybackDeviceIdValue(),
Core::App().settings().playbackDeviceIdValue());
AddButtonWithLabel(
layout,
tr::lng_group_call_speakers(),
rpl::single(
CurrentAudioOutputName()
) | rpl::then(
state->outputNameStream.events()
),
PlaybackDeviceNameValue(rpl::duplicate(playbackIdWithFallback)),
st::groupCallSettingsButton
)->addClickHandler([=] {
box->getDelegate()->show(ChooseAudioOutputBox(crl::guard(box, [=](
const QString &id,
const QString &name) {
state->outputNameStream.fire_copy(name);
}), &st::groupCallCheckbox, &st::groupCallRadio));
box->getDelegate()->show(ChoosePlaybackDeviceBox(
rpl::duplicate(playbackIdWithFallback),
crl::guard(box, [=](const QString &id) {
Core::App().settings().setCallPlaybackDeviceId(id);
Core::App().saveSettingsDelayed();
}),
&st::groupCallCheckbox,
&st::groupCallRadio));
});
if (!rtmp) {
auto captureIdWithFallback = Webrtc::DeviceIdValueWithFallback(
Core::App().settings().callCaptureDeviceIdValue(),
Core::App().settings().captureDeviceIdValue());
AddButtonWithLabel(
layout,
tr::lng_group_call_microphone(),
rpl::single(
CurrentAudioInputName()
) | rpl::then(
state->inputNameStream.events()
),
CaptureDeviceNameValue(rpl::duplicate(captureIdWithFallback)),
st::groupCallSettingsButton
)->addClickHandler([=] {
box->getDelegate()->show(ChooseAudioInputBox(crl::guard(box, [=](
const QString &id,
const QString &name) {
state->inputNameStream.fire_copy(name);
if (state->micTester) {
state->micTester->setDeviceId(id);
}
}), &st::groupCallCheckbox, &st::groupCallRadio));
box->getDelegate()->show(ChooseCaptureDeviceBox(
rpl::duplicate(captureIdWithFallback),
crl::guard(box, [=](const QString &id) {
Core::App().settings().setCallCaptureDeviceId(id);
Core::App().saveSettingsDelayed();
if (state->micTester) {
state->micTester->setDeviceId(id);
}
}),
&st::groupCallCheckbox,
&st::groupCallRadio));
});
state->micTestLevel = box->addRow(
@ -773,7 +775,7 @@ void SettingsBox(
crl::on_main(box, [=] {
state->micTester = std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId());
Core::App().settings().callCaptureDeviceId());
state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
});
});
@ -883,7 +885,7 @@ MicLevelTester::MicLevelTester(Fn<void()> show)
, _tester(
std::make_unique<Webrtc::AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId())) {
Core::App().settings().callCaptureDeviceId())) {
_timer.callEach(kMicrophoneTooltipCheckInterval);
}

View file

@ -88,6 +88,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "storage/localstorage.h"
#include "payments/payments_checkout_process.h"
#include "export/export_manager.h"
#include "webrtc/webrtc_environment.h"
#include "window/window_session_controller.h"
#include "window/window_controller.h"
#include "boxes/abstract_box.h"
@ -150,6 +151,7 @@ Application::Application()
, _private(std::make_unique<Private>())
, _platformIntegration(Platform::Integration::Create())
, _batterySaving(std::make_unique<base::BatterySaving>())
, _mediaDevices(std::make_unique<Webrtc::Environment>())
, _databases(std::make_unique<Storage::Databases>())
, _animationsManager(std::make_unique<Ui::Animations::Manager>())
, _clearEmojiImageLoaderTimer([=] { clearEmojiSourceImages(); })

View file

@ -101,6 +101,10 @@ namespace Calls {
class Instance;
} // namespace Calls
namespace Webrtc {
class Environment;
} // namespace Webrtc
namespace Core {
struct LocalUrlHandler;
@ -238,6 +242,9 @@ public:
[[nodiscard]] Media::Audio::Instance &audio() {
return *_audio;
}
[[nodiscard]] Webrtc::Environment &mediaDevices() {
return *_mediaDevices;
}
// Langpack and emoji keywords.
[[nodiscard]] Lang::Instance &langpack() {
@ -383,6 +390,7 @@ private:
const std::unique_ptr<Private> _private;
const std::unique_ptr<Platform::Integration> _platformIntegration;
const std::unique_ptr<base::BatterySaving> _batterySaving;
const std::unique_ptr<Webrtc::Environment> _mediaDevices;
const std::unique_ptr<Storage::Databases> _databases;
const std::unique_ptr<Ui::Animations::Manager> _animationsManager;

View file

@ -17,6 +17,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "ui/gl/gl_detection.h"
#include "ui/widgets/fields/input_field.h"
#include "webrtc/webrtc_create_adm.h"
#include "webrtc/webrtc_device_common.h"
#include "window/section_widget.h"
namespace Core {
@ -159,8 +160,8 @@ QByteArray Settings::serialize() const {
+ Serialize::stringSize(_downloadPath.current())
+ Serialize::bytearraySize(_downloadPathBookmark)
+ sizeof(qint32) * 9
+ Serialize::stringSize(_callOutputDeviceId)
+ Serialize::stringSize(_callInputDeviceId)
+ Serialize::stringSize(_callPlaybackDeviceId.current())
+ Serialize::stringSize(_callCaptureDeviceId.current())
+ sizeof(qint32) * 5;
for (const auto &[key, value] : _soundOverrides) {
size += Serialize::stringSize(key) + Serialize::stringSize(value);
@ -170,7 +171,7 @@ QByteArray Settings::serialize() const {
+ sizeof(qint32)
+ (_dictionariesEnabled.current().size() * sizeof(quint64))
+ sizeof(qint32) * 12
+ Serialize::stringSize(_callVideoInputDeviceId)
+ Serialize::stringSize(_cameraDeviceId.current())
+ sizeof(qint32) * 2
+ Serialize::bytearraySize(_groupCallPushToTalkShortcut)
+ sizeof(qint64)
@ -194,7 +195,7 @@ QByteArray Settings::serialize() const {
+ (_accountsOrder.size() * sizeof(quint64))
+ sizeof(qint32) * 7
+ (skipLanguages.size() * sizeof(quint64))
+ sizeof(qint32)
+ sizeof(qint32) * 2
+ sizeof(quint64)
+ sizeof(qint32) * 3
+ Serialize::bytearraySize(mediaViewPosition)
@ -204,6 +205,9 @@ QByteArray Settings::serialize() const {
for (const auto &id : _recentEmojiSkip) {
size += Serialize::stringSize(id);
}
size += sizeof(qint32) * 2
+ Serialize::stringSize(_playbackDeviceId.current())
+ Serialize::stringSize(_captureDeviceId.current());
auto result = QByteArray();
result.reserve(size);
@ -228,8 +232,8 @@ QByteArray Settings::serialize() const {
<< qint32(_notificationsCount)
<< static_cast<qint32>(_notificationsCorner)
<< qint32(_autoLock)
<< _callOutputDeviceId
<< _callInputDeviceId
<< _callPlaybackDeviceId.current()
<< _callCaptureDeviceId.current()
<< qint32(_callOutputVolume)
<< qint32(_callInputVolume)
<< qint32(_callAudioDuckingEnabled ? 1 : 0)
@ -273,7 +277,7 @@ QByteArray Settings::serialize() const {
<< qint32(_notifyFromAll ? 1 : 0)
<< qint32(_nativeWindowFrame.current() ? 1 : 0)
<< qint32(_systemDarkModeEnabled.current() ? 1 : 0)
<< _callVideoInputDeviceId
<< _cameraDeviceId.current()
<< qint32(_ipRevealWarning ? 1 : 0)
<< qint32(_groupCallPushToTalk ? 1 : 0)
<< _groupCallPushToTalkShortcut
@ -327,9 +331,7 @@ QByteArray Settings::serialize() const {
}
stream
<< qint32(_rememberedDeleteMessageOnlyForYou ? 1 : 0);
stream
<< qint32(_rememberedDeleteMessageOnlyForYou ? 1 : 0)
<< qint32(_translateChatEnabled.current() ? 1 : 0)
<< quint64(QLocale::Language(_translateToRaw.current()))
<< qint32(_windowTitleContent.current().hideChatName ? 1 : 0)
@ -339,14 +341,18 @@ QByteArray Settings::serialize() const {
<< qint32(_ignoreBatterySaving.current() ? 1 : 0)
<< quint64(_macRoundIconDigest.value_or(0))
<< qint32(_storiesClickTooltipHidden.current() ? 1 : 0)
<< qint32(_recentEmojiSkip.size())
<< qint32(_ttlVoiceClickTooltipHidden.current() ? 1 : 0);
<< qint32(_recentEmojiSkip.size());
for (const auto &id : _recentEmojiSkip) {
stream << id;
}
stream
<< qint32(_trayIconMonochrome.current() ? 1 : 0);
<< qint32(_trayIconMonochrome.current() ? 1 : 0)
<< qint32(_ttlVoiceClickTooltipHidden.current() ? 1 : 0)
<< _playbackDeviceId.current()
<< _captureDeviceId.current();
}
Ensures(result.size() == size);
return result;
}
@ -375,9 +381,11 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
qint32 notificationsCount = _notificationsCount;
qint32 notificationsCorner = static_cast<qint32>(_notificationsCorner);
qint32 autoLock = _autoLock;
QString callOutputDeviceId = _callOutputDeviceId;
QString callInputDeviceId = _callInputDeviceId;
QString callVideoInputDeviceId = _callVideoInputDeviceId;
QString playbackDeviceId = _playbackDeviceId.current();
QString captureDeviceId = _captureDeviceId.current();
QString cameraDeviceId = _cameraDeviceId.current();
QString callPlaybackDeviceId = _callPlaybackDeviceId.current();
QString callCaptureDeviceId = _callCaptureDeviceId.current();
qint32 callOutputVolume = _callOutputVolume;
qint32 callInputVolume = _callInputVolume;
qint32 callAudioDuckingEnabled = _callAudioDuckingEnabled ? 1 : 0;
@ -475,8 +483,8 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
>> notificationsCount
>> notificationsCorner
>> autoLock
>> callOutputDeviceId
>> callInputDeviceId
>> callPlaybackDeviceId
>> callCaptureDeviceId
>> callOutputVolume
>> callInputVolume
>> callAudioDuckingEnabled
@ -539,7 +547,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
stream >> systemDarkModeEnabled;
}
if (!stream.atEnd()) {
stream >> callVideoInputDeviceId;
stream >> cameraDeviceId;
}
if (!stream.atEnd()) {
stream >> ipRevealWarning;
@ -666,7 +674,8 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
});
}
}
}
if (!stream.atEnd()) {
stream >> rememberedDeleteMessageOnlyForYou;
}
if (!stream.atEnd()) {
@ -714,6 +723,11 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
if (!stream.atEnd()) {
stream >> ttlVoiceClickTooltipHidden;
}
if (!stream.atEnd()) {
stream
>> playbackDeviceId
>> captureDeviceId;
}
if (stream.status() != QDataStream::Ok) {
LOG(("App Error: "
"Bad data for Core::Settings::constructFromSerialized()"));
@ -757,9 +771,12 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
_countUnreadMessages = (countUnreadMessages == 1);
_notifyAboutPinned = (notifyAboutPinned == 1);
_autoLock = autoLock;
_callOutputDeviceId = callOutputDeviceId;
_callInputDeviceId = callInputDeviceId;
_callVideoInputDeviceId = callVideoInputDeviceId;
_playbackDeviceId = playbackDeviceId;
_captureDeviceId = captureDeviceId;
const auto kOldDefault = u"default"_q;
_cameraDeviceId = cameraDeviceId;
_callPlaybackDeviceId = callPlaybackDeviceId;
_callCaptureDeviceId = callCaptureDeviceId;
_callOutputVolume = callOutputVolume;
_callInputVolume = callInputVolume;
_callAudioDuckingEnabled = (callAudioDuckingEnabled == 1);
@ -1216,9 +1233,11 @@ void Settings::resetOnLastLogout() {
_notifyAboutPinned = true;
//_autoLock = 3600;
//_callOutputDeviceId = u"default"_q;
//_callInputDeviceId = u"default"_q;
//_callVideoInputDeviceId = u"default"_q;
//_playbackDeviceId = QString();
//_captureDeviceId = QString();
//_cameraDeviceId = QString();
//_callPlaybackDeviceId = QString();
//_callCaptureDeviceId = QString();
//_callOutputVolume = 100;
//_callInputVolume = 100;
//_callAudioDuckingEnabled = true;

View file

@ -263,30 +263,68 @@ public:
void setAutoLock(int value) {
_autoLock = value;
}
[[nodiscard]] QString callOutputDeviceId() const {
return _callOutputDeviceId.isEmpty()
? u"default"_q
: _callOutputDeviceId;
[[nodiscard]] QString playbackDeviceId() const {
return _playbackDeviceId.current();
}
void setCallOutputDeviceId(const QString &value) {
_callOutputDeviceId = value;
[[nodiscard]] rpl::producer<QString> playbackDeviceIdChanges() const {
return _playbackDeviceId.changes();
}
[[nodiscard]] QString callInputDeviceId() const {
return _callInputDeviceId.isEmpty()
? u"default"_q
: _callInputDeviceId;
[[nodiscard]] rpl::producer<QString> playbackDeviceIdValue() const {
return _playbackDeviceId.value();
}
void setCallInputDeviceId(const QString &value) {
_callInputDeviceId = value;
void setPlaybackDeviceId(const QString &value) {
_playbackDeviceId = value;
}
[[nodiscard]] QString callVideoInputDeviceId() const {
return _callVideoInputDeviceId.isEmpty()
? u"default"_q
: _callVideoInputDeviceId;
[[nodiscard]] QString captureDeviceId() const {
return _captureDeviceId.current();
}
void setCallVideoInputDeviceId(const QString &value) {
_callVideoInputDeviceId = value;
[[nodiscard]] rpl::producer<QString> captureDeviceIdChanges() const {
return _captureDeviceId.changes();
}
[[nodiscard]] rpl::producer<QString> captureDeviceIdValue() const {
return _captureDeviceId.value();
}
void setCaptureDeviceId(const QString &value) {
_captureDeviceId = value;
}
[[nodiscard]] QString cameraDeviceId() const {
return _cameraDeviceId.current();
}
[[nodiscard]] rpl::producer<QString> cameraDeviceIdChanges() const {
return _cameraDeviceId.changes();
}
[[nodiscard]] rpl::producer<QString> cameraDeviceIdValue() const {
return _cameraDeviceId.value();
}
void setCameraDeviceId(const QString &value) {
_cameraDeviceId = value;
}
[[nodiscard]] QString callPlaybackDeviceId() const {
return _callPlaybackDeviceId.current();
}
[[nodiscard]] rpl::producer<QString> callPlaybackDeviceIdChanges() const {
return _callPlaybackDeviceId.changes();
}
[[nodiscard]] rpl::producer<QString> callPlaybackDeviceIdValue() const {
return _callPlaybackDeviceId.value();
}
void setCallPlaybackDeviceId(const QString &value) {
_callPlaybackDeviceId = value;
}
[[nodiscard]] QString callCaptureDeviceId() const {
return _callCaptureDeviceId.current();
}
[[nodiscard]] rpl::producer<QString> callCaptureDeviceIdChanges() const {
return _callCaptureDeviceId.changes();
}
[[nodiscard]] rpl::producer<QString> callCaptureDeviceIdValue() const {
return _callCaptureDeviceId.value();
}
void setCallCaptureDeviceId(const QString &value) {
_callCaptureDeviceId = value;
}
[[nodiscard]] int callOutputVolume() const {
return _callOutputVolume;
}
@ -875,9 +913,11 @@ private:
bool _countUnreadMessages = true;
rpl::variable<bool> _notifyAboutPinned = true;
int _autoLock = 3600;
QString _callOutputDeviceId = u"default"_q;
QString _callInputDeviceId = u"default"_q;
QString _callVideoInputDeviceId = u"default"_q;
rpl::variable<QString> _playbackDeviceId;
rpl::variable<QString> _captureDeviceId;
rpl::variable<QString> _cameraDeviceId;
rpl::variable<QString> _callPlaybackDeviceId;
rpl::variable<QString> _callCaptureDeviceId;
int _callOutputVolume = 100;
int _callInputVolume = 100;
bool _callAudioDuckingEnabled = true;

View file

@ -141,7 +141,8 @@ void DestroyPlaybackDevice() {
bool CreatePlaybackDevice() {
if (AudioDevice) return true;
AudioDevice = alcOpenDevice(nullptr);
const auto id = Current().deviceId().toStdString();
AudioDevice = alcOpenDevice(id.c_str());
if (!AudioDevice) {
LOG(("Audio Error: Could not create default playback device, enumerating.."));
EnumeratePlaybackDevices();
@ -200,12 +201,12 @@ void Start(not_null<Instance*> instance) {
MixerInstance = new Player::Mixer(instance);
Platform::Audio::Init();
//Platform::Audio::Init();
}
// Thread: Main.
void Finish(not_null<Instance*> instance) {
Platform::Audio::DeInit();
//Platform::Audio::DeInit();
// MixerInstance variable should be modified under AudioMutex protection.
// So it is modified in the ~Mixer() destructor after all tracks are cleared.

View file

@ -10,6 +10,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "media/audio/media_audio_ffmpeg_loader.h"
#include "media/audio/media_audio.h"
#include "core/application.h"
#include "core/core_settings.h"
#include "core/file_location.h"
#include <al.h>
@ -242,7 +243,12 @@ Track::~Track() {
_instance->unregisterTrack(this);
}
Instance::Instance() {
Instance::Instance()
: _playbackDeviceId(
&Core::App().mediaDevices(),
Webrtc::DeviceType::Playback,
Webrtc::DeviceIdOrDefault(
Core::App().settings().playbackDeviceIdValue())) {
_updateTimer.setCallback([this] {
auto hasActive = false;
for (auto track : _tracks) {
@ -260,6 +266,15 @@ Instance::Instance() {
_detachFromDeviceForce = false;
Player::internal::DetachFromDevice(this);
});
_playbackDeviceId.changes() | rpl::start_with_next([=] {
_detachFromDeviceForce = false;
Player::internal::DetachFromDevice(this);
}, _lifetime);
}
// Returns the current playback device id tracked for this audio
// instance (used when opening the OpenAL playback device).
QString Instance::deviceId() const {
return _playbackDeviceId.current();
}
std::unique_ptr<Track> Instance::createTrack() {

View file

@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "base/timer.h"
#include "base/bytes.h"
#include "webrtc/webrtc_device_id.h"
namespace Core {
class FileLocation;
@ -94,7 +95,9 @@ public:
// Thread: Main.
Instance();
std::unique_ptr<Track> createTrack();
[[nodiscard]] QString deviceId() const;
[[nodiscard]] std::unique_ptr<Track> createTrack();
void detachTracks();
void reattachTracks();
@ -115,15 +118,18 @@ private:
private:
std::set<Track*> _tracks;
Webrtc::DeviceId _playbackDeviceId;
base::Timer _updateTimer;
base::Timer _detachFromDeviceTimer;
bool _detachFromDeviceForce = false;
rpl::lifetime _lifetime;
};
Instance &Current();
[[nodiscard]] Instance &Current();
} // namespace Audio
} // namespace Media

View file

@ -29,6 +29,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
#include "calls/calls_video_bubble.h"
#include "apiwrap.h"
#include "api/api_authorizations.h"
#include "webrtc/webrtc_environment.h"
#include "webrtc/webrtc_media_devices.h"
#include "webrtc/webrtc_video_track.h"
#include "webrtc/webrtc_audio_input_tester.h"
@ -41,6 +42,18 @@ namespace {
using namespace Webrtc;
// Maps a reactive device-id producer to the matching human-readable
// device name, falling back to the localized "Default" label when the
// id is not found in the current device list.
//
// NOTE(review): the list is re-queried only when the id changes, so a
// rename/appearance of devices alone does not refresh the shown name —
// confirm whether the id producer also fires on device-list updates.
[[nodiscard]] rpl::producer<QString> DeviceNameValue(
DeviceType type,
rpl::producer<QString> id) {
// The lambda parameter `id` intentionally shadows the (moved-from)
// outer producer; each emitted id is resolved against the live list.
return std::move(id) | rpl::map([type](const QString &id) {
const auto list = Core::App().mediaDevices().devices(type);
const auto i = ranges::find(list, id, &DeviceInfo::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
});
}
} // namespace
Calls::Calls(
@ -86,7 +99,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
const auto cameras = GetVideoInputList();
const auto i = ranges::find(
cameras,
Core::App().settings().callVideoInputDeviceId(),
Core::App().settings().cameraDeviceId(),
&VideoInput::id);
return (i != end(cameras))
? i->name
@ -111,7 +124,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
) | ranges::to_vector;
const auto i = ranges::find(
devices,
Core::App().settings().callVideoInputDeviceId(),
Core::App().settings().cameraDeviceId(),
&VideoInput::id);
const auto currentOption = (i != end(devices))
? int(i - begin(devices) + 1)
@ -120,14 +133,11 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
cameraNameStream->fire_copy(options[option]);
const auto deviceId = option
? devices[option - 1].id
: "default";
: kDefaultDeviceId;
if (saveToSettings) {
Core::App().settings().setCallVideoInputDeviceId(deviceId);
Core::App().settings().setCameraDeviceId(deviceId);
Core::App().saveSettingsDelayed();
}
if (const auto call = Core::App().calls().currentCall()) {
call->setCurrentCameraDevice(deviceId);
}
if (*capturerOwner) {
(*capturerOwner)->switchToDevice(
deviceId.toStdString(),
@ -186,7 +196,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
return;
}
*capturerOwner = Core::App().calls().getVideoCapture(
Core::App().settings().callVideoInputDeviceId(),
Core::App().settings().cameraDeviceId(),
false);
(*capturerOwner)->setPreferredAspectRatio(0.);
track->setState(VideoState::Active);
@ -220,54 +230,58 @@ void Calls::sectionSaveChanges(FnMut<void()> done) {
void Calls::setupContent() {
const auto content = Ui::CreateChild<Ui::VerticalLayout>(this);
if (!GetVideoInputList().empty()) {
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
AddCameraSubsection(_controller->uiShow(), content, true);
Ui::AddSkip(content);
Ui::AddDivider(content);
}
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_output());
//auto playbackIdWithFallback = DeviceIdValueWithFallback(
// Core::App().settings().callPlaybackDeviceIdValue(),
// Core::App().settings().playbackDeviceIdValue());
auto playbackIdWithFallback = [] {
return DeviceIdOrDefault(
Core::App().settings().playbackDeviceIdValue());
};
AddButtonWithLabel(
content,
tr::lng_settings_call_output_device(),
rpl::single(
CurrentAudioOutputName()
) | rpl::then(
_outputNameStream.events()
),
PlaybackDeviceNameValue(playbackIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChooseAudioOutputBox(crl::guard(this, [=](
const QString &id,
const QString &name) {
_outputNameStream.fire_copy(name);
})));
_controller->show(ChoosePlaybackDeviceBox(
playbackIdWithFallback(),
crl::guard(this, [=](const QString &id) {
//Core::App().settings().setCallPlaybackDeviceId(id);
Core::App().settings().setPlaybackDeviceId(id);
Core::App().saveSettingsDelayed();
})));
});
Ui::AddSkip(content);
Ui::AddDivider(content);
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_input());
//auto captureIdWithFallback = DeviceIdValueWithFallback(
// Core::App().settings().callCaptureDeviceIdValue(),
// Core::App().settings().captureDeviceIdValue());
auto captureIdWithFallback = [] {
return DeviceIdOrDefault(
Core::App().settings().captureDeviceIdValue());
};
AddButtonWithLabel(
content,
tr::lng_settings_call_input_device(),
rpl::single(
CurrentAudioInputName()
) | rpl::then(
_inputNameStream.events()
),
CaptureDeviceNameValue(captureIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
_controller->show(ChooseAudioInputBox(crl::guard(this, [=](
const QString &id,
const QString &name) {
_inputNameStream.fire_copy(name);
if (_micTester) {
_micTester->setDeviceId(id);
}
})));
_controller->show(ChooseCaptureDeviceBox(
captureIdWithFallback(),
crl::guard(this, [=](const QString &id) {
//Core::App().settings().setCallCaptureDeviceId(id);
Core::App().settings().setCaptureDeviceId(id);
Core::App().saveSettingsDelayed();
if (_micTester) {
_micTester->setDeviceId(id);
}
})));
});
_micTestLevel = content->add(
@ -287,6 +301,15 @@ void Calls::setupContent() {
Ui::AddSkip(content);
Ui::AddDivider(content);
if (!GetVideoInputList().empty()) {
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
AddCameraSubsection(_controller->uiShow(), content, true);
Ui::AddSkip(content);
Ui::AddDivider(content);
}
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_other());
@ -310,8 +333,8 @@ void Calls::setupContent() {
tr::lng_settings_call_open_system_prefs(),
st::settingsButtonNoIcon
))->addClickHandler([=] {
const auto opened = Platform::OpenSystemSettings(
Platform::SystemSettingsType::Audio);
using namespace ::Platform;
const auto opened = OpenSystemSettings(SystemSettingsType::Audio);
if (!opened) {
_controller->show(
Ui::MakeInformBox(tr::lng_linux_no_audio_prefs()));
@ -324,26 +347,27 @@ void Calls::setupContent() {
}
void Calls::requestPermissionAndStartTestingMicrophone() {
const auto status = Platform::GetPermissionStatus(
Platform::PermissionType::Microphone);
if (status == Platform::PermissionStatus::Granted) {
using namespace ::Platform;
const auto status = GetPermissionStatus(
PermissionType::Microphone);
if (status == PermissionStatus::Granted) {
startTestingMicrophone();
} else if (status == Platform::PermissionStatus::CanRequest) {
} else if (status == PermissionStatus::CanRequest) {
const auto startTestingChecked = crl::guard(this, [=](
Platform::PermissionStatus status) {
if (status == Platform::PermissionStatus::Granted) {
PermissionStatus status) {
if (status == PermissionStatus::Granted) {
crl::on_main(crl::guard(this, [=] {
startTestingMicrophone();
}));
}
});
Platform::RequestPermission(
Platform::PermissionType::Microphone,
RequestPermission(
PermissionType::Microphone,
startTestingChecked);
} else {
const auto showSystemSettings = [controller = _controller] {
Platform::OpenSystemSettingsForPermission(
Platform::PermissionType::Microphone);
OpenSystemSettingsForPermission(
PermissionType::Microphone);
controller->hideLayer();
};
_controller->show(Ui::MakeConfirmBox({
@ -358,135 +382,215 @@ void Calls::startTestingMicrophone() {
_levelUpdateTimer.callEach(kMicTestUpdateInterval);
_micTester = std::make_unique<AudioInputTester>(
Core::App().settings().callAudioBackend(),
Core::App().settings().callInputDeviceId());
Core::App().settings().callCaptureDeviceId());
}
QString CurrentAudioOutputName() {
const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto i = ranges::find(
list,
settings.callOutputDeviceId(),
&AudioOutput::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
// Human-readable name stream for a playback (audio output) device id
// stream; unknown ids resolve to the localized "Default" label.
rpl::producer<QString> PlaybackDeviceNameValue(rpl::producer<QString> id) {
return DeviceNameValue(DeviceType::Playback, std::move(id));
}
QString CurrentAudioInputName() {
const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto i = ranges::find(
list,
settings.callInputDeviceId(),
&AudioInput::id);
return (i != end(list))
? i->name
: tr::lng_settings_call_device_default(tr::now);
// Human-readable name stream for a capture (audio input) device id
// stream; unknown ids resolve to the localized "Default" label.
rpl::producer<QString> CaptureDeviceNameValue(rpl::producer<QString> id) {
return DeviceNameValue(DeviceType::Capture, std::move(id));
}
object_ptr<Ui::GenericBox> ChooseAudioOutputBox(
Fn<void(QString id, QString name)> chosen,
void ChooseAudioDeviceBox(
not_null<Ui::GenericBox*> box,
rpl::producer<QString> title,
rpl::producer<std::vector<DeviceInfo>> devicesValue,
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
const style::Checkbox *st,
const style::Radio *radioSt) {
const auto &settings = Core::App().settings();
const auto list = GetAudioOutputList(settings.callAudioBackend());
const auto options = ranges::views::concat(
ranges::views::single(tr::lng_settings_call_device_default(tr::now)),
list | ranges::views::transform(&AudioOutput::name)
) | ranges::to_vector;
const auto i = ranges::find(
list,
settings.callOutputDeviceId(),
&AudioOutput::id);
const auto currentOption = (i != end(list))
? int(i - begin(list) + 1)
: 0;
const auto save = [=](int option) {
const auto deviceId = option
? list[option - 1].id
: "default";
Core::App().calls().setCurrentAudioDevice(false, deviceId);
chosen(deviceId, options[option]);
box->setTitle(std::move(title));
box->addButton(tr::lng_box_ok(), [=] { box->closeBox(); });
const auto layout = box->verticalLayout();
const auto skip = st::boxOptionListPadding.top()
+ st::defaultBoxCheckbox.margin.top();
layout->add(object_ptr<Ui::FixedHeightWidget>(layout, skip));
if (!st) {
st = &st::defaultBoxCheckbox;
}
if (!radioSt) {
radioSt = &st::defaultRadio;
}
struct State {
std::vector<DeviceInfo> list;
base::flat_map<int, QString> ids;
rpl::variable<QString> currentId;
QString currentName;
bool ignoreValueChange = false;
};
return Box([=](not_null<Ui::GenericBox*> box) {
SingleChoiceBox(box, {
.title = tr::lng_settings_call_output_device(),
.options = options,
.initialSelection = currentOption,
.callback = save,
.st = st,
.radioSt = radioSt,
const auto state = box->lifetime().make_state<State>();
state->currentId = std::move(currentId);
const auto group = std::make_shared<Ui::RadiobuttonGroup>();
const auto fake = std::make_shared<Ui::RadiobuttonGroup>(0);
const auto buttons = layout->add(object_ptr<Ui::VerticalLayout>(layout));
const auto other = layout->add(object_ptr<Ui::VerticalLayout>(layout));
const auto margins = QMargins(
st::boxPadding.left() + st::boxOptionListPadding.left(),
0,
st::boxPadding.right(),
st::boxOptionListSkip);
const auto def = buttons->add(
object_ptr<Ui::Radiobutton>(
buttons,
group,
0,
tr::lng_settings_call_device_default(tr::now),
*st,
*radioSt),
margins);
const auto selectCurrent = [=](QString current) {
state->ignoreValueChange = true;
const auto guard = gsl::finally([&] {
state->ignoreValueChange = false;
});
if (current.isEmpty() || current == kDefaultDeviceId) {
group->setValue(0);
other->clear();
} else {
auto found = false;
for (const auto &[index, id] : state->ids) {
if (id == current) {
group->setValue(index);
found = true;
break;
}
}
if (found) {
other->clear();
} else {
group->setValue(0);
const auto i = ranges::find(
state->list,
current,
&DeviceInfo::id);
if (i != end(state->list)) {
const auto button = other->add(
object_ptr<Ui::Radiobutton>(
other,
fake,
0,
i->name,
*st,
*radioSt),
margins);
button->show();
button->setDisabled(true);
button->finishAnimating();
button->setAttribute(Qt::WA_TransparentForMouseEvents);
while (other->count() > 1) {
delete other->widgetAt(1);
}
if (const auto width = box->width()) {
other->resizeToWidth(width);
}
} else {
other->clear();
}
}
}
};
std::move(
devicesValue
) | rpl::start_with_next([=](std::vector<DeviceInfo> &&list) {
auto count = buttons->count();
auto index = 1;
state->ids.clear();
state->list = std::move(list);
state->ignoreValueChange = true;
const auto guard = gsl::finally([&] {
state->ignoreValueChange = false;
});
const auto current = state->currentId.current();
for (const auto &info : state->list) {
if (info.inactive) {
continue;
} else if (current == info.id) {
group->setValue(index);
}
const auto button = buttons->insert(
index,
object_ptr<Ui::Radiobutton>(
buttons,
group,
index,
info.name,
*st,
*radioSt),
margins);
button->show();
button->finishAnimating();
state->ids.emplace(index, info.id);
if (index < count) {
delete buttons->widgetAt(index + 1);
}
++index;
}
while (index < count) {
delete buttons->widgetAt(index);
--count;
}
if (const auto width = box->width()) {
buttons->resizeToWidth(width);
}
selectCurrent(current);
}, box->lifetime());
state->currentId.changes(
) | rpl::start_with_next(selectCurrent, box->lifetime());
def->finishAnimating();
group->setChangedCallback([=](int value) {
if (state->ignoreValueChange) {
return;
}
const auto weak = Ui::MakeWeak(box);
chosen(state->ids.take(value).value_or(kDefaultDeviceId));
if (weak) {
box->closeBox();
}
});
}
object_ptr<Ui::GenericBox> ChooseAudioInputBox(
Fn<void(QString id, QString name)> chosen,
object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
const style::Checkbox *st,
const style::Radio *radioSt) {
const auto &settings = Core::App().settings();
const auto list = GetAudioInputList(settings.callAudioBackend());
const auto options = ranges::views::concat(
ranges::views::single(tr::lng_settings_call_device_default(tr::now)),
list | ranges::views::transform(&AudioInput::name)
) | ranges::to_vector;
const auto i = ranges::find(
list,
Core::App().settings().callInputDeviceId(),
&AudioInput::id);
const auto currentOption = (i != end(list))
? int(i - begin(list) + 1)
: 0;
const auto save = [=](int option) {
const auto deviceId = option
? list[option - 1].id
: "default";
Core::App().calls().setCurrentAudioDevice(true, deviceId);
chosen(deviceId, options[option]);
};
return Box([=](not_null<Ui::GenericBox*> box) {
SingleChoiceBox(box, {
.title = tr::lng_settings_call_input_device(),
.options = options,
.initialSelection = currentOption,
.callback = save,
.st = st,
.radioSt = radioSt,
});
});
return Box(
ChooseAudioDeviceBox,
tr::lng_settings_call_output_device(),
Core::App().mediaDevices().devicesValue(DeviceType::Playback),
std::move(currentId),
std::move(chosen),
st,
radioSt);
}
// Builds the capture (audio input) device picker box: a radio-button
// list of the currently available capture devices, tracking `currentId`
// and reporting the user's pick through `chosen`.
//
// `st` / `radioSt` optionally override the checkbox and radio styles;
// null means the box falls back to the defaults.
object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
		rpl::producer<QString> currentId,
		Fn<void(QString id)> chosen,
		const style::Checkbox *st,
		const style::Radio *radioSt) {
	// Live device list, so the box updates when devices come and go.
	auto devices = Core::App().mediaDevices().devicesValue(
		DeviceType::Capture);
	return Box(
		ChooseAudioDeviceBox,
		tr::lng_settings_call_input_device(),
		std::move(devices),
		std::move(currentId),
		std::move(chosen),
		st,
		radioSt);
}
//
//object_ptr<Ui::GenericBox> ChooseAudioBackendBox(
// const style::Checkbox *st,
// const style::Radio *radioSt) {
// const auto &settings = Core::App().settings();
// const auto list = GetAudioInputList(settings.callAudioBackend());
// const auto options = std::vector<QString>{
// "OpenAL",
// "Webrtc ADM",
//#ifdef Q_OS_WIN
// "Webrtc ADM2",
//#endif // Q_OS_WIN
// };
// const auto currentOption = static_cast<int>(settings.callAudioBackend());
// const auto save = [=](int option) {
// Core::App().settings().setCallAudioBackend(
// static_cast<Webrtc::Backend>(option));
// Core::App().saveSettings();
// Core::Restart();
// };
// return Box([=](not_null<Ui::GenericBox*> box) {
// SingleChoiceBox(box, {
// .title = rpl::single<QString>("Calls audio backend"),
// .options = options,
// .initialSelection = currentOption,
// .callback = save,
// .st = st,
// .radioSt = radioSt,
// });
// });
//}
} // namespace Settings

View file

@ -54,8 +54,6 @@ private:
const not_null<Window::SessionController*> _controller;
rpl::event_stream<QString> _cameraNameStream;
rpl::event_stream<QString> _outputNameStream;
rpl::event_stream<QString> _inputNameStream;
std::unique_ptr<Webrtc::AudioInputTester> _micTester;
Ui::LevelMeter *_micTestLevel = nullptr;
float _micLevel = 0.;
@ -67,19 +65,20 @@ private:
inline constexpr auto kMicTestUpdateInterval = crl::time(100);
inline constexpr auto kMicTestAnimationDuration = crl::time(200);
[[nodiscard]] QString CurrentAudioOutputName();
[[nodiscard]] QString CurrentAudioInputName();
[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioOutputBox(
Fn<void(QString id, QString name)> chosen,
[[nodiscard]] rpl::producer<QString> PlaybackDeviceNameValue(
rpl::producer<QString> id);
[[nodiscard]] rpl::producer<QString> CaptureDeviceNameValue(
rpl::producer<QString> id);
[[nodiscard]] object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioInputBox(
Fn<void(QString id, QString name)> chosen,
[[nodiscard]] object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
rpl::producer<QString> currentId,
Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
//[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioBackendBox(
// const style::Checkbox *st = nullptr,
// const style::Radio *radioSt = nullptr);
} // namespace Settings

View file

@ -1148,9 +1148,9 @@ bool ReadSetting(
settingsStream >> duckingEnabled;
if (CheckStreamStatus(settingsStream)) {
auto &app = Core::App().settings();
app.setCallOutputDeviceId(outputDeviceID);
app.setCallPlaybackDeviceId(outputDeviceID);
app.setCallCaptureDeviceId(inputDeviceID);
app.setCallOutputVolume(outputVolume);
app.setCallInputDeviceId(inputDeviceID);
app.setCallInputVolume(inputVolume);
app.setCallAudioDuckingEnabled(duckingEnabled);
}

@ -1 +1 @@
Subproject commit b68a95ad4d1ae9a1827671100a7fd76cbe448c3f
Subproject commit 5a831697880967bbccbd45177fb6cf6b11759a22