Mirror of https://github.com/AyuGram/AyuGramDesktop.git (synced 2025-06-05 22:54:01 +02:00)
Implement media devices tracking on Windows.
This commit is contained in:
parent 30e694420a
commit d3778f92d2
18 changed files with 551 additions and 354 deletions
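The commit replaces the old push-style Webrtc::MediaDevices wiring with reactive Webrtc::DeviceId trackers owned by a new application-wide Webrtc::Environment (Core::App().mediaDevices()). A minimal sketch of that pattern, assuming the DeviceId, DeviceType and DeviceIdValueWithFallback interfaces exactly as they are used in the hunks below; the MyCallController class and applyPlaybackDevice() are illustrative placeholders, not part of the commit:

// Sketch only: mirrors how Call/GroupCall construct and observe a device id below.
class MyCallController {
public:
	MyCallController()
	: _playbackDeviceId(
		&Core::App().mediaDevices(),              // global device environment
		Webrtc::DeviceType::Playback,
		Webrtc::DeviceIdValueWithFallback(        // per-call id, falling back to the app-wide id
			Core::App().settings().callPlaybackDeviceIdValue(),
			Core::App().settings().playbackDeviceIdValue())) {
		// React to device changes instead of re-reading settings on demand.
		_playbackDeviceId.changes(
		) | rpl::start_with_next([=](const QString &deviceId) {
			applyPlaybackDevice(deviceId.toStdString()); // hypothetical sink
		}, _lifetime);
	}

private:
	void applyPlaybackDevice(const std::string &id); // placeholder

	Webrtc::DeviceId _playbackDeviceId;
	rpl::lifetime _lifetime;
};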
@@ -215,6 +215,22 @@ Call::Call(
 , _api(&_user->session().mtp())
 , _type(type)
 , _discardByTimeoutTimer([=] { hangup(); })
+, _playbackDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Playback,
+Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callPlaybackDeviceIdValue(),
+Core::App().settings().playbackDeviceIdValue()))
+, _captureDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Capture,
+Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callCaptureDeviceIdValue(),
+Core::App().settings().captureDeviceIdValue()))
+, _cameraDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Camera,
+Core::App().settings().cameraDeviceIdValue())
 , _videoIncoming(
 std::make_unique<Webrtc::VideoTrack>(
 StartVideoState(video)))

@@ -228,6 +244,7 @@ Call::Call(
 _discardByTimeoutTimer.callOnce(config.callRingTimeoutMs);
 startWaitingTrack();
 }
+setupMediaDevices();
 setupOutgoingVideo();
 }

@@ -410,6 +427,20 @@ void Call::setMuted(bool mute) {
 }
 }

+void Call::setupMediaDevices() {
+_playbackDeviceId.changes() | rpl::filter([=] {
+return _instance != nullptr;
+}) | rpl::start_with_next([=](const QString &deviceId) {
+_instance->setAudioOutputDevice(deviceId.toStdString());
+}, _lifetime);
+
+_captureDeviceId.changes() | rpl::filter([=] {
+return _instance != nullptr;
+}) | rpl::start_with_next([=](const QString &deviceId) {
+_instance->setAudioInputDevice(deviceId.toStdString());
+}, _lifetime);
+}
+
 void Call::setupOutgoingVideo() {
 static const auto hasDevices = [] {
 return !Webrtc::GetVideoInputList().empty();

@@ -455,6 +486,19 @@ void Call::setupOutgoingVideo() {
 }
 }
 }, _lifetime);
+
+_cameraDeviceId.changes(
+) | rpl::filter([=] {
+return !_videoCaptureIsScreencast;
+}) | rpl::start_with_next([=](QString deviceId) {
+_videoCaptureDeviceId = deviceId;
+if (_videoCapture) {
+_videoCapture->switchToDevice(deviceId.toStdString(), false);
+if (_instance) {
+_instance->sendVideoDeviceUpdated();
+}
+}
+}, _lifetime);
 }

 not_null<Webrtc::VideoTrack*> Call::videoIncoming() const {

@@ -866,8 +910,8 @@ void Call::createAndStartController(const MTPDphoneCall &call) {
 std::move(encryptionKeyValue),
 (_type == Type::Outgoing)),
 .mediaDevicesConfig = tgcalls::MediaDevicesConfig{
-.audioInputId = settings.callInputDeviceId().toStdString(),
-.audioOutputId = settings.callOutputDeviceId().toStdString(),
+.audioInputId = _captureDeviceId.current().toStdString(),
+.audioOutputId = _playbackDeviceId.current().toStdString(),
 .inputVolume = 1.f,//settings.callInputVolume() / 100.f,
 .outputVolume = 1.f,//settings.callOutputVolume() / 100.f,
 },

@@ -1096,29 +1140,6 @@ void Call::setState(State state) {
 }
 }

-void Call::setCurrentAudioDevice(bool input, const QString &deviceId) {
-if (_instance) {
-const auto id = deviceId.toStdString();
-if (input) {
-_instance->setAudioInputDevice(id);
-} else {
-_instance->setAudioOutputDevice(id);
-}
-}
-}
-
-void Call::setCurrentCameraDevice(const QString &deviceId) {
-if (!_videoCaptureIsScreencast) {
-_videoCaptureDeviceId = deviceId;
-if (_videoCapture) {
-_videoCapture->switchToDevice(deviceId.toStdString(), false);
-if (_instance) {
-_instance->sendVideoDeviceUpdated();
-}
-}
-}
-}
-
 //void Call::setAudioVolume(bool input, float level) {
 // if (_instance) {
 // if (input) {

@@ -1168,10 +1189,11 @@ void Call::toggleCameraSharing(bool enabled) {
 }
 _delegate->callRequestPermissionsOrFail(crl::guard(this, [=] {
 toggleScreenSharing(std::nullopt);
-const auto deviceId = Core::App().settings().callVideoInputDeviceId();
-_videoCaptureDeviceId = deviceId;
+_videoCaptureDeviceId = _cameraDeviceId.current();
 if (_videoCapture) {
-_videoCapture->switchToDevice(deviceId.toStdString(), false);
+_videoCapture->switchToDevice(
+_videoCaptureDeviceId.toStdString(),
+false);
 if (_instance) {
 _instance->sendVideoDeviceUpdated();
 }
@@ -12,6 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/bytes.h"
 #include "mtproto/sender.h"
 #include "mtproto/mtproto_auth_key.h"
+#include "webrtc/webrtc_device_id.h"

 namespace Media {
 namespace Audio {

@@ -190,11 +191,9 @@ public:

 QString getDebugLog() const;

-void setCurrentAudioDevice(bool input, const QString &deviceId);
 //void setAudioVolume(bool input, float level);
 void setAudioDuckingEnabled(bool enabled);

-void setCurrentCameraDevice(const QString &deviceId);
 [[nodiscard]] QString videoDeviceId() const {
 return _videoCaptureDeviceId;
 }

@@ -250,6 +249,7 @@ private:
 void setSignalBarCount(int count);
 void destroyController();

+void setupMediaDevices();
 void setupOutgoingVideo();
 void updateRemoteMediaState(
 tgcalls::AudioState audio,

@@ -271,6 +271,10 @@ private:
 base::DelayedCallTimer _finishByTimeoutTimer;
 base::Timer _discardByTimeoutTimer;

+Webrtc::DeviceId _playbackDeviceId;
+Webrtc::DeviceId _captureDeviceId;
+Webrtc::DeviceId _cameraDeviceId;
+
 rpl::variable<bool> _muted = false;

 DhConfig _dhConfig;
@@ -522,20 +522,6 @@ void Instance::showInfoPanel(not_null<GroupCall*> call) {
 }
 }

-void Instance::setCurrentAudioDevice(bool input, const QString &deviceId) {
-if (input) {
-Core::App().settings().setCallInputDeviceId(deviceId);
-} else {
-Core::App().settings().setCallOutputDeviceId(deviceId);
-}
-Core::App().saveSettingsDelayed();
-if (const auto call = currentCall()) {
-call->setCurrentAudioDevice(input, deviceId);
-} else if (const auto group = currentGroupCall()) {
-group->setCurrentAudioDevice(input, deviceId);
-}
-}
-
 FnMut<void()> Instance::addAsyncWaiter() {
 auto semaphore = std::make_unique<crl::semaphore>();
 const auto raw = semaphore.get();

@@ -846,7 +832,7 @@ std::shared_ptr<tgcalls::VideoCaptureInterface> Instance::getVideoCapture(
 if (deviceId) {
 result->switchToDevice(
 (deviceId->isEmpty()
-? Core::App().settings().callVideoInputDeviceId()
+? Core::App().settings().cameraDeviceId()
 : *deviceId).toStdString(),
 isScreenCapture);
 }

@@ -854,7 +840,7 @@ std::shared_ptr<tgcalls::VideoCaptureInterface> Instance::getVideoCapture(
 }
 const auto startDeviceId = (deviceId && !deviceId->isEmpty())
 ? *deviceId
-: Core::App().settings().callVideoInputDeviceId();
+: Core::App().settings().cameraDeviceId();
 auto result = std::shared_ptr<tgcalls::VideoCaptureInterface>(
 tgcalls::VideoCaptureInterface::Create(
 tgcalls::StaticThreads::getThreads(),
@@ -103,8 +103,6 @@ public:
 -> std::shared_ptr<tgcalls::VideoCaptureInterface>;
 void requestPermissionsOrFail(Fn<void()> onSuccess, bool video = true);

-void setCurrentAudioDevice(bool input, const QString &deviceId);
-
 [[nodiscard]] FnMut<void()> addAsyncWaiter();

 [[nodiscard]] bool isSharingScreen() const;
@@ -29,7 +29,6 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/global_shortcuts.h"
 #include "base/random.h"
 #include "webrtc/webrtc_video_track.h"
-#include "webrtc/webrtc_media_devices.h"
 #include "webrtc/webrtc_create_adm.h"

 #include <tgcalls/group/GroupInstanceCustomImpl.h>

@@ -52,14 +51,6 @@ constexpr auto kFixSpeakingLargeVideoDuration = 3 * crl::time(1000);
 constexpr auto kFullAsMediumsCount = 4; // 1 Full is like 4 Mediums.
 constexpr auto kMaxMediumQualities = 16; // 4 Fulls or 16 Mediums.

-[[nodiscard]] std::unique_ptr<Webrtc::MediaDevices> CreateMediaDevices() {
-const auto &settings = Core::App().settings();
-return Webrtc::CreateMediaDevices(
-settings.callInputDeviceId(),
-settings.callOutputDeviceId(),
-settings.callVideoInputDeviceId());
-}
-
 [[nodiscard]] const Data::GroupCallParticipant *LookupParticipant(
 not_null<PeerData*> peer,
 CallId id,

@@ -590,12 +581,27 @@ GroupCall::GroupCall(
 , _scheduleDate(info.scheduleDate)
 , _lastSpokeCheckTimer([=] { checkLastSpoke(); })
 , _checkJoinedTimer([=] { checkJoined(); })
+, _playbackDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Playback,
+Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callPlaybackDeviceIdValue(),
+Core::App().settings().playbackDeviceIdValue()))
+, _captureDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Capture,
+Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callCaptureDeviceIdValue(),
+Core::App().settings().captureDeviceIdValue()))
+, _cameraDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Camera,
+Webrtc::DeviceIdOrDefault(Core::App().settings().cameraDeviceIdValue()))
 , _pushToTalkCancelTimer([=] { pushToTalkCancel(); })
 , _connectingSoundTimer([=] { playConnectingSoundOnce(); })
 , _listenersHidden(info.rtmp)
 , _rtmp(info.rtmp)
-, _rtmpVolume(Group::kDefaultVolume)
-, _mediaDevices(CreateMediaDevices()) {
+, _rtmpVolume(Group::kDefaultVolume) {
 _muted.value(
 ) | rpl::combine_previous(
 ) | rpl::start_with_next([=](MuteState previous, MuteState state) {

@@ -2058,28 +2064,22 @@ void GroupCall::applyOtherParticipantUpdate(
 }

 void GroupCall::setupMediaDevices() {
-_mediaDevices->audioInputId(
-) | rpl::start_with_next([=](QString id) {
-_audioInputId = id;
-if (_instance) {
-_instance->setAudioInputDevice(id.toStdString());
-}
+_playbackDeviceId.changes() | rpl::filter([=] {
+return _instance != nullptr;
+}) | rpl::start_with_next([=](const QString &deviceId) {
+_instance->setAudioOutputDevice(deviceId.toStdString());
 }, _lifetime);

-_mediaDevices->audioOutputId(
-) | rpl::start_with_next([=](QString id) {
-_audioOutputId = id;
-if (_instance) {
-_instance->setAudioOutputDevice(id.toStdString());
-}
+_captureDeviceId.changes() | rpl::filter([=] {
+return _instance != nullptr;
+}) | rpl::start_with_next([=](const QString &deviceId) {
+_instance->setAudioInputDevice(deviceId.toStdString());
 }, _lifetime);

-_mediaDevices->videoInputId(
-) | rpl::start_with_next([=](QString id) {
-_cameraInputId = id;
-if (_cameraCapture) {
-_cameraCapture->switchToDevice(id.toStdString(), false);
-}
+_cameraDeviceId.changes() | rpl::filter([=] {
+return _cameraCapture != nullptr;
+}) | rpl::start_with_next([=](const QString &deviceId) {
+_cameraCapture->switchToDevice(deviceId.toStdString(), false);
 }, _lifetime);
 }

@@ -2117,7 +2117,7 @@ bool GroupCall::emitShareCameraError() {
 return emitError(Error::DisabledNoCamera);
 } else if (mutedByAdmin()) {
 return emitError(Error::MutedNoCamera);
-} else if (Webrtc::GetVideoInputList().empty()) {
+} else if (_cameraDeviceId.current().isEmpty()) {
 return emitError(Error::NoCamera);
 }
 return false;

@@ -2126,7 +2126,7 @@ bool GroupCall::emitShareCameraError() {
 void GroupCall::emitShareCameraError(Error error) {
 _cameraState = Webrtc::VideoState::Inactive;
 if (error == Error::CameraFailed
-&& Webrtc::GetVideoInputList().empty()) {
+&& _cameraDeviceId.current().isEmpty()) {
 error = Error::NoCamera;
 }
 _errors.fire_copy(error);

@@ -2180,7 +2180,7 @@ void GroupCall::setupOutgoingVideo() {
 return;
 } else if (!_cameraCapture) {
 _cameraCapture = _delegate->groupCallGetVideoCapture(
-_cameraInputId);
+_cameraDeviceId.current());
 if (!_cameraCapture) {
 return emitShareCameraError(Error::CameraFailed);
 }

@@ -2192,7 +2192,7 @@ void GroupCall::setupOutgoingVideo() {
 });
 } else {
 _cameraCapture->switchToDevice(
-_cameraInputId.toStdString(),
+_cameraDeviceId.current().toStdString(),
 false);
 }
 if (_instance) {

@@ -2360,8 +2360,8 @@ bool GroupCall::tryCreateController() {
 }
 crl::on_main(weak, [=] { audioLevelsUpdated(data); });
 },
-.initialInputDeviceId = _audioInputId.toStdString(),
-.initialOutputDeviceId = _audioOutputId.toStdString(),
+.initialInputDeviceId = _captureDeviceId.current().toStdString(),
+.initialOutputDeviceId = _playbackDeviceId.current().toStdString(),
 .createAudioDeviceModule = Webrtc::AudioDeviceModuleCreator(
 settings.callAudioBackend()),
 .videoCapture = _cameraCapture,

@@ -3290,14 +3290,6 @@ void GroupCall::requestVideoQuality(
 updateRequestedVideoChannelsDelayed();
 }

-void GroupCall::setCurrentAudioDevice(bool input, const QString &deviceId) {
-if (input) {
-_mediaDevices->switchToAudioInput(deviceId);
-} else {
-_mediaDevices->switchToAudioOutput(deviceId);
-}
-}
-
 void GroupCall::toggleMute(const Group::MuteRequest &data) {
 if (_rtmp) {
 _rtmpVolume = data.mute ? 0 : Group::kDefaultVolume;
@@ -12,6 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/bytes.h"
 #include "mtproto/sender.h"
 #include "mtproto/mtproto_auth_key.h"
+#include "webrtc/webrtc_device_id.h"

 class History;

@@ -381,7 +382,6 @@ public:
 return _videoIsWorking.value();
 }

-void setCurrentAudioDevice(bool input, const QString &deviceId);
 [[nodiscard]] bool isSharingScreen() const;
 [[nodiscard]] rpl::producer<bool> isSharingScreenValue() const;
 [[nodiscard]] bool isScreenPaused() const;

@@ -667,6 +667,10 @@ private:

 crl::time _lastSendProgressUpdate = 0;

+Webrtc::DeviceId _playbackDeviceId;
+Webrtc::DeviceId _captureDeviceId;
+Webrtc::DeviceId _cameraDeviceId;
+
 std::shared_ptr<GlobalShortcutManager> _shortcutManager;
 std::shared_ptr<GlobalShortcutValue> _pushToTalk;
 base::Timer _pushToTalkCancelTimer;

@@ -677,11 +681,6 @@ private:
 bool _reloadedStaleCall = false;
 int _rtmpVolume = 0;

-std::unique_ptr<Webrtc::MediaDevices> _mediaDevices;
-QString _audioInputId;
-QString _audioOutputId;
-QString _cameraInputId;
-
 rpl::lifetime _lifetime;

 };
@@ -250,8 +250,6 @@ void SettingsBox(
 const auto weakBox = Ui::MakeWeak(box);

 struct State {
-rpl::event_stream<QString> outputNameStream;
-rpl::event_stream<QString> inputNameStream;
 std::unique_ptr<Webrtc::AudioInputTester> micTester;
 Ui::LevelMeter *micTestLevel = nullptr;
 float micLevel = 0.;

@@ -295,42 +293,46 @@ void SettingsBox(
 Ui::AddSkip(layout);
 }

+auto playbackIdWithFallback = Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callPlaybackDeviceIdValue(),
+Core::App().settings().playbackDeviceIdValue());
 AddButtonWithLabel(
 layout,
 tr::lng_group_call_speakers(),
-rpl::single(
-CurrentAudioOutputName()
-) | rpl::then(
-state->outputNameStream.events()
-),
+PlaybackDeviceNameValue(rpl::duplicate(playbackIdWithFallback)),
 st::groupCallSettingsButton
 )->addClickHandler([=] {
-box->getDelegate()->show(ChooseAudioOutputBox(crl::guard(box, [=](
-const QString &id,
-const QString &name) {
-state->outputNameStream.fire_copy(name);
-}), &st::groupCallCheckbox, &st::groupCallRadio));
+box->getDelegate()->show(ChoosePlaybackDeviceBox(
+rpl::duplicate(playbackIdWithFallback),
+crl::guard(box, [=](const QString &id) {
+Core::App().settings().setCallPlaybackDeviceId(id);
+Core::App().saveSettingsDelayed();
+}),
+&st::groupCallCheckbox,
+&st::groupCallRadio));
 });

 if (!rtmp) {
+auto captureIdWithFallback = Webrtc::DeviceIdValueWithFallback(
+Core::App().settings().callCaptureDeviceIdValue(),
+Core::App().settings().captureDeviceIdValue());
 AddButtonWithLabel(
 layout,
 tr::lng_group_call_microphone(),
-rpl::single(
-CurrentAudioInputName()
-) | rpl::then(
-state->inputNameStream.events()
-),
+CaptureDeviceNameValue(rpl::duplicate(captureIdWithFallback)),
 st::groupCallSettingsButton
 )->addClickHandler([=] {
-box->getDelegate()->show(ChooseAudioInputBox(crl::guard(box, [=](
-const QString &id,
-const QString &name) {
-state->inputNameStream.fire_copy(name);
-if (state->micTester) {
-state->micTester->setDeviceId(id);
-}
-}), &st::groupCallCheckbox, &st::groupCallRadio));
+box->getDelegate()->show(ChooseCaptureDeviceBox(
+rpl::duplicate(captureIdWithFallback),
+crl::guard(box, [=](const QString &id) {
+Core::App().settings().setCallCaptureDeviceId(id);
+Core::App().saveSettingsDelayed();
+if (state->micTester) {
+state->micTester->setDeviceId(id);
+}
+}),
+&st::groupCallCheckbox,
+&st::groupCallRadio));
 });

 state->micTestLevel = box->addRow(

@@ -773,7 +775,7 @@ void SettingsBox(
 crl::on_main(box, [=] {
 state->micTester = std::make_unique<Webrtc::AudioInputTester>(
 Core::App().settings().callAudioBackend(),
-Core::App().settings().callInputDeviceId());
+Core::App().settings().callCaptureDeviceId());
 state->levelUpdateTimer.callEach(kMicTestUpdateInterval);
 });
 });

@@ -883,7 +885,7 @@ MicLevelTester::MicLevelTester(Fn<void()> show)
 , _tester(
 std::make_unique<Webrtc::AudioInputTester>(
 Core::App().settings().callAudioBackend(),
-Core::App().settings().callInputDeviceId())) {
+Core::App().settings().callCaptureDeviceId())) {
 _timer.callEach(kMicrophoneTooltipCheckInterval);
 }
@@ -88,6 +88,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "storage/localstorage.h"
 #include "payments/payments_checkout_process.h"
 #include "export/export_manager.h"
+#include "webrtc/webrtc_environment.h"
 #include "window/window_session_controller.h"
 #include "window/window_controller.h"
 #include "boxes/abstract_box.h"

@@ -150,6 +151,7 @@ Application::Application()
 , _private(std::make_unique<Private>())
 , _platformIntegration(Platform::Integration::Create())
 , _batterySaving(std::make_unique<base::BatterySaving>())
+, _mediaDevices(std::make_unique<Webrtc::Environment>())
 , _databases(std::make_unique<Storage::Databases>())
 , _animationsManager(std::make_unique<Ui::Animations::Manager>())
 , _clearEmojiImageLoaderTimer([=] { clearEmojiSourceImages(); })
@@ -101,6 +101,10 @@ namespace Calls {
 class Instance;
 } // namespace Calls

+namespace Webrtc {
+class Environment;
+} // namespace Webrtc
+
 namespace Core {

 struct LocalUrlHandler;

@@ -238,6 +242,9 @@ public:
 [[nodiscard]] Media::Audio::Instance &audio() {
 return *_audio;
 }
+[[nodiscard]] Webrtc::Environment &mediaDevices() {
+return *_mediaDevices;
+}

 // Langpack and emoji keywords.
 [[nodiscard]] Lang::Instance &langpack() {

@@ -383,6 +390,7 @@ private:
 const std::unique_ptr<Private> _private;
 const std::unique_ptr<Platform::Integration> _platformIntegration;
 const std::unique_ptr<base::BatterySaving> _batterySaving;
+const std::unique_ptr<Webrtc::Environment> _mediaDevices;

 const std::unique_ptr<Storage::Databases> _databases;
 const std::unique_ptr<Ui::Animations::Manager> _animationsManager;
@@ -17,6 +17,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "ui/gl/gl_detection.h"
 #include "ui/widgets/fields/input_field.h"
 #include "webrtc/webrtc_create_adm.h"
+#include "webrtc/webrtc_device_common.h"
 #include "window/section_widget.h"

 namespace Core {

@@ -159,8 +160,8 @@ QByteArray Settings::serialize() const {
 + Serialize::stringSize(_downloadPath.current())
 + Serialize::bytearraySize(_downloadPathBookmark)
 + sizeof(qint32) * 9
-+ Serialize::stringSize(_callOutputDeviceId)
-+ Serialize::stringSize(_callInputDeviceId)
++ Serialize::stringSize(_callPlaybackDeviceId.current())
++ Serialize::stringSize(_callCaptureDeviceId.current())
 + sizeof(qint32) * 5;
 for (const auto &[key, value] : _soundOverrides) {
 size += Serialize::stringSize(key) + Serialize::stringSize(value);

@@ -170,7 +171,7 @@ QByteArray Settings::serialize() const {
 + sizeof(qint32)
 + (_dictionariesEnabled.current().size() * sizeof(quint64))
 + sizeof(qint32) * 12
-+ Serialize::stringSize(_callVideoInputDeviceId)
++ Serialize::stringSize(_cameraDeviceId.current())
 + sizeof(qint32) * 2
 + Serialize::bytearraySize(_groupCallPushToTalkShortcut)
 + sizeof(qint64)

@@ -194,7 +195,7 @@ QByteArray Settings::serialize() const {
 + (_accountsOrder.size() * sizeof(quint64))
 + sizeof(qint32) * 7
 + (skipLanguages.size() * sizeof(quint64))
-+ sizeof(qint32)
++ sizeof(qint32) * 2
 + sizeof(quint64)
 + sizeof(qint32) * 3
 + Serialize::bytearraySize(mediaViewPosition)

@@ -204,6 +205,9 @@ QByteArray Settings::serialize() const {
 for (const auto &id : _recentEmojiSkip) {
 size += Serialize::stringSize(id);
 }
+size += sizeof(qint32) * 2
++ Serialize::stringSize(_playbackDeviceId.current())
++ Serialize::stringSize(_captureDeviceId.current());

 auto result = QByteArray();
 result.reserve(size);

@@ -228,8 +232,8 @@ QByteArray Settings::serialize() const {
 << qint32(_notificationsCount)
 << static_cast<qint32>(_notificationsCorner)
 << qint32(_autoLock)
-<< _callOutputDeviceId
-<< _callInputDeviceId
+<< _callPlaybackDeviceId.current()
+<< _callCaptureDeviceId.current()
 << qint32(_callOutputVolume)
 << qint32(_callInputVolume)
 << qint32(_callAudioDuckingEnabled ? 1 : 0)

@@ -273,7 +277,7 @@ QByteArray Settings::serialize() const {
 << qint32(_notifyFromAll ? 1 : 0)
 << qint32(_nativeWindowFrame.current() ? 1 : 0)
 << qint32(_systemDarkModeEnabled.current() ? 1 : 0)
-<< _callVideoInputDeviceId
+<< _cameraDeviceId.current()
 << qint32(_ipRevealWarning ? 1 : 0)
 << qint32(_groupCallPushToTalk ? 1 : 0)
 << _groupCallPushToTalkShortcut

@@ -327,9 +331,7 @@ QByteArray Settings::serialize() const {
 }

 stream
-<< qint32(_rememberedDeleteMessageOnlyForYou ? 1 : 0);
-
-stream
+<< qint32(_rememberedDeleteMessageOnlyForYou ? 1 : 0)
 << qint32(_translateChatEnabled.current() ? 1 : 0)
 << quint64(QLocale::Language(_translateToRaw.current()))
 << qint32(_windowTitleContent.current().hideChatName ? 1 : 0)

@@ -339,14 +341,18 @@ QByteArray Settings::serialize() const {
 << qint32(_ignoreBatterySaving.current() ? 1 : 0)
 << quint64(_macRoundIconDigest.value_or(0))
 << qint32(_storiesClickTooltipHidden.current() ? 1 : 0)
-<< qint32(_recentEmojiSkip.size())
-<< qint32(_ttlVoiceClickTooltipHidden.current() ? 1 : 0);
+<< qint32(_recentEmojiSkip.size());
 for (const auto &id : _recentEmojiSkip) {
 stream << id;
 }
 stream
-<< qint32(_trayIconMonochrome.current() ? 1 : 0);
+<< qint32(_trayIconMonochrome.current() ? 1 : 0)
+<< qint32(_ttlVoiceClickTooltipHidden.current() ? 1 : 0)
+<< _playbackDeviceId.current()
+<< _captureDeviceId.current();
 }

+Ensures(result.size() == size);
 return result;
 }

@@ -375,9 +381,11 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 qint32 notificationsCount = _notificationsCount;
 qint32 notificationsCorner = static_cast<qint32>(_notificationsCorner);
 qint32 autoLock = _autoLock;
-QString callOutputDeviceId = _callOutputDeviceId;
-QString callInputDeviceId = _callInputDeviceId;
-QString callVideoInputDeviceId = _callVideoInputDeviceId;
+QString playbackDeviceId = _playbackDeviceId.current();
+QString captureDeviceId = _captureDeviceId.current();
+QString cameraDeviceId = _cameraDeviceId.current();
+QString callPlaybackDeviceId = _callPlaybackDeviceId.current();
+QString callCaptureDeviceId = _callCaptureDeviceId.current();
 qint32 callOutputVolume = _callOutputVolume;
 qint32 callInputVolume = _callInputVolume;
 qint32 callAudioDuckingEnabled = _callAudioDuckingEnabled ? 1 : 0;

@@ -475,8 +483,8 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 >> notificationsCount
 >> notificationsCorner
 >> autoLock
->> callOutputDeviceId
->> callInputDeviceId
+>> callPlaybackDeviceId
+>> callCaptureDeviceId
 >> callOutputVolume
 >> callInputVolume
 >> callAudioDuckingEnabled

@@ -539,7 +547,7 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 stream >> systemDarkModeEnabled;
 }
 if (!stream.atEnd()) {
-stream >> callVideoInputDeviceId;
+stream >> cameraDeviceId;
 }
 if (!stream.atEnd()) {
 stream >> ipRevealWarning;

@@ -666,7 +674,8 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 });
 }
 }
+}
+if (!stream.atEnd()) {
 stream >> rememberedDeleteMessageOnlyForYou;
 }
 if (!stream.atEnd()) {

@@ -714,6 +723,11 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 if (!stream.atEnd()) {
 stream >> ttlVoiceClickTooltipHidden;
 }
+if (!stream.atEnd()) {
+stream
+>> playbackDeviceId
+>> captureDeviceId;
+}
 if (stream.status() != QDataStream::Ok) {
 LOG(("App Error: "
 "Bad data for Core::Settings::constructFromSerialized()"));

@@ -757,9 +771,12 @@ void Settings::addFromSerialized(const QByteArray &serialized) {
 _countUnreadMessages = (countUnreadMessages == 1);
 _notifyAboutPinned = (notifyAboutPinned == 1);
 _autoLock = autoLock;
-_callOutputDeviceId = callOutputDeviceId;
-_callInputDeviceId = callInputDeviceId;
-_callVideoInputDeviceId = callVideoInputDeviceId;
+_playbackDeviceId = playbackDeviceId;
+_captureDeviceId = captureDeviceId;
+const auto kOldDefault = u"default"_q;
+_cameraDeviceId = cameraDeviceId;
+_callPlaybackDeviceId = callPlaybackDeviceId;
+_callCaptureDeviceId = callCaptureDeviceId;
 _callOutputVolume = callOutputVolume;
 _callInputVolume = callInputVolume;
 _callAudioDuckingEnabled = (callAudioDuckingEnabled == 1);

@@ -1216,9 +1233,11 @@ void Settings::resetOnLastLogout() {
 _notifyAboutPinned = true;
 //_autoLock = 3600;

-//_callOutputDeviceId = u"default"_q;
-//_callInputDeviceId = u"default"_q;
-//_callVideoInputDeviceId = u"default"_q;
+//_playbackDeviceId = QString();
+//_captureDeviceId = QString();
+//_cameraDeviceId = QString();
+//_callPlaybackDeviceId = QString();
+//_callCaptureDeviceId = QString();
 //_callOutputVolume = 100;
 //_callInputVolume = 100;
 //_callAudioDuckingEnabled = true;
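The Settings hunks above keep old serialized blobs readable: the renamed per-call ids are still written in their original positions, while the new app-wide playback/capture ids are appended at the end of the stream, counted into size, and only read back when the blob actually contains them. A reduced sketch of that read pattern, assuming QDataStream-based serialization as in Settings::addFromSerialized() (the surrounding function and defaults are omitted):

// Sketch only: the new trailing fields are optional for old blobs.
QString playbackDeviceId = _playbackDeviceId.current(); // keep the current value as default
QString captureDeviceId = _captureDeviceId.current();
// ...all previously existing fields are read first, in their old order...
if (!stream.atEnd()) {
	stream
		>> playbackDeviceId
		>> captureDeviceId;
}
if (stream.status() != QDataStream::Ok) {
	// treat the whole blob as bad, as the existing error path already does
}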
@@ -263,30 +263,68 @@ public:
 void setAutoLock(int value) {
 _autoLock = value;
 }
-[[nodiscard]] QString callOutputDeviceId() const {
-return _callOutputDeviceId.isEmpty()
-? u"default"_q
-: _callOutputDeviceId;
+[[nodiscard]] QString playbackDeviceId() const {
+return _playbackDeviceId.current();
 }
-void setCallOutputDeviceId(const QString &value) {
-_callOutputDeviceId = value;
+[[nodiscard]] rpl::producer<QString> playbackDeviceIdChanges() const {
+return _playbackDeviceId.changes();
 }
-[[nodiscard]] QString callInputDeviceId() const {
-return _callInputDeviceId.isEmpty()
-? u"default"_q
-: _callInputDeviceId;
+[[nodiscard]] rpl::producer<QString> playbackDeviceIdValue() const {
+return _playbackDeviceId.value();
 }
-void setCallInputDeviceId(const QString &value) {
-_callInputDeviceId = value;
+void setPlaybackDeviceId(const QString &value) {
+_playbackDeviceId = value;
 }
-[[nodiscard]] QString callVideoInputDeviceId() const {
-return _callVideoInputDeviceId.isEmpty()
-? u"default"_q
-: _callVideoInputDeviceId;
+[[nodiscard]] QString captureDeviceId() const {
+return _captureDeviceId.current();
 }
-void setCallVideoInputDeviceId(const QString &value) {
-_callVideoInputDeviceId = value;
+[[nodiscard]] rpl::producer<QString> captureDeviceIdChanges() const {
+return _captureDeviceId.changes();
 }
+[[nodiscard]] rpl::producer<QString> captureDeviceIdValue() const {
+return _captureDeviceId.value();
+}
+void setCaptureDeviceId(const QString &value) {
+_captureDeviceId = value;
+}
+[[nodiscard]] QString cameraDeviceId() const {
+return _cameraDeviceId.current();
+}
+[[nodiscard]] rpl::producer<QString> cameraDeviceIdChanges() const {
+return _cameraDeviceId.changes();
+}
+[[nodiscard]] rpl::producer<QString> cameraDeviceIdValue() const {
+return _cameraDeviceId.value();
+}
+void setCameraDeviceId(const QString &value) {
+_cameraDeviceId = value;
+}
+[[nodiscard]] QString callPlaybackDeviceId() const {
+return _callPlaybackDeviceId.current();
+}
+[[nodiscard]] rpl::producer<QString> callPlaybackDeviceIdChanges() const {
+return _callPlaybackDeviceId.changes();
+}
+[[nodiscard]] rpl::producer<QString> callPlaybackDeviceIdValue() const {
+return _callPlaybackDeviceId.value();
+}
+void setCallPlaybackDeviceId(const QString &value) {
+_callPlaybackDeviceId = value;
+}
+[[nodiscard]] QString callCaptureDeviceId() const {
+return _callCaptureDeviceId.current();
+}
+[[nodiscard]] rpl::producer<QString> callCaptureDeviceIdChanges() const {
+return _callCaptureDeviceId.changes();
+}
+[[nodiscard]] rpl::producer<QString> callCaptureDeviceIdValue() const {
+return _callCaptureDeviceId.value();
+}
+void setCallCaptureDeviceId(const QString &value) {
+_callCaptureDeviceId = value;
+}

 [[nodiscard]] int callOutputVolume() const {
 return _callOutputVolume;
 }

@@ -875,9 +913,11 @@ private:
 bool _countUnreadMessages = true;
 rpl::variable<bool> _notifyAboutPinned = true;
 int _autoLock = 3600;
-QString _callOutputDeviceId = u"default"_q;
-QString _callInputDeviceId = u"default"_q;
-QString _callVideoInputDeviceId = u"default"_q;
+rpl::variable<QString> _playbackDeviceId;
+rpl::variable<QString> _captureDeviceId;
+rpl::variable<QString> _cameraDeviceId;
+rpl::variable<QString> _callPlaybackDeviceId;
+rpl::variable<QString> _callCaptureDeviceId;
 int _callOutputVolume = 100;
 int _callInputVolume = 100;
 bool _callAudioDuckingEnabled = true;
@@ -141,7 +141,8 @@ void DestroyPlaybackDevice() {
 bool CreatePlaybackDevice() {
 if (AudioDevice) return true;

-AudioDevice = alcOpenDevice(nullptr);
+const auto id = Current().deviceId().toStdString();
+AudioDevice = alcOpenDevice(id.c_str());
 if (!AudioDevice) {
 LOG(("Audio Error: Could not create default playback device, enumerating.."));
 EnumeratePlaybackDevices();

@@ -200,12 +201,12 @@ void Start(not_null<Instance*> instance) {

 MixerInstance = new Player::Mixer(instance);

-Platform::Audio::Init();
+//Platform::Audio::Init();
 }

 // Thread: Main.
 void Finish(not_null<Instance*> instance) {
-Platform::Audio::DeInit();
+//Platform::Audio::DeInit();

 // MixerInstance variable should be modified under AudioMutex protection.
 // So it is modified in the ~Mixer() destructor after all tracks are cleared.
@@ -10,6 +10,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/audio/media_audio_ffmpeg_loader.h"
 #include "media/audio/media_audio.h"
 #include "core/application.h"
+#include "core/core_settings.h"
 #include "core/file_location.h"

 #include <al.h>

@@ -242,7 +243,12 @@ Track::~Track() {
 _instance->unregisterTrack(this);
 }

-Instance::Instance() {
+Instance::Instance()
+: _playbackDeviceId(
+&Core::App().mediaDevices(),
+Webrtc::DeviceType::Playback,
+Webrtc::DeviceIdOrDefault(
+Core::App().settings().playbackDeviceIdValue())) {
 _updateTimer.setCallback([this] {
 auto hasActive = false;
 for (auto track : _tracks) {

@@ -260,6 +266,15 @@ Instance::Instance() {
 _detachFromDeviceForce = false;
 Player::internal::DetachFromDevice(this);
 });
+
+_playbackDeviceId.changes() | rpl::start_with_next([=] {
+_detachFromDeviceForce = false;
+Player::internal::DetachFromDevice(this);
+}, _lifetime);
+}
+
+QString Instance::deviceId() const {
+return _playbackDeviceId.current();
 }

 std::unique_ptr<Track> Instance::createTrack() {
@@ -9,6 +9,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL

 #include "base/timer.h"
 #include "base/bytes.h"
+#include "webrtc/webrtc_device_id.h"

 namespace Core {
 class FileLocation;

@@ -94,7 +95,9 @@ public:
 // Thread: Main.
 Instance();

-std::unique_ptr<Track> createTrack();
+[[nodiscard]] QString deviceId() const;
+
+[[nodiscard]] std::unique_ptr<Track> createTrack();

 void detachTracks();
 void reattachTracks();

@@ -115,15 +118,18 @@ private:

 private:
 std::set<Track*> _tracks;
+Webrtc::DeviceId _playbackDeviceId;
+
 base::Timer _updateTimer;

 base::Timer _detachFromDeviceTimer;
 bool _detachFromDeviceForce = false;

+rpl::lifetime _lifetime;

 };

-Instance &Current();
+[[nodiscard]] Instance &Current();

 } // namespace Audio
 } // namespace Media
@ -29,6 +29,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
|
||||||
#include "calls/calls_video_bubble.h"
|
#include "calls/calls_video_bubble.h"
|
||||||
#include "apiwrap.h"
|
#include "apiwrap.h"
|
||||||
#include "api/api_authorizations.h"
|
#include "api/api_authorizations.h"
|
||||||
|
#include "webrtc/webrtc_environment.h"
|
||||||
#include "webrtc/webrtc_media_devices.h"
|
#include "webrtc/webrtc_media_devices.h"
|
||||||
#include "webrtc/webrtc_video_track.h"
|
#include "webrtc/webrtc_video_track.h"
|
||||||
#include "webrtc/webrtc_audio_input_tester.h"
|
#include "webrtc/webrtc_audio_input_tester.h"
|
||||||
|
@ -41,6 +42,18 @@ namespace {
|
||||||
|
|
||||||
using namespace Webrtc;
|
using namespace Webrtc;
|
||||||
|
|
||||||
|
[[nodiscard]] rpl::producer<QString> DeviceNameValue(
|
||||||
|
DeviceType type,
|
||||||
|
rpl::producer<QString> id) {
|
||||||
|
return std::move(id) | rpl::map([type](const QString &id) {
|
||||||
|
const auto list = Core::App().mediaDevices().devices(type);
|
||||||
|
const auto i = ranges::find(list, id, &DeviceInfo::id);
|
||||||
|
return (i != end(list))
|
||||||
|
? i->name
|
||||||
|
: tr::lng_settings_call_device_default(tr::now);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
Calls::Calls(
|
Calls::Calls(
|
||||||
|
@ -86,7 +99,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
|
||||||
const auto cameras = GetVideoInputList();
|
const auto cameras = GetVideoInputList();
|
||||||
const auto i = ranges::find(
|
const auto i = ranges::find(
|
||||||
cameras,
|
cameras,
|
||||||
Core::App().settings().callVideoInputDeviceId(),
|
Core::App().settings().cameraDeviceId(),
|
||||||
&VideoInput::id);
|
&VideoInput::id);
|
||||||
return (i != end(cameras))
|
return (i != end(cameras))
|
||||||
? i->name
|
? i->name
|
||||||
|
@ -111,7 +124,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
|
||||||
) | ranges::to_vector;
|
) | ranges::to_vector;
|
||||||
const auto i = ranges::find(
|
const auto i = ranges::find(
|
||||||
devices,
|
devices,
|
||||||
Core::App().settings().callVideoInputDeviceId(),
|
Core::App().settings().cameraDeviceId(),
|
||||||
&VideoInput::id);
|
&VideoInput::id);
|
||||||
const auto currentOption = (i != end(devices))
|
const auto currentOption = (i != end(devices))
|
||||||
? int(i - begin(devices) + 1)
|
? int(i - begin(devices) + 1)
|
||||||
|
@ -120,14 +133,11 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
|
||||||
cameraNameStream->fire_copy(options[option]);
|
cameraNameStream->fire_copy(options[option]);
|
||||||
const auto deviceId = option
|
const auto deviceId = option
? devices[option - 1].id
-: "default";
+: kDefaultDeviceId;
if (saveToSettings) {
-Core::App().settings().setCallVideoInputDeviceId(deviceId);
+Core::App().settings().setCameraDeviceId(deviceId);
Core::App().saveSettingsDelayed();
}
-if (const auto call = Core::App().calls().currentCall()) {
-call->setCurrentCameraDevice(deviceId);
-}
if (*capturerOwner) {
(*capturerOwner)->switchToDevice(
deviceId.toStdString(),

@@ -186,7 +196,7 @@ Webrtc::VideoTrack *Calls::AddCameraSubsection(
return;
}
*capturerOwner = Core::App().calls().getVideoCapture(
-Core::App().settings().callVideoInputDeviceId(),
+Core::App().settings().cameraDeviceId(),
false);
(*capturerOwner)->setPreferredAspectRatio(0.);
track->setState(VideoState::Active);

@@ -220,54 +230,58 @@ void Calls::sectionSaveChanges(FnMut<void()> done) {
void Calls::setupContent() {
const auto content = Ui::CreateChild<Ui::VerticalLayout>(this);

-if (!GetVideoInputList().empty()) {
-Ui::AddSkip(content);
-Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
-AddCameraSubsection(_controller->uiShow(), content, true);
-Ui::AddSkip(content);
-Ui::AddDivider(content);
-}
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_output());

+//auto playbackIdWithFallback = DeviceIdValueWithFallback(
+// Core::App().settings().callPlaybackDeviceIdValue(),
+// Core::App().settings().playbackDeviceIdValue());
+auto playbackIdWithFallback = [] {
+return DeviceIdOrDefault(
+Core::App().settings().playbackDeviceIdValue());
+};
AddButtonWithLabel(
content,
tr::lng_settings_call_output_device(),
-rpl::single(
-CurrentAudioOutputName()
-) | rpl::then(
-_outputNameStream.events()
-),
+PlaybackDeviceNameValue(playbackIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
-_controller->show(ChooseAudioOutputBox(crl::guard(this, [=](
-const QString &id,
-const QString &name) {
-_outputNameStream.fire_copy(name);
-})));
+_controller->show(ChoosePlaybackDeviceBox(
+playbackIdWithFallback(),
+crl::guard(this, [=](const QString &id) {
+//Core::App().settings().setCallPlaybackDeviceId(id);
+Core::App().settings().setPlaybackDeviceId(id);
+Core::App().saveSettingsDelayed();
+})));
});

Ui::AddSkip(content);
Ui::AddDivider(content);
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_input());
+//auto captureIdWithFallback = DeviceIdValueWithFallback(
+// Core::App().settings().callCaptureDeviceIdValue(),
+// Core::App().settings().captureDeviceIdValue());
+auto captureIdWithFallback = [] {
+return DeviceIdOrDefault(
+Core::App().settings().captureDeviceIdValue());
+};
AddButtonWithLabel(
content,
tr::lng_settings_call_input_device(),
-rpl::single(
-CurrentAudioInputName()
-) | rpl::then(
-_inputNameStream.events()
-),
+CaptureDeviceNameValue(captureIdWithFallback()),
st::settingsButtonNoIcon
)->addClickHandler([=] {
-_controller->show(ChooseAudioInputBox(crl::guard(this, [=](
-const QString &id,
-const QString &name) {
-_inputNameStream.fire_copy(name);
-if (_micTester) {
-_micTester->setDeviceId(id);
-}
-})));
+_controller->show(ChooseCaptureDeviceBox(
+captureIdWithFallback(),
+crl::guard(this, [=](const QString &id) {
+//Core::App().settings().setCallCaptureDeviceId(id);
+Core::App().settings().setCaptureDeviceId(id);
+Core::App().saveSettingsDelayed();
+if (_micTester) {
+_micTester->setDeviceId(id);
+}
+})));
});

_micTestLevel = content->add(

@@ -287,6 +301,15 @@ void Calls::setupContent() {

Ui::AddSkip(content);
Ui::AddDivider(content);

+if (!GetVideoInputList().empty()) {
+Ui::AddSkip(content);
+Ui::AddSubsectionTitle(content, tr::lng_settings_call_camera());
+AddCameraSubsection(_controller->uiShow(), content, true);
+Ui::AddSkip(content);
+Ui::AddDivider(content);
+}
+
Ui::AddSkip(content);
Ui::AddSubsectionTitle(content, tr::lng_settings_call_section_other());
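Aside, not part of the diff: the hunks above replace the one-shot name lookups with reactive device-id producers, and the commented-out lines hint that a call-specific id with a fallback to the general device id was also considered (a DeviceIdValueWithFallback helper). A minimal sketch of how such a fallback could be assembled from the two settings values, assuming the settings getters return rpl::producer<QString> and that rpl::combine / rpl::map behave as usual in this codebase; the real helper may be implemented differently:

// Illustrative sketch only: prefer the call-specific device id when it is
// set, otherwise fall back to the general playback id; DeviceIdOrDefault
// (used in the hunk above) supplies the default when both are empty.
auto playbackIdWithCallFallback = DeviceIdOrDefault(rpl::combine(
	Core::App().settings().callPlaybackDeviceIdValue(),
	Core::App().settings().playbackDeviceIdValue()
) | rpl::map([](const QString &forCalls, const QString &general) {
	return forCalls.isEmpty() ? general : forCalls;
}));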
@@ -310,8 +333,8 @@ void Calls::setupContent() {
tr::lng_settings_call_open_system_prefs(),
st::settingsButtonNoIcon
))->addClickHandler([=] {
-const auto opened = Platform::OpenSystemSettings(
-Platform::SystemSettingsType::Audio);
+using namespace ::Platform;
+const auto opened = OpenSystemSettings(SystemSettingsType::Audio);
if (!opened) {
_controller->show(
Ui::MakeInformBox(tr::lng_linux_no_audio_prefs()));

@@ -324,26 +347,27 @@ void Calls::setupContent() {
}

void Calls::requestPermissionAndStartTestingMicrophone() {
-const auto status = Platform::GetPermissionStatus(
-Platform::PermissionType::Microphone);
-if (status == Platform::PermissionStatus::Granted) {
+using namespace ::Platform;
+const auto status = GetPermissionStatus(
+PermissionType::Microphone);
+if (status == PermissionStatus::Granted) {
startTestingMicrophone();
-} else if (status == Platform::PermissionStatus::CanRequest) {
+} else if (status == PermissionStatus::CanRequest) {
const auto startTestingChecked = crl::guard(this, [=](
-Platform::PermissionStatus status) {
-if (status == Platform::PermissionStatus::Granted) {
+PermissionStatus status) {
+if (status == PermissionStatus::Granted) {
crl::on_main(crl::guard(this, [=] {
startTestingMicrophone();
}));
}
});
-Platform::RequestPermission(
-Platform::PermissionType::Microphone,
+RequestPermission(
+PermissionType::Microphone,
startTestingChecked);
} else {
const auto showSystemSettings = [controller = _controller] {
-Platform::OpenSystemSettingsForPermission(
-Platform::PermissionType::Microphone);
+OpenSystemSettingsForPermission(
+PermissionType::Microphone);
controller->hideLayer();
};
_controller->show(Ui::MakeConfirmBox({

@@ -358,135 +382,215 @@ void Calls::startTestingMicrophone() {
_levelUpdateTimer.callEach(kMicTestUpdateInterval);
_micTester = std::make_unique<AudioInputTester>(
Core::App().settings().callAudioBackend(),
-Core::App().settings().callInputDeviceId());
+Core::App().settings().callCaptureDeviceId());
}

-QString CurrentAudioOutputName() {
-const auto &settings = Core::App().settings();
-const auto list = GetAudioOutputList(settings.callAudioBackend());
-const auto i = ranges::find(
-list,
-settings.callOutputDeviceId(),
-&AudioOutput::id);
-return (i != end(list))
-? i->name
-: tr::lng_settings_call_device_default(tr::now);
+rpl::producer<QString> PlaybackDeviceNameValue(rpl::producer<QString> id) {
+return DeviceNameValue(DeviceType::Playback, std::move(id));
}

-QString CurrentAudioInputName() {
-const auto &settings = Core::App().settings();
-const auto list = GetAudioInputList(settings.callAudioBackend());
-const auto i = ranges::find(
-list,
-settings.callInputDeviceId(),
-&AudioInput::id);
-return (i != end(list))
-? i->name
-: tr::lng_settings_call_device_default(tr::now);
+rpl::producer<QString> CaptureDeviceNameValue(rpl::producer<QString> id) {
+return DeviceNameValue(DeviceType::Capture, std::move(id));
}
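For illustration only: the two helpers above now forward to a DeviceNameValue helper instead of resolving the name once, and the removed bodies show what that resolution used to look like. A non-reactive sketch of the same lookup against the DeviceInfo list used elsewhere in this diff; the function name and fallback behaviour are assumptions, not the helper's actual implementation:

// Sketch only: map a device id to a display name, falling back to the
// localized "Default" label when the id is empty, the default id, or unknown.
QString NameForDeviceId(
		const std::vector<DeviceInfo> &list,
		const QString &id) {
	const auto i = ranges::find(list, id, &DeviceInfo::id);
	return (!id.isEmpty() && id != kDefaultDeviceId && i != end(list))
		? i->name
		: tr::lng_settings_call_device_default(tr::now);
}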

-object_ptr<Ui::GenericBox> ChooseAudioOutputBox(
-Fn<void(QString id, QString name)> chosen,
+void ChooseAudioDeviceBox(
+not_null<Ui::GenericBox*> box,
+rpl::producer<QString> title,
+rpl::producer<std::vector<DeviceInfo>> devicesValue,
+rpl::producer<QString> currentId,
+Fn<void(QString id)> chosen,
const style::Checkbox *st,
const style::Radio *radioSt) {
-const auto &settings = Core::App().settings();
-const auto list = GetAudioOutputList(settings.callAudioBackend());
-const auto options = ranges::views::concat(
-ranges::views::single(tr::lng_settings_call_device_default(tr::now)),
-list | ranges::views::transform(&AudioOutput::name)
-) | ranges::to_vector;
-const auto i = ranges::find(
-list,
-settings.callOutputDeviceId(),
-&AudioOutput::id);
-const auto currentOption = (i != end(list))
-? int(i - begin(list) + 1)
-: 0;
-const auto save = [=](int option) {
-const auto deviceId = option
-? list[option - 1].id
-: "default";
-Core::App().calls().setCurrentAudioDevice(false, deviceId);
-chosen(deviceId, options[option]);
+box->setTitle(std::move(title));
+box->addButton(tr::lng_box_ok(), [=] { box->closeBox(); });
+const auto layout = box->verticalLayout();
+const auto skip = st::boxOptionListPadding.top()
++ st::defaultBoxCheckbox.margin.top();
+layout->add(object_ptr<Ui::FixedHeightWidget>(layout, skip));
+if (!st) {
+st = &st::defaultBoxCheckbox;
+}
+if (!radioSt) {
+radioSt = &st::defaultRadio;
+}
+struct State {
+std::vector<DeviceInfo> list;
+base::flat_map<int, QString> ids;
+rpl::variable<QString> currentId;
+QString currentName;
+bool ignoreValueChange = false;
};
-return Box([=](not_null<Ui::GenericBox*> box) {
-SingleChoiceBox(box, {
-.title = tr::lng_settings_call_output_device(),
-.options = options,
-.initialSelection = currentOption,
-.callback = save,
-.st = st,
-.radioSt = radioSt,
+const auto state = box->lifetime().make_state<State>();
+state->currentId = std::move(currentId);
+const auto group = std::make_shared<Ui::RadiobuttonGroup>();
+const auto fake = std::make_shared<Ui::RadiobuttonGroup>(0);
+const auto buttons = layout->add(object_ptr<Ui::VerticalLayout>(layout));
+const auto other = layout->add(object_ptr<Ui::VerticalLayout>(layout));
+const auto margins = QMargins(
+st::boxPadding.left() + st::boxOptionListPadding.left(),
+0,
+st::boxPadding.right(),
+st::boxOptionListSkip);
+const auto def = buttons->add(
+object_ptr<Ui::Radiobutton>(
+buttons,
+group,
+0,
+tr::lng_settings_call_device_default(tr::now),
+*st,
+*radioSt),
+margins);
+
+const auto selectCurrent = [=](QString current) {
+state->ignoreValueChange = true;
+const auto guard = gsl::finally([&] {
+state->ignoreValueChange = false;
});
+if (current.isEmpty() || current == kDefaultDeviceId) {
+group->setValue(0);
+other->clear();
+} else {
+auto found = false;
+for (const auto &[index, id] : state->ids) {
+if (id == current) {
+group->setValue(index);
+found = true;
+break;
+}
+}
+if (found) {
+other->clear();
+} else {
+group->setValue(0);
+const auto i = ranges::find(
+state->list,
+current,
+&DeviceInfo::id);
+if (i != end(state->list)) {
+const auto button = other->add(
+object_ptr<Ui::Radiobutton>(
+other,
+fake,
+0,
+i->name,
+*st,
+*radioSt),
+margins);
+button->show();
+button->setDisabled(true);
+button->finishAnimating();
+button->setAttribute(Qt::WA_TransparentForMouseEvents);
+while (other->count() > 1) {
+delete other->widgetAt(1);
+}
+if (const auto width = box->width()) {
+other->resizeToWidth(width);
+}
+} else {
+other->clear();
+}
+}
+}
+};
+
+std::move(
+devicesValue
+) | rpl::start_with_next([=](std::vector<DeviceInfo> &&list) {
+auto count = buttons->count();
+auto index = 1;
+state->ids.clear();
+state->list = std::move(list);
+
+state->ignoreValueChange = true;
+const auto guard = gsl::finally([&] {
+state->ignoreValueChange = false;
+});
+
+const auto current = state->currentId.current();
+for (const auto &info : state->list) {
+if (info.inactive) {
+continue;
+} else if (current == info.id) {
+group->setValue(index);
+}
+const auto button = buttons->insert(
+index,
+object_ptr<Ui::Radiobutton>(
+buttons,
+group,
+index,
+info.name,
+*st,
+*radioSt),
+margins);
+button->show();
+button->finishAnimating();
+
+state->ids.emplace(index, info.id);
+if (index < count) {
+delete buttons->widgetAt(index + 1);
+}
+++index;
+}
+while (index < count) {
+delete buttons->widgetAt(index);
+--count;
+}
+if (const auto width = box->width()) {
+buttons->resizeToWidth(width);
+}
+selectCurrent(current);
+}, box->lifetime());
+
+state->currentId.changes(
+) | rpl::start_with_next(selectCurrent, box->lifetime());
+
+def->finishAnimating();
+
+group->setChangedCallback([=](int value) {
+if (state->ignoreValueChange) {
+return;
+}
+const auto weak = Ui::MakeWeak(box);
+chosen(state->ids.take(value).value_or(kDefaultDeviceId));
+if (weak) {
+box->closeBox();
+}
});
}
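For illustration only: ChooseAudioDeviceBox is generic over the title, the device list and the callback, which is why the ChoosePlaybackDeviceBox and ChooseCaptureDeviceBox wrappers just below stay tiny. A hypothetical camera picker could be built the same way, assuming a DeviceType::Camera enumerator exists alongside Playback and Capture and reusing the existing camera title phrase; this wrapper is not part of the commit:

// Hypothetical wrapper, for illustration only.
object_ptr<Ui::GenericBox> ChooseCameraDeviceBox(
		rpl::producer<QString> currentId,
		Fn<void(QString id)> chosen,
		const style::Checkbox *st = nullptr,
		const style::Radio *radioSt = nullptr) {
	return Box(
		ChooseAudioDeviceBox,
		tr::lng_settings_call_camera(),
		Core::App().mediaDevices().devicesValue(DeviceType::Camera),
		std::move(currentId),
		std::move(chosen),
		st,
		radioSt);
}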
|
|
||||||
object_ptr<Ui::GenericBox> ChooseAudioInputBox(
|
object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
|
||||||
Fn<void(QString id, QString name)> chosen,
|
rpl::producer<QString> currentId,
|
||||||
|
Fn<void(QString id)> chosen,
|
||||||
const style::Checkbox *st,
|
const style::Checkbox *st,
|
||||||
const style::Radio *radioSt) {
|
const style::Radio *radioSt) {
|
||||||
const auto &settings = Core::App().settings();
|
return Box(
|
||||||
const auto list = GetAudioInputList(settings.callAudioBackend());
|
ChooseAudioDeviceBox,
|
||||||
const auto options = ranges::views::concat(
|
tr::lng_settings_call_output_device(),
|
||||||
ranges::views::single(tr::lng_settings_call_device_default(tr::now)),
|
Core::App().mediaDevices().devicesValue(DeviceType::Playback),
|
||||||
list | ranges::views::transform(&AudioInput::name)
|
std::move(currentId),
|
||||||
) | ranges::to_vector;
|
std::move(chosen),
|
||||||
const auto i = ranges::find(
|
st,
|
||||||
list,
|
radioSt);
|
||||||
Core::App().settings().callInputDeviceId(),
|
}
|
||||||
&AudioInput::id);
|
|
||||||
const auto currentOption = (i != end(list))
|
object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
|
||||||
? int(i - begin(list) + 1)
|
rpl::producer<QString> currentId,
|
||||||
: 0;
|
Fn<void(QString id)> chosen,
|
||||||
const auto save = [=](int option) {
|
const style::Checkbox *st,
|
||||||
const auto deviceId = option
|
const style::Radio *radioSt) {
|
||||||
? list[option - 1].id
|
return Box(
|
||||||
: "default";
|
ChooseAudioDeviceBox,
|
||||||
Core::App().calls().setCurrentAudioDevice(true, deviceId);
|
tr::lng_settings_call_input_device(),
|
||||||
chosen(deviceId, options[option]);
|
Core::App().mediaDevices().devicesValue(DeviceType::Capture),
|
||||||
};
|
std::move(currentId),
|
||||||
return Box([=](not_null<Ui::GenericBox*> box) {
|
std::move(chosen),
|
||||||
SingleChoiceBox(box, {
|
st,
|
||||||
.title = tr::lng_settings_call_input_device(),
|
radioSt);
|
||||||
.options = options,
|
|
||||||
.initialSelection = currentOption,
|
|
||||||
.callback = save,
|
|
||||||
.st = st,
|
|
||||||
.radioSt = radioSt,
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
//
|
|
||||||
//object_ptr<Ui::GenericBox> ChooseAudioBackendBox(
|
|
||||||
// const style::Checkbox *st,
|
|
||||||
// const style::Radio *radioSt) {
|
|
||||||
// const auto &settings = Core::App().settings();
|
|
||||||
// const auto list = GetAudioInputList(settings.callAudioBackend());
|
|
||||||
// const auto options = std::vector<QString>{
|
|
||||||
// "OpenAL",
|
|
||||||
// "Webrtc ADM",
|
|
||||||
//#ifdef Q_OS_WIN
|
|
||||||
// "Webrtc ADM2",
|
|
||||||
//#endif // Q_OS_WIN
|
|
||||||
// };
|
|
||||||
// const auto currentOption = static_cast<int>(settings.callAudioBackend());
|
|
||||||
// const auto save = [=](int option) {
|
|
||||||
// Core::App().settings().setCallAudioBackend(
|
|
||||||
// static_cast<Webrtc::Backend>(option));
|
|
||||||
// Core::App().saveSettings();
|
|
||||||
// Core::Restart();
|
|
||||||
// };
|
|
||||||
// return Box([=](not_null<Ui::GenericBox*> box) {
|
|
||||||
// SingleChoiceBox(box, {
|
|
||||||
// .title = rpl::single<QString>("Calls audio backend"),
|
|
||||||
// .options = options,
|
|
||||||
// .initialSelection = currentOption,
|
|
||||||
// .callback = save,
|
|
||||||
// .st = st,
|
|
||||||
// .radioSt = radioSt,
|
|
||||||
// });
|
|
||||||
// });
|
|
||||||
//}
|
|
||||||
|
|
||||||
} // namespace Settings
|
} // namespace Settings
|
||||||
|
|
||||||
|
|
|
@@ -54,8 +54,6 @@ private:

const not_null<Window::SessionController*> _controller;
rpl::event_stream<QString> _cameraNameStream;
-rpl::event_stream<QString> _outputNameStream;
-rpl::event_stream<QString> _inputNameStream;
std::unique_ptr<Webrtc::AudioInputTester> _micTester;
Ui::LevelMeter *_micTestLevel = nullptr;
float _micLevel = 0.;

@@ -67,19 +65,20 @@ private:
inline constexpr auto kMicTestUpdateInterval = crl::time(100);
inline constexpr auto kMicTestAnimationDuration = crl::time(200);

-[[nodiscard]] QString CurrentAudioOutputName();
-[[nodiscard]] QString CurrentAudioInputName();
-[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioOutputBox(
-Fn<void(QString id, QString name)> chosen,
+[[nodiscard]] rpl::producer<QString> PlaybackDeviceNameValue(
+rpl::producer<QString> id);
+[[nodiscard]] rpl::producer<QString> CaptureDeviceNameValue(
+rpl::producer<QString> id);
+[[nodiscard]] object_ptr<Ui::GenericBox> ChoosePlaybackDeviceBox(
+rpl::producer<QString> currentId,
+Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
-[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioInputBox(
-Fn<void(QString id, QString name)> chosen,
+[[nodiscard]] object_ptr<Ui::GenericBox> ChooseCaptureDeviceBox(
+rpl::producer<QString> currentId,
+Fn<void(QString id)> chosen,
const style::Checkbox *st = nullptr,
const style::Radio *radioSt = nullptr);
-//[[nodiscard]] object_ptr<Ui::GenericBox> ChooseAudioBackendBox(
-// const style::Checkbox *st = nullptr,
-// const style::Radio *radioSt = nullptr);

} // namespace Settings
@@ -1148,9 +1148,9 @@ bool ReadSetting(
settingsStream >> duckingEnabled;
if (CheckStreamStatus(settingsStream)) {
auto &app = Core::App().settings();
-app.setCallOutputDeviceId(outputDeviceID);
+app.setCallPlaybackDeviceId(outputDeviceID);
+app.setCallCaptureDeviceId(inputDeviceID);
app.setCallOutputVolume(outputVolume);
-app.setCallInputDeviceId(inputDeviceID);
app.setCallInputVolume(inputVolume);
app.setCallAudioDuckingEnabled(duckingEnabled);
}
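For context, a condensed sketch of the read-and-migrate flow this hunk is part of: the legacy serialized call settings store one output and one input device id, and after this commit both are written into the new call-specific playback and capture ids. The variable names come from the hunk above; the stream field order and types shown here are assumptions:

// Sketch only: assumed field order, only the values referenced in the hunk.
QString outputDeviceID, inputDeviceID;
qint32 outputVolume = 0, inputVolume = 0, duckingEnabled = 0;
settingsStream
	>> outputDeviceID
	>> outputVolume
	>> inputDeviceID
	>> inputVolume
	>> duckingEnabled;
if (CheckStreamStatus(settingsStream)) {
	auto &app = Core::App().settings();
	app.setCallPlaybackDeviceId(outputDeviceID);
	app.setCallCaptureDeviceId(inputDeviceID);
	app.setCallOutputVolume(outputVolume);
	app.setCallInputVolume(inputVolume);
	app.setCallAudioDuckingEnabled(duckingEnabled);
}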
@@ -1 +1 @@
-Subproject commit b68a95ad4d1ae9a1827671100a7fd76cbe448c3f
+Subproject commit 5a831697880967bbccbd45177fb6cf6b11759a22