Mirror of https://github.com/AyuGram/AyuGramDesktop.git (synced 2025-06-06 23:24:01 +02:00)
Improve voice/video chat members management.
This commit is contained in: parent 2e400d88d3, commit 54c2769d8a
7 changed files with 457 additions and 161 deletions
@@ -108,6 +108,11 @@ constexpr auto kPlayConnectingEach = crl::time(1056) + 2 * crl::time(1000);
 	return video.value("endpoint").toString().toStdString();
 }
 
+[[nodiscard]] const std::string &EmptyString() {
+	static const auto result = std::string();
+	return result;
+}
+
 } // namespace
 
 class GroupCall::LoadPartTask final : public tgcalls::BroadcastPartTask {
@@ -173,6 +178,10 @@ struct GroupCall::LargeTrack {
 	std::shared_ptr<Webrtc::SinkInterface> sink;
 };
 
+struct GroupCall::SinkPointer {
+	std::shared_ptr<Webrtc::SinkInterface> data;
+};
+
 [[nodiscard]] bool IsGroupCallAdmin(
 		not_null<PeerData*> peer,
 		not_null<PeerData*> participantPeer) {
@@ -447,20 +456,29 @@ GroupCall::GroupCall(
 
 GroupCall::~GroupCall() {
 	destroyController();
+	destroyScreencast();
 }
 
-bool GroupCall::isScreenSharing() const {
+bool GroupCall::isSharingScreen() const {
 	return _screenOutgoing
 		&& (_screenOutgoing->state() == Webrtc::VideoState::Active);
 }
 
-bool GroupCall::isCameraSharing() const {
+const std::string &GroupCall::screenSharingEndpoint() const {
+	return isSharingScreen() ? _screenEndpoint : EmptyString();
+}
+
+bool GroupCall::isSharingCamera() const {
 	return _cameraOutgoing
 		&& (_cameraOutgoing->state() == Webrtc::VideoState::Active);
 }
 
+const std::string &GroupCall::cameraSharingEndpoint() const {
+	return isSharingCamera() ? _cameraEndpoint : EmptyString();
+}
+
 QString GroupCall::screenSharingDeviceId() const {
-	return isScreenSharing() ? _screenDeviceId : QString();
+	return isSharingScreen() ? _screenDeviceId : QString();
 }
 
 void GroupCall::toggleVideo(bool active) {
@@ -506,26 +524,11 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 		setScheduledDate(date);
 	}, _lifetime);
 
-	const auto emptyEndpoint = std::string();
-
-	real->participantsSliceAdded(
+	real->participantsReloaded(
 	) | rpl::start_with_next([=] {
-		const auto &participants = real->participants();
-		for (const auto &participant : participants) {
-			const auto camera = participant.cameraEndpoint();
-			const auto screen = participant.screenEndpoint();
-			if (!camera.empty()
-				&& _activeVideoEndpoints.emplace(camera).second
-				&& _incomingVideoEndpoints.contains(camera)) {
-				_streamsVideoUpdated.fire({ camera, true });
-			}
-			if (!screen.empty()
-				&& _activeVideoEndpoints.emplace(screen).second
-				&& _incomingVideoEndpoints.contains(screen)) {
-				_streamsVideoUpdated.fire({ screen, true });
-			}
-		}
+		fillActiveVideoEndpoints();
 	}, _lifetime);
+	fillActiveVideoEndpoints();
 
 	using Update = Data::GroupCall::ParticipantUpdate;
 	real->participantUpdated(
@@ -533,6 +536,14 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 		auto newLarge = _videoEndpointLarge.current();
 		auto updateCameraNotStreams = std::string();
 		auto updateScreenNotStreams = std::string();
+		const auto regularEndpoint = [&](const std::string &endpoint)
+		-> const std::string & {
+			return (endpoint.empty()
+				|| endpoint == _cameraEndpoint
+				|| endpoint == _screenEndpoint)
+				? EmptyString()
+				: endpoint;
+		};
 		const auto guard = gsl::finally([&] {
 			if (newLarge.empty()) {
 				newLarge = chooseLargeVideoEndpoint();
@@ -549,14 +560,16 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 		});
 
 		const auto &wasCameraEndpoint = (data.was && data.was->videoParams)
-			? data.was->videoParams->camera.endpoint
-			: emptyEndpoint;
+			? regularEndpoint(data.was->videoParams->camera.endpoint)
+			: EmptyString();
 		const auto &nowCameraEndpoint = (data.now && data.now->videoParams)
-			? data.now->videoParams->camera.endpoint
-			: emptyEndpoint;
+			? regularEndpoint(data.now->videoParams->camera.endpoint)
+			: EmptyString();
 		if (wasCameraEndpoint != nowCameraEndpoint) {
 			if (!nowCameraEndpoint.empty()
-				&& _activeVideoEndpoints.emplace(nowCameraEndpoint).second
+				&& _activeVideoEndpoints.emplace(
+					nowCameraEndpoint,
+					EndpointType::Camera).second
 				&& _incomingVideoEndpoints.contains(nowCameraEndpoint)) {
 				_streamsVideoUpdated.fire({ nowCameraEndpoint, true });
 			}
@@ -565,19 +578,21 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 				&& _incomingVideoEndpoints.contains(wasCameraEndpoint)) {
 				updateCameraNotStreams = wasCameraEndpoint;
 				if (newLarge == wasCameraEndpoint) {
-					newLarge = std::string();
+					_videoEndpointPinned = newLarge = std::string();
 				}
 			}
 		}
 		const auto &wasScreenEndpoint = (data.was && data.was->videoParams)
 			? data.was->videoParams->screen.endpoint
-			: emptyEndpoint;
+			: EmptyString();
 		const auto &nowScreenEndpoint = (data.now && data.now->videoParams)
 			? data.now->videoParams->screen.endpoint
-			: emptyEndpoint;
+			: EmptyString();
 		if (wasScreenEndpoint != nowScreenEndpoint) {
 			if (!nowScreenEndpoint.empty()
-				&& _activeVideoEndpoints.emplace(nowScreenEndpoint).second
+				&& _activeVideoEndpoints.emplace(
+					nowScreenEndpoint,
+					EndpointType::Screen).second
 				&& _incomingVideoEndpoints.contains(nowScreenEndpoint)) {
 				_streamsVideoUpdated.fire({ nowScreenEndpoint, true });
 			}
@@ -586,7 +601,7 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 				&& _incomingVideoEndpoints.contains(wasScreenEndpoint)) {
 				updateScreenNotStreams = wasScreenEndpoint;
 				if (newLarge == wasScreenEndpoint) {
-					newLarge = std::string();
+					_videoEndpointPinned = newLarge = std::string();
 				}
 			}
 		}
@@ -600,7 +615,8 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 			return;
 		}
 		if (nowScreenEndpoint != newLarge
-			&& streamsVideo(nowScreenEndpoint)) {
+			&& streamsVideo(nowScreenEndpoint)
+			&& activeVideoEndpointType(newLarge) != EndpointType::Screen) {
 			newLarge = nowScreenEndpoint;
 		}
 		const auto &participants = real->participants();
@@ -639,8 +655,8 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 				newLarge = soundingEndpoint;
 			}
 		} else if ((nowSpeaking || nowSounding)
-			&& (nowScreenEndpoint != newLarge)
 			&& (nowCameraEndpoint != newLarge)
+			&& (activeVideoEndpointType(newLarge) != EndpointType::Screen)
 			&& streamsVideo(nowCameraEndpoint)) {
 			const auto participant = real->participantByEndpoint(newLarge);
 			const auto screen = participant
@@ -701,6 +717,7 @@ void GroupCall::setState(State state) {
 		// Destroy controller before destroying Call Panel,
 		// so that the panel hide animation is smooth.
 		destroyController();
+		destroyScreencast();
 	}
 	switch (state) {
 	case State::HangingUp:
@@ -833,6 +850,90 @@ void GroupCall::join(const MTPInputGroupCall &inputCall) {
 	});
 }
 
+void GroupCall::setMyEndpointType(
+		const std::string &endpoint,
+		EndpointType type) {
+	if (endpoint.empty()) {
+		return;
+	} else if (type == EndpointType::None) {
+		const auto was1 = _incomingVideoEndpoints.remove(endpoint);
+		const auto was2 = _activeVideoEndpoints.remove(endpoint);
+		if (was1 && was2) {
+			auto newLarge = _videoEndpointLarge.current();
+			if (newLarge == endpoint) {
+				_videoEndpointPinned = std::string();
+				_videoEndpointLarge = chooseLargeVideoEndpoint();
+			}
+			_streamsVideoUpdated.fire({ endpoint, false });
+		}
+	} else {
+		const auto now1 = _incomingVideoEndpoints.emplace(endpoint).second;
+		const auto now2 = _activeVideoEndpoints.emplace(
+			endpoint,
+			type).second;
+		if (now1 && now2) {
+			_streamsVideoUpdated.fire({ endpoint, true });
+		}
+		const auto nowLarge = activeVideoEndpointType(
+			_videoEndpointLarge.current());
+		if (_videoEndpointPinned.empty()
+			&& ((type == EndpointType::Screen
+				&& nowLarge != EndpointType::Screen)
+				|| (type == EndpointType::Camera
+					&& nowLarge == EndpointType::None))) {
+			_videoEndpointLarge = endpoint;
+		}
+	}
+}
+
+void GroupCall::setScreenEndpoint(std::string endpoint) {
+	if (_screenEndpoint == endpoint) {
+		return;
+	}
+	if (!_screenEndpoint.empty()) {
+		setMyEndpointType(_screenEndpoint, EndpointType::None);
+	}
+	_screenEndpoint = std::move(endpoint);
+	if (_screenEndpoint.empty()) {
+		return;
+	}
+	if (_instance) {
+		_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
+	}
+	if (isSharingScreen()) {
+		setMyEndpointType(_screenEndpoint, EndpointType::Screen);
+	}
+}
+
+void GroupCall::setCameraEndpoint(std::string endpoint) {
+	if (_cameraEndpoint == endpoint) {
+		return;
+	}
+	if (!_cameraEndpoint.empty()) {
+		setMyEndpointType(_cameraEndpoint, EndpointType::None);
+	}
+	_cameraEndpoint = std::move(endpoint);
+	if (_cameraEndpoint.empty()) {
+		return;
+	}
+	if (isSharingCamera()) {
+		setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
+	}
+}
+
+void GroupCall::addVideoOutput(
+		const std::string &endpoint,
+		SinkPointer sink) {
+	if (_cameraEndpoint == endpoint) {
+		_cameraCapture->setOutput(sink.data);
+	} else if (_screenEndpoint == endpoint) {
+		_screenCapture->setOutput(sink.data);
+	} else {
+		Assert(_instance != nullptr);
+		_instance->addIncomingVideoOutput(endpoint, sink.data);
+	}
+}
+
 void GroupCall::rejoin() {
 	rejoin(_joinAs);
 }
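
The helpers added above keep the local user's own camera and screen endpoints registered in the same two containers that track remote video: the incoming set reported by the tgcalls instance and the active map built from participant video params. A stream only counts as visible when an endpoint is in both. The standalone sketch below models just that bookkeeping with standard containers; the struct, member names and the update callback are illustrative, not the real GroupCall API.

#include <functional>
#include <iostream>
#include <map>
#include <set>
#include <string>

enum class EndpointType { None, Camera, Screen };

// Simplified model of the two endpoint containers used by the commit.
struct EndpointBook {
	std::set<std::string> incoming;                   // reported by the call instance
	std::map<std::string, EndpointType> active;       // advertised by participants / self
	std::function<void(std::string, bool)> onUpdate;  // fired when visibility flips

	void setMyEndpointType(const std::string &endpoint, EndpointType type) {
		if (endpoint.empty()) {
			return;
		} else if (type == EndpointType::None) {
			const auto was1 = incoming.erase(endpoint) > 0;
			const auto was2 = active.erase(endpoint) > 0;
			if (was1 && was2) {
				onUpdate(endpoint, false); // stream stopped being visible
			}
		} else {
			const auto now1 = incoming.insert(endpoint).second;
			const auto now2 = active.emplace(endpoint, type).second;
			if (now1 && now2) {
				onUpdate(endpoint, true); // stream became visible
			}
		}
	}
};

int main() {
	EndpointBook book;
	book.onUpdate = [](const std::string &endpoint, bool streams) {
		std::cout << endpoint << (streams ? " started" : " stopped") << '\n';
	};
	book.setMyEndpointType("my-screen", EndpointType::Screen);
	book.setMyEndpointType("my-screen", EndpointType::None);
}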
@@ -913,6 +1014,10 @@ void GroupCall::rejoin(not_null<PeerData*> as) {
 			applyQueuedSelfUpdates();
 			checkFirstTimeJoined();
 			sendSelfUpdate(SendUpdateType::VideoMuted);
+			if (_screenSsrc && isSharingScreen()) {
+				LOG(("Call Info: Screen rejoin after rejoin()."));
+				rejoinPresentation();
+			}
 		}).fail([=](const MTP::Error &error) {
 			const auto type = error.type();
 			LOG(("Call Error: Could not join, error: %1").arg(type));
@@ -970,22 +1075,16 @@ void GroupCall::rejoinPresentation() {
 			const auto type = error.type();
 			LOG(("Call Error: "
 				"Could not screen join, error: %1").arg(type));
 
 			if (type == u"GROUPCALL_SSRC_DUPLICATE_MUCH") {
 				rejoinPresentation();
-				return;
+			} else if (type == u"GROUPCALL_JOIN_MISSING"_q
+				|| type == u"GROUPCALL_FORBIDDEN"_q) {
+				_screenSsrc = ssrc;
+				rejoin();
+			} else {
+				_screenSsrc = 0;
+				setScreenEndpoint(std::string());
 			}
-
-			//hangup();
-			//Ui::ShowMultilineToast({
-			//	.text = { type == u"GROUPCALL_ANONYMOUS_FORBIDDEN"_q
-			//		? tr::lng_group_call_no_anonymous(tr::now)
-			//		: type == u"GROUPCALL_PARTICIPANTS_TOO_MUCH"_q
-			//		? tr::lng_group_call_too_many(tr::now)
-			//		: type == u"GROUPCALL_FORBIDDEN"_q
-			//		? tr::lng_group_not_accessible(tr::now)
-			//		: Lang::Hard::ServerError() },
-			//});
 		}).send();
 	});
 	});
@@ -999,12 +1098,14 @@ void GroupCall::leavePresentation() {
 		inputCall()
 	)).done([=](const MTPUpdates &updates) {
 		_screenSsrc = 0;
+		setScreenEndpoint(std::string());
 		_peer->session().api().applyUpdates(updates);
 	}).fail([=](const MTP::Error &error) {
 		const auto type = error.type();
 		LOG(("Call Error: "
 			"Could not screen leave, error: %1").arg(type));
 		_screenSsrc = 0;
+		setScreenEndpoint(std::string());
 	}).send();
 }
 
@@ -1233,11 +1334,9 @@ void GroupCall::toggleScheduleStartSubscribed(bool subscribed) {
 }
 
 void GroupCall::addVideoOutput(
-		const std::string &endpointId,
+		const std::string &endpoint,
 		not_null<Webrtc::VideoTrack*> track) {
-	if (_instance) {
-		_instance->addIncomingVideoOutput(endpointId, track->sink());
-	}
+	addVideoOutput(endpoint, { track->sink() });
 }
 
 void GroupCall::setMuted(MuteState mute) {
@@ -1314,16 +1413,15 @@ void GroupCall::handlePossibleCreateOrJoinResponse(
 		setScreenInstanceMode(InstanceMode::Rtc);
 		data.vparams().match([&](const MTPDdataJSON &data) {
 			const auto json = data.vdata().v;
-			_screenEndpoint = ParseVideoEndpoint(json);
-			if (!_screenEndpoint.empty() && _instance) {
-				_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
-			}
+			setScreenEndpoint(ParseVideoEndpoint(json));
 			_screenInstance->setJoinResponsePayload(json.toStdString());
 		});
 	} else {
 		setInstanceMode(InstanceMode::Rtc);
 		data.vparams().match([&](const MTPDdataJSON &data) {
-			_instance->setJoinResponsePayload(data.vdata().v.toStdString());
+			const auto json = data.vdata().v;
+			setCameraEndpoint(ParseVideoEndpoint(json));
+			_instance->setJoinResponsePayload(json.toStdString());
 		});
 		checkMediaChannelDescriptions();
 	}
@@ -1534,7 +1632,6 @@ void GroupCall::ensureOutgoingVideo() {
 			if (!_cameraCapture) {
 				_cameraCapture = _delegate->groupCallGetVideoCapture(
 					_cameraInputId);
-				_cameraCapture->setOutput(_cameraOutgoing->sink());
 			} else {
 				_cameraCapture->switchToDevice(_cameraInputId.toStdString());
 			}
@@ -1542,9 +1639,13 @@ void GroupCall::ensureOutgoingVideo() {
 				_instance->setVideoCapture(_cameraCapture);
 			}
 			_cameraCapture->setState(tgcalls::VideoState::Active);
-		} else if (_cameraCapture) {
-			_cameraCapture->setState(tgcalls::VideoState::Inactive);
+			setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
+		} else {
+			if (_cameraCapture) {
+				_cameraCapture->setState(tgcalls::VideoState::Inactive);
+			}
+			setMyEndpointType(_cameraEndpoint, EndpointType::None);
 		}
 		sendSelfUpdate(SendUpdateType::VideoMuted);
 		applyMeInCallLocally();
 	}, _lifetime);
@@ -1559,7 +1660,6 @@ void GroupCall::ensureOutgoingVideo() {
 					tgcalls::VideoCaptureInterface::Create(
 						tgcalls::StaticThreads::getThreads(),
 						_screenDeviceId.toStdString()));
-				_screenCapture->setOutput(_screenOutgoing->sink());
 			} else {
 				_screenCapture->switchToDevice(_screenDeviceId.toStdString());
 			}
@@ -1567,9 +1667,13 @@ void GroupCall::ensureOutgoingVideo() {
 				_screenInstance->setVideoCapture(_screenCapture);
 			}
 			_screenCapture->setState(tgcalls::VideoState::Active);
-		} else if (_screenCapture) {
-			_screenCapture->setState(tgcalls::VideoState::Inactive);
+			setMyEndpointType(_screenEndpoint, EndpointType::Screen);
+		} else {
+			if (_screenCapture) {
+				_screenCapture->setState(tgcalls::VideoState::Inactive);
+			}
+			setMyEndpointType(_screenEndpoint, EndpointType::None);
 		}
 		joinLeavePresentation();
 	}, _lifetime);
 }
@@ -1657,7 +1761,7 @@ void GroupCall::ensureControllerCreated() {
 		.incomingVideoSourcesUpdated = [=](
 				std::vector<std::string> endpointIds) {
 			crl::on_main(weak, [=, endpoints = std::move(endpointIds)] {
-				setIncomingVideoStreams(endpoints);
+				setIncomingVideoEndpoints(endpoints);
 			});
 		},
 		.requestBroadcastPart = [=](
@@ -1722,14 +1826,15 @@ void GroupCall::ensureControllerCreated() {
 		}
 		_videoLargeTrackWrap->sink = Webrtc::CreateProxySink(
 			_videoLargeTrackWrap->track.sink());
-		_instance->addIncomingVideoOutput(
-			endpoint,
-			_videoLargeTrackWrap->sink);
+		addVideoOutput(endpoint, { _videoLargeTrackWrap->sink });
 	}, _lifetime);
 
 	updateInstanceMuteState();
 	updateInstanceVolumes();
 
+	if (!_screenEndpoint.empty()) {
+		_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
+	}
 	//raw->setAudioOutputDuckingEnabled(settings.callAudioDuckingEnabled());
 }
 
@@ -1909,10 +2014,10 @@ bool GroupCall::mediaChannelDescriptionsFill(
 		} else if (const auto byScreen
 			= real->participantPeerByScreenSsrc(ssrc)) {
 			addVideoChannel(byScreen, &ParticipantVideoParams::screen);
-		} else if (resolved(ssrc)) {
-			add(std::nullopt);
 		} else if (!resolved) {
 			_unresolvedSsrcs.emplace(ssrc);
+		} else if (resolved(ssrc)) {
+			add(std::nullopt);
 		}
 	}
 	return result;
@@ -1926,25 +2031,33 @@ void GroupCall::mediaChannelDescriptionsCancel(
 	}
 }
 
-void GroupCall::setIncomingVideoStreams(
+void GroupCall::setIncomingVideoEndpoints(
 		const std::vector<std::string> &endpoints) {
-	const auto large = _videoEndpointLarge.current();
-	auto newLarge = large;
-	if (!large.empty() && !ranges::contains(endpoints, large)) {
-		newLarge = _videoEndpointPinned = std::string();
-	}
+	auto newLarge = _videoEndpointLarge.current();
+	auto newLargeFound = false;
 	auto removed = _incomingVideoEndpoints;
-	for (const auto &endpoint : endpoints) {
-		const auto i = removed.find(endpoint);
-		const auto videoActive = _activeVideoEndpoints.contains(endpoint);
-		if (i != end(removed)) {
-			removed.erase(i);
-		} else {
+	const auto feedOne = [&](const std::string &endpoint) {
+		if (endpoint.empty()) {
+			return;
+		} else if (endpoint == newLarge) {
+			newLargeFound = true;
+		}
+		if (!removed.remove(endpoint)) {
 			_incomingVideoEndpoints.emplace(endpoint);
-			if (videoActive) {
+			if (_activeVideoEndpoints.contains(endpoint)) {
 				_streamsVideoUpdated.fire({ endpoint, true });
 			}
 		}
+	};
+	for (const auto &endpoint : endpoints) {
+		if (endpoint != _cameraEndpoint && endpoint != _screenEndpoint) {
+			feedOne(endpoint);
+		}
+	}
+	feedOne(cameraSharingEndpoint());
+	feedOne(screenSharingEndpoint());
+	if (!newLarge.empty() && !newLargeFound) {
+		newLarge = _videoEndpointPinned = std::string();
 	}
 	if (newLarge.empty()) {
 		_videoEndpointLarge = chooseLargeVideoEndpoint();
@@ -1956,6 +2069,65 @@ void GroupCall::setIncomingVideoStreams(
 	}
 }
 
+void GroupCall::fillActiveVideoEndpoints() {
+	const auto real = lookupReal();
+	Assert(real != nullptr);
+
+	const auto &participants = real->participants();
+	auto newLarge = _videoEndpointLarge.current();
+	auto newLargeFound = false;
+	auto removed = _activeVideoEndpoints;
+	const auto feedOne = [&](
+			const std::string &endpoint,
+			EndpointType type) {
+		if (endpoint.empty()) {
+			return;
+		} else if (endpoint == newLarge) {
+			newLargeFound = true;
+		}
+		if (!removed.remove(endpoint)) {
+			_activeVideoEndpoints.emplace(endpoint, type);
+			if (_incomingVideoEndpoints.contains(endpoint)) {
+				_streamsVideoUpdated.fire({ endpoint, true });
+			}
+		}
+	};
+	for (const auto &participant : participants) {
+		const auto camera = participant.cameraEndpoint();
+		if (camera != _cameraEndpoint && camera != _screenEndpoint) {
+			feedOne(camera, EndpointType::Camera);
+		}
+		const auto screen = participant.screenEndpoint();
+		if (screen != _cameraEndpoint && screen != _screenEndpoint) {
+			feedOne(screen, EndpointType::Screen);
+		}
+	}
+	feedOne(cameraSharingEndpoint(), EndpointType::Camera);
+	feedOne(screenSharingEndpoint(), EndpointType::Screen);
+	if (!newLarge.empty() && !newLargeFound) {
+		newLarge = _videoEndpointPinned = std::string();
+	}
+	if (newLarge.empty()) {
+		_videoEndpointLarge = chooseLargeVideoEndpoint();
+	}
+	for (const auto &[endpoint, type] : removed) {
+		if (_activeVideoEndpoints.remove(endpoint)) {
+			_streamsVideoUpdated.fire({ endpoint, false });
+		}
+	}
+}
+
+GroupCall::EndpointType GroupCall::activeVideoEndpointType(
+		const std::string &endpoint) const {
+	if (endpoint.empty()) {
+		return EndpointType::None;
+	}
+	const auto i = _activeVideoEndpoints.find(endpoint);
+	return (i != end(_activeVideoEndpoints))
+		? i->second
+		: EndpointType::None;
+}
+
 std::string GroupCall::chooseLargeVideoEndpoint() const {
 	const auto real = lookupReal();
 	if (!real) {
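
fillActiveVideoEndpoints() and setIncomingVideoEndpoints() above share one reconciliation pattern: copy the current container into removed, feed every endpoint that should stay (either dropping it from removed or inserting it as new and firing an update), and finally treat whatever is left in removed as gone. A compact standalone model of that pattern follows; the function and container names are illustrative, not the real members.

#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class EndpointType { None, Camera, Screen };

// Reconcile a desired endpoint list against the current map, reporting which
// endpoints appeared and which disappeared (model of the removed/feedOne pattern).
void reconcile(
		std::map<std::string, EndpointType> &current,
		const std::vector<std::pair<std::string, EndpointType>> &desired) {
	auto removed = current;
	const auto feedOne = [&](const std::string &endpoint, EndpointType type) {
		if (endpoint.empty()) {
			return;
		}
		if (removed.erase(endpoint) == 0) {
			current.emplace(endpoint, type);
			std::cout << "appeared: " << endpoint << '\n';
		}
	};
	for (const auto &[endpoint, type] : desired) {
		feedOne(endpoint, type);
	}
	for (const auto &[endpoint, type] : removed) {
		current.erase(endpoint);
		std::cout << "disappeared: " << endpoint << '\n';
	}
}

int main() {
	std::map<std::string, EndpointType> current = {
		{ "old-camera", EndpointType::Camera },
	};
	reconcile(current, { { "new-screen", EndpointType::Screen } });
}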
@@ -1965,9 +2137,13 @@ std::string GroupCall::chooseLargeVideoEndpoint() const {
 	auto screenEndpoint = std::string();
 	auto speakingEndpoint = std::string();
 	auto soundingEndpoint = std::string();
+	const auto &myCameraEndpoint = cameraSharingEndpoint();
+	const auto &myScreenEndpoint = screenSharingEndpoint();
 	const auto &participants = real->participants();
 	for (const auto &endpoint : _incomingVideoEndpoints) {
-		if (!_activeVideoEndpoints.contains(endpoint)) {
+		if (!_activeVideoEndpoints.contains(endpoint)
+			|| endpoint == _cameraEndpoint
+			|| endpoint == _screenEndpoint) {
 			continue;
 		}
 		if (const auto participant = real->participantByEndpoint(endpoint)) {
@@ -1989,11 +2165,17 @@ std::string GroupCall::chooseLargeVideoEndpoint() const {
 	}
 	return !screenEndpoint.empty()
 		? screenEndpoint
+		: streamsVideo(myScreenEndpoint)
+		? myScreenEndpoint
 		: !speakingEndpoint.empty()
 		? speakingEndpoint
 		: !soundingEndpoint.empty()
 		? soundingEndpoint
-		: anyEndpoint;
+		: !anyEndpoint.empty()
+		? anyEndpoint
+		: streamsVideo(myCameraEndpoint)
+		? myCameraEndpoint
+		: std::string();
 }
 
 void GroupCall::updateInstanceMuteState() {
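
With the additions above, the large-video slot is picked by a fixed priority: a remote screencast first, then the user's own screencast, then a speaking participant, a sounding one, any remaining remote video, and finally the user's own camera. A reduced sketch of that cascade over plain strings (parameters already filtered to "streams or empty"; names are hypothetical):

#include <string>

// Priority cascade modelled on the commit's chooseLargeVideoEndpoint().
std::string chooseLarge(
		const std::string &screenEndpoint,
		const std::string &myScreenEndpoint,
		const std::string &speakingEndpoint,
		const std::string &soundingEndpoint,
		const std::string &anyEndpoint,
		const std::string &myCameraEndpoint) {
	return !screenEndpoint.empty()
		? screenEndpoint
		: !myScreenEndpoint.empty()
		? myScreenEndpoint
		: !speakingEndpoint.empty()
		? speakingEndpoint
		: !soundingEndpoint.empty()
		? soundingEndpoint
		: !anyEndpoint.empty()
		? anyEndpoint
		: myCameraEndpoint; // may still be empty: nothing to show large
}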
@@ -2124,25 +2306,22 @@ void GroupCall::checkJoined() {
 		if (!ranges::contains(result.v, MTP_int(_mySsrc))) {
 			LOG(("Call Info: Rejoin after no _mySsrc in checkGroupCall."));
 			rejoin();
-		} else if (state() == State::Connecting) {
+		} else {
+			if (state() == State::Connecting) {
 				_checkJoinedTimer.callOnce(kCheckJoinedTimeout);
 			}
 			if (_screenSsrc
 				&& !ranges::contains(result.v, MTP_int(_screenSsrc))
-				&& isScreenSharing()) {
+				&& isSharingScreen()) {
 				LOG(("Call Info: "
 					"Screen rejoin after _screenSsrc not found."));
 				rejoinPresentation();
 			}
+		}
 	}).fail([=](const MTP::Error &error) {
 		LOG(("Call Info: Full rejoin after error '%1' in checkGroupCall."
 		).arg(error.type()));
 		rejoin();
-		if (_screenSsrc && isScreenSharing()) {
-			LOG(("Call Info: "
-				"Full screen rejoin after _screenSsrc not found."));
-			rejoinPresentation();
-		}
 	}).send();
 }
 
@@ -2493,4 +2672,12 @@ void GroupCall::destroyController() {
 	}
 }
 
+void GroupCall::destroyScreencast() {
+	if (_screenInstance) {
+		DEBUG_LOG(("Call Info: Destroying call screen controller.."));
+		_screenInstance.reset();
+		DEBUG_LOG(("Call Info: Call screen controller destroyed."));
+	}
+}
+
 } // namespace Calls
@@ -232,7 +232,7 @@ public:
 	[[nodiscard]] bool streamsVideo(const std::string &endpoint) const {
 		return !endpoint.empty()
 			&& _incomingVideoEndpoints.contains(endpoint)
-			&& _activeVideoEndpoints.contains(endpoint);
+			&& activeVideoEndpointType(endpoint) != EndpointType::None;
 	}
 	[[nodiscard]] const std::string &videoEndpointPinned() const {
 		return _videoEndpointPinned;
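
The streamsVideo() tweak keeps its meaning: an endpoint counts as streaming only when it is both received from the instance and known as an active camera or screen; the new form just reads the type out of the flat_map instead of testing set membership. An equivalent standalone check with stand-in standard containers (the real ones live inside Calls::GroupCall):

#include <map>
#include <set>
#include <string>

enum class EndpointType { None, Camera, Screen };

// Stand-in containers, not the real GroupCall members.
static std::set<std::string> incomingVideoEndpoints;
static std::map<std::string, EndpointType> activeVideoEndpoints;

bool streamsVideo(const std::string &endpoint) {
	const auto i = activeVideoEndpoints.find(endpoint);
	const auto type = (i != activeVideoEndpoints.end())
		? i->second
		: EndpointType::None;
	return !endpoint.empty()
		&& incomingVideoEndpoints.count(endpoint) > 0
		&& type != EndpointType::None;
}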
@@ -265,8 +265,10 @@ public:
 
 	void setCurrentAudioDevice(bool input, const QString &deviceId);
 	void setCurrentVideoDevice(const QString &deviceId);
-	[[nodiscard]] bool isScreenSharing() const;
-	[[nodiscard]] bool isCameraSharing() const;
+	[[nodiscard]] bool isSharingScreen() const;
+	[[nodiscard]] const std::string &screenSharingEndpoint() const;
+	[[nodiscard]] bool isSharingCamera() const;
+	[[nodiscard]] const std::string &cameraSharingEndpoint() const;
 	[[nodiscard]] QString screenSharingDeviceId() const;
 	void toggleVideo(bool active);
 	void toggleScreenSharing(std::optional<QString> uniqueId);
@@ -300,6 +302,7 @@ public:
 private:
 	using GlobalShortcutValue = base::GlobalShortcutValue;
 	struct LargeTrack;
+	struct SinkPointer;
 
 	struct LoadingPart {
 		std::shared_ptr<LoadPartTask> task;
@@ -321,6 +324,11 @@ private:
 		RaiseHand,
 		VideoMuted,
 	};
+	enum class EndpointType {
+		None,
+		Camera,
+		Screen,
+	};
 
 	[[nodiscard]] bool mediaChannelDescriptionsFill(
 		not_null<MediaChannelDescriptionsTask*> task,
@@ -370,8 +378,12 @@ private:
 	void stopConnectingSound();
 	void playConnectingSoundOnce();
 
-	void setIncomingVideoStreams(const std::vector<std::string> &endpoints);
+	void setIncomingVideoEndpoints(
+		const std::vector<std::string> &endpoints);
+	void fillActiveVideoEndpoints();
 	[[nodiscard]] std::string chooseLargeVideoEndpoint() const;
+	[[nodiscard]] EndpointType activeVideoEndpointType(
+		const std::string &endpoint) const;
 
 	void editParticipant(
 		not_null<PeerData*> participantPeer,
@@ -387,6 +399,10 @@ private:
 
 	void setupMediaDevices();
 	void ensureOutgoingVideo();
+	void setMyEndpointType(const std::string &endpoint, EndpointType type);
+	void setScreenEndpoint(std::string endpoint);
+	void setCameraEndpoint(std::string endpoint);
+	void addVideoOutput(const std::string &endpoint, SinkPointer sink);
 
 	[[nodiscard]] MTPInputGroupCall inputCall() const;
 
@@ -423,6 +439,8 @@ private:
 	uint64 _accessHash = 0;
 	uint32 _mySsrc = 0;
 	uint32 _screenSsrc = 0;
+	std::string _cameraEndpoint;
+	std::string _screenEndpoint;
 	TimeId _scheduleDate = 0;
 	base::flat_set<uint32> _mySsrcs;
 	mtpRequestId _createRequestId = 0;
@@ -443,12 +461,11 @@ private:
 	std::shared_ptr<tgcalls::VideoCaptureInterface> _screenCapture;
 	std::unique_ptr<Webrtc::VideoTrack> _screenOutgoing;
 	QString _screenDeviceId;
-	std::string _screenEndpoint;
 
 	rpl::event_stream<LevelUpdate> _levelUpdates;
 	rpl::event_stream<StreamsVideoUpdate> _streamsVideoUpdated;
 	base::flat_set<std::string> _incomingVideoEndpoints;
-	base::flat_set<std::string> _activeVideoEndpoints;
+	base::flat_map<std::string, EndpointType> _activeVideoEndpoints;
 	rpl::variable<std::string> _videoEndpointLarge;
 	std::string _videoEndpointPinned;
 	std::unique_ptr<LargeTrack> _videoLargeTrackWrap;
@@ -385,7 +385,13 @@ private:
 	Row *findRow(not_null<PeerData*> participantPeer) const;
 	const Data::GroupCallParticipant *findParticipant(
 		const std::string &endpoint) const;
-	Row *findRow(const std::string &endpoint) const;
+	const std::string &computeScreenEndpoint(
+		not_null<const Data::GroupCallParticipant*> participant) const;
+	const std::string &computeCameraEndpoint(
+		not_null<const Data::GroupCallParticipant*> participant) const;
+	void setRowVideoEndpoint(
+		not_null<Row*> row,
+		const std::string &endpoint);
 
 	void appendInvitedUsers();
 	void scheduleRaisedHandStatusRemove();
@@ -408,6 +414,7 @@ private:
 	base::Timer _raisedHandStatusRemoveTimer;
 
 	base::flat_map<uint32, not_null<Row*>> _soundingRowBySsrc;
+	base::flat_map<std::string, not_null<Row*>> _videoEndpoints;
 	Ui::Animations::Basic _soundingAnimation;
 
 	crl::time _soundingAnimationHideLastTime = 0;
@@ -1036,6 +1043,7 @@ const std::string &Row::videoTrackEndpoint() const {
 
 void Row::clearVideoTrack() {
 	_videoTrackLifetime.destroy();
+	_videoTrackEndpoint = std::string();
 	_videoTrackShown = nullptr;
 	_videoTrack = nullptr;
 	_delegate->rowUpdateRow(this);
@@ -1139,6 +1147,25 @@ MembersController::~MembersController() {
 	base::take(_menu);
 }
 
+void MembersController::setRowVideoEndpoint(
+		not_null<Row*> row,
+		const std::string &endpoint) {
+	const auto was = row->videoTrackEndpoint();
+	if (was != endpoint) {
+		if (!was.empty()) {
+			_videoEndpoints.remove(was);
+		}
+		if (!endpoint.empty()) {
+			_videoEndpoints.emplace(endpoint, row);
+		}
+	}
+	if (endpoint.empty()) {
+		row->clearVideoTrack();
+	} else {
+		_call->addVideoOutput(endpoint, row->createVideoTrack(endpoint));
+	}
+}
+
 void MembersController::setupListChangeViewers() {
 	_call->real(
 	) | rpl::start_with_next([=](not_null<Data::GroupCall*> real) {
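
setRowVideoEndpoint() above becomes the single place where a member row gains or loses a video track: it keeps the endpoint-to-row map in sync and then either clears the row's track or requests a new output from the call. A small standalone model of that ownership bookkeeping (std::map in place of base::flat_map, an int id in place of the row object):

#include <iostream>
#include <map>
#include <string>

// endpoint -> row id; a row shows at most one endpoint at a time.
static std::map<std::string, int> videoEndpoints;
static std::map<int, std::string> rowEndpoint; // row id -> endpoint it shows

void setRowVideoEndpoint(int row, const std::string &endpoint) {
	const auto was = rowEndpoint[row];
	if (was != endpoint) {
		if (!was.empty()) {
			videoEndpoints.erase(was);
		}
		if (!endpoint.empty()) {
			videoEndpoints[endpoint] = row;
		}
		rowEndpoint[row] = endpoint;
	}
	if (endpoint.empty()) {
		std::cout << "row " << row << ": clear video track\n";
	} else {
		std::cout << "row " << row << ": show " << endpoint << '\n';
	}
}

int main() {
	setRowVideoEndpoint(1, "camera-a"); // row starts showing a camera
	setRowVideoEndpoint(1, "screen-a"); // switches to the screencast
	setRowVideoEndpoint(1, "");         // video stopped, track cleared
}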
@@ -1164,12 +1191,15 @@ void MembersController::setupListChangeViewers() {
 	) | rpl::filter([=](const std::string &largeEndpoint) {
 		return (_largeEndpoint != largeEndpoint);
 	}) | rpl::start_with_next([=](const std::string &largeEndpoint) {
+		if (_call->streamsVideo(_largeEndpoint)) {
 			if (const auto participant = findParticipant(_largeEndpoint)) {
-			if (participant->cameraEndpoint() == _largeEndpoint) {
 				if (const auto row = findRow(participant->peer)) {
-					_call->addVideoOutput(
-						_largeEndpoint,
-						row->createVideoTrack(_largeEndpoint));
+					const auto current = row->videoTrackEndpoint();
+					if (current.empty()
+						|| (computeScreenEndpoint(participant) == _largeEndpoint
+							&& computeCameraEndpoint(participant) == current)) {
+						setRowVideoEndpoint(row, _largeEndpoint);
+					}
 				}
 			}
 		}
@@ -1177,7 +1207,17 @@ void MembersController::setupListChangeViewers() {
 		if (const auto participant = findParticipant(_largeEndpoint)) {
 			if (const auto row = findRow(participant->peer)) {
 				if (row->videoTrackEndpoint() == _largeEndpoint) {
-					row->clearVideoTrack();
+					const auto &camera = computeCameraEndpoint(participant);
+					const auto &screen = computeScreenEndpoint(participant);
+					if (_largeEndpoint == camera
+						&& _call->streamsVideo(screen)) {
+						setRowVideoEndpoint(row, screen);
+					} else if (_largeEndpoint == screen
+						&& _call->streamsVideo(camera)) {
+						setRowVideoEndpoint(row, camera);
+					} else {
+						setRowVideoEndpoint(row, std::string());
+					}
 				}
 			}
 		}
@@ -1186,20 +1226,47 @@ void MembersController::setupListChangeViewers() {
 	_call->streamsVideoUpdates(
 	) | rpl::start_with_next([=](StreamsVideoUpdate update) {
 		Assert(update.endpoint != _largeEndpoint);
-		if (const auto participant = findParticipant(update.endpoint)) {
-			if (update.streams) {
-				if (participant->cameraEndpoint() == update.endpoint
-					|| !_call->streamsVideo(participant->cameraEndpoint())) {
-					if (const auto row = findRow(participant->peer)) {
-						_call->addVideoOutput(
-							update.endpoint,
-							row->createVideoTrack(update.endpoint));
-					}
-				}
-			} else {
-				if (const auto row = findRow(participant->peer)) {
-					if (row->videoTrackEndpoint() == update.endpoint) {
-						row->clearVideoTrack();
-					}
-				}
-			}
-		}
+		if (update.streams) {
+			if (const auto participant = findParticipant(update.endpoint)) {
+				if (const auto row = findRow(participant->peer)) {
+					const auto &camera = computeCameraEndpoint(participant);
+					const auto &screen = computeScreenEndpoint(participant);
+					if (update.endpoint == camera
+						&& (!_call->streamsVideo(screen)
+							|| _largeEndpoint == screen)) {
+						setRowVideoEndpoint(row, camera);
+					} else if (update.endpoint == screen
+						&& (_largeEndpoint != screen)) {
+						setRowVideoEndpoint(row, screen);
+					}
+				}
+			}
+		} else {
+			const auto i = _videoEndpoints.find(update.endpoint);
+			if (i != end(_videoEndpoints)) {
+				const auto row = i->second;
+				const auto real = _call->lookupReal();
+				Assert(real != nullptr);
+				const auto &participants = real->participants();
+				const auto j = ranges::find(
+					participants,
+					row->peer(),
+					&Data::GroupCallParticipant::peer);
+				if (j == end(participants)) {
+					setRowVideoEndpoint(row, std::string());
+				} else {
+					const auto &camera = computeCameraEndpoint(&*j);
+					const auto &screen = computeScreenEndpoint(&*j);
+					if (update.endpoint == camera
+						&& (_largeEndpoint != screen)
+						&& _call->streamsVideo(screen)) {
+						setRowVideoEndpoint(row, screen);
+					} else if (update.endpoint == screen
+						&& (_largeEndpoint != camera)
+						&& _call->streamsVideo(camera)) {
+						setRowVideoEndpoint(row, camera);
+					} else {
+						setRowVideoEndpoint(row, std::string());
+					}
+				}
+			}
+		}
@@ -1225,7 +1292,7 @@ void MembersController::setupListChangeViewers() {
 void MembersController::subscribeToChanges(not_null<Data::GroupCall*> real) {
 	_fullCount = real->fullCountValue();
 
-	real->participantsSliceAdded(
+	real->participantsReloaded(
 	) | rpl::start_with_next([=] {
 		prepareRows(real);
 	}, _lifetime);
@@ -1531,12 +1598,33 @@ const Data::GroupCallParticipant *MembersController::findParticipant(
 		return nullptr;
 	}
 	const auto real = _call->lookupReal();
-	return real ? real->participantByEndpoint(endpoint) : nullptr;
+	if (!real) {
+		return nullptr;
+	} else if (endpoint == _call->screenSharingEndpoint()
+		|| endpoint == _call->cameraSharingEndpoint()) {
+		const auto &participants = real->participants();
+		const auto i = ranges::find(
+			participants,
+			_call->joinAs(),
+			&Data::GroupCallParticipant::peer);
+		return (i != end(participants)) ? &*i : nullptr;
+	} else {
+		return real->participantByEndpoint(endpoint);
+	}
 }
 
-Row *MembersController::findRow(const std::string &endpoint) const {
-	const auto participant = findParticipant(endpoint);
-	return participant ? findRow(participant->peer) : nullptr;
+const std::string &MembersController::computeScreenEndpoint(
+		not_null<const Data::GroupCallParticipant*> participant) const {
+	return (participant->peer == _call->joinAs())
+		? _call->screenSharingEndpoint()
+		: participant->screenEndpoint();
+}
+
+const std::string &MembersController::computeCameraEndpoint(
+		not_null<const Data::GroupCallParticipant*> participant) const {
+	return (participant->peer == _call->joinAs())
+		? _call->cameraSharingEndpoint()
+		: participant->cameraEndpoint();
 }
 
 Main::Session &MembersController::session() const {
@@ -1894,8 +1982,8 @@ base::unique_qptr<Ui::PopupMenu> MembersController::createRowContextMenu(
 		participantPeer,
 		&Data::GroupCallParticipant::peer);
 	if (i != end(participants)) {
-		const auto camera = i->cameraEndpoint();
-		const auto screen = i->screenEndpoint();
+		const auto &camera = computeCameraEndpoint(&*i);
+		const auto &screen = computeScreenEndpoint(&*i);
 		const auto streamsScreen = _call->streamsVideo(screen);
 		if (streamsScreen || _call->streamsVideo(camera)) {
 			const auto callback = [=] {
@@ -722,7 +722,7 @@ void Panel::refreshLeftButton() {
 		&st::groupCallVideoActiveSmall);
 	_video->show();
 	_video->setClickedCallback([=] {
-		_call->toggleVideo(!_call->isCameraSharing());
+		_call->toggleVideo(!_call->isSharingCamera());
 	});
 	_video->setText(tr::lng_group_call_video());
 	_video->setColorOverrides(_mute->colorOverrides());
@@ -113,42 +113,50 @@ void GroupCall::requestParticipants() {
 			: _nextOffset),
 		MTP_int(kRequestPerPage)
 	)).done([=](const MTPphone_GroupParticipants &result) {
-		_participantsRequestId = 0;
-		processSavedFullCall();
 		result.match([&](const MTPDphone_groupParticipants &data) {
+			_participantsRequestId = 0;
+			const auto reloaded = processSavedFullCall();
 			_nextOffset = qs(data.vnext_offset());
 			_peer->owner().processUsers(data.vusers());
 			_peer->owner().processChats(data.vchats());
 			applyParticipantsSlice(
 				data.vparticipants().v,
-				ApplySliceSource::SliceLoaded);
+				(reloaded
+					? ApplySliceSource::FullReloaded
+					: ApplySliceSource::SliceLoaded));
 			setServerParticipantsCount(data.vcount().v);
 			if (data.vparticipants().v.isEmpty()) {
 				_allParticipantsLoaded = true;
 			}
 			finishParticipantsSliceRequest();
+			if (reloaded) {
+				_participantsReloaded.fire({});
+			}
 		});
 	}).fail([=](const MTP::Error &error) {
 		_participantsRequestId = 0;
-		processSavedFullCall();
+		const auto reloaded = processSavedFullCall();
 		setServerParticipantsCount(_participants.size());
 		_allParticipantsLoaded = true;
 		finishParticipantsSliceRequest();
+		if (reloaded) {
+			_participantsReloaded.fire({});
+		}
	}).send();
 }
 
-void GroupCall::processSavedFullCall() {
+bool GroupCall::processSavedFullCall() {
 	if (!_savedFull) {
-		return;
+		return false;
 	}
 	_reloadRequestId = 0;
 	processFullCallFields(*base::take(_savedFull));
+	return true;
 }
 
 void GroupCall::finishParticipantsSliceRequest() {
 	computeParticipantsCount();
 	processQueuedUpdates();
-	_participantsSliceAdded.fire({});
 }
 
 void GroupCall::setServerParticipantsCount(int count) {
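
The new FullReloaded source lets applyParticipantsSlice() tell a complete reload (where per-participant updates would be redundant and consumers instead get a single participantsReloaded() notification afterwards) apart from an incremental slice. A sketch of that decision, with an illustrative enum and stand-in notification functions:

#include <iostream>

enum class ApplySliceSource { FullReloaded, SliceLoaded, UnknownLoaded, UpdateReceived };

// Model of the commit's rule: per-participant updates are fired for every
// source except a full reload, which is announced once afterwards.
void applyOneParticipant(ApplySliceSource sliceSource) {
	if (sliceSource != ApplySliceSource::FullReloaded) {
		std::cout << "fire participantUpdated\n";
	}
}

void finishFullReload() {
	std::cout << "fire participantsReloaded\n";
}

int main() {
	applyOneParticipant(ApplySliceSource::SliceLoaded);  // fires per-participant
	applyOneParticipant(ApplySliceSource::FullReloaded); // stays silent
	finishFullReload();                                  // one reload notification instead
}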
@@ -237,8 +245,8 @@ const GroupCallParticipant *GroupCall::participantByEndpoint(
 	return nullptr;
 }
 
-rpl::producer<> GroupCall::participantsSliceAdded() {
-	return _participantsSliceAdded.events();
+rpl::producer<> GroupCall::participantsReloaded() {
+	return _participantsReloaded.events();
 }
 
 auto GroupCall::participantUpdated() const
@@ -348,7 +356,7 @@ void GroupCall::processFullCallFields(const MTPphone_GroupCall &call) {
 
 		applyParticipantsSlice(
 			participants,
-			ApplySliceSource::SliceLoaded);
+			ApplySliceSource::FullReloaded);
 		_nextOffset = nextOffset;
 
 		applyCallFields(data);
@@ -362,6 +370,7 @@ void GroupCall::processFullCall(const MTPphone_GroupCall &call) {
 	processFullCallUsersChats(call);
 	processFullCallFields(call);
 	finishParticipantsSliceRequest();
+	_participantsReloaded.fire({});
 }
 
 void GroupCall::applyCallFields(const MTPDgroupCall &data) {
@@ -540,7 +549,7 @@ void GroupCall::applyParticipantsSlice(
 			eraseVideoSsrcs(*i);
 			_speakingByActiveFinishes.remove(participantPeer);
 			_participants.erase(i);
-			if (sliceSource != ApplySliceSource::SliceLoaded) {
+			if (sliceSource != ApplySliceSource::FullReloaded) {
 				_participantUpdates.fire(std::move(update));
 			}
 		}
@@ -578,12 +587,9 @@ void GroupCall::applyParticipantsSlice(
 				&& (!was || was->onlyMinLoaded);
 			const auto raisedHandRating
 				= data.vraise_hand_rating().value_or_empty();
-			const auto hasVideoParamsInformation = true/*(sliceSource
-				!= ApplySliceSource::UpdateConstructed)*/;
 			const auto value = Participant{
 				.peer = participantPeer,
-				.videoParams = (hasVideoParamsInformation
-					? Calls::ParseVideoParams(
+				.videoParams = Calls::ParseVideoParams(
 					(data.vvideo()
 						? data.vvideo()->c_dataJSON().vdata().v
 						: QByteArray()),
@@ -592,10 +598,7 @@ void GroupCall::applyParticipantsSlice(
 						: QByteArray()),
 					(i != end(_participants)
 						? i->videoParams
-						: nullptr))
-					: (i != end(_participants))
-					? i->videoParams
-					: nullptr),
+						: nullptr)),
 				.date = data.vdate().v,
 				.lastActive = lastActive,
 				.raisedHandRating = raisedHandRating,
@@ -633,7 +636,7 @@ void GroupCall::applyParticipantsSlice(
 			if (data.is_just_joined()) {
 				++_serverParticipantsCount;
 			}
-			if (sliceSource != ApplySliceSource::SliceLoaded) {
+			if (sliceSource != ApplySliceSource::FullReloaded) {
 				_participantUpdates.fire({
 					.was = was,
 					.now = value,
@@ -111,7 +111,7 @@ public:
 	[[nodiscard]] const Participant *participantByEndpoint(
 		const std::string &endpoint) const;
 
-	[[nodiscard]] rpl::producer<> participantsSliceAdded();
+	[[nodiscard]] rpl::producer<> participantsReloaded();
 	[[nodiscard]] rpl::producer<ParticipantUpdate> participantUpdated() const;
 
 	void enqueueUpdate(const MTPUpdate &update);
@@ -146,6 +146,7 @@ public:
 
 private:
 	enum class ApplySliceSource {
+		FullReloaded,
 		SliceLoaded,
 		UnknownLoaded,
 		UpdateReceived,
@@ -175,7 +176,7 @@ private:
 	void processFullCallFields(const MTPphone_GroupCall &call);
 	[[nodiscard]] bool requestParticipantsAfterReload(
 		const MTPphone_GroupCall &call) const;
-	void processSavedFullCall();
+	[[nodiscard]] bool processSavedFullCall();
 	void finishParticipantsSliceRequest();
 
 	void emplaceVideoSsrcs(const Participant &participant);
@@ -218,7 +219,7 @@ private:
 	mtpRequestId _unknownParticipantPeersRequestId = 0;
 
 	rpl::event_stream<ParticipantUpdate> _participantUpdates;
-	rpl::event_stream<> _participantsSliceAdded;
+	rpl::event_stream<> _participantsReloaded;
 
 	bool _joinMuted = false;
 	bool _canChangeJoinMuted = true;
@@ -309,7 +309,7 @@ rpl::producer<Ui::GroupCallBarContent> GroupCallTracker::ContentByCall(
 		}
 	}, lifetime);
 
-	call->participantsSliceAdded(
+	call->participantsReloaded(
 	) | rpl::filter([=] {
 		return RegenerateUserpics(state, call, userpicSize);
 	}) | rpl::start_with_next(pushNext, lifetime);
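
Downstream code that used participantsSliceAdded() now listens to participantsReloaded(), which fires once per full participants reload rather than per loaded slice. Outside of rpl, the contract boils down to a plain notification, as in this illustrative stand-in:

#include <functional>
#include <iostream>
#include <vector>

// Minimal event-stream stand-in for rpl::event_stream<> / rpl::producer<>.
class ReloadSignal {
public:
	void subscribe(std::function<void()> handler) {
		handlers.push_back(std::move(handler));
	}
	void fire() {
		for (const auto &handler : handlers) {
			handler();
		}
	}

private:
	std::vector<std::function<void()>> handlers;
};

int main() {
	ReloadSignal participantsReloaded;
	participantsReloaded.subscribe([] {
		std::cout << "regenerate userpics / rebuild member rows\n";
	});
	participantsReloaded.fire(); // one notification per full participants reload
}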