Improve voice/video chat members management.

This commit is contained in:
John Preston 2021-05-03 21:43:24 +04:00
parent 2e400d88d3
commit 54c2769d8a
7 changed files with 457 additions and 161 deletions

View file

@ -108,6 +108,11 @@ constexpr auto kPlayConnectingEach = crl::time(1056) + 2 * crl::time(1000);
return video.value("endpoint").toString().toStdString();
}
// Shared immutable empty string, handed out by reference from the
// endpoint accessors when there is no value to report.
[[nodiscard]] const std::string &EmptyString() {
	static const std::string result;
	return result;
}
} // namespace
class GroupCall::LoadPartTask final : public tgcalls::BroadcastPartTask {
@ -173,6 +178,10 @@ struct GroupCall::LargeTrack {
std::shared_ptr<Webrtc::SinkInterface> sink;
};
// Small by-value wrapper around a webrtc video sink, used as the
// argument type of GroupCall::addVideoOutput().
struct GroupCall::SinkPointer {
std::shared_ptr<Webrtc::SinkInterface> data;
};
[[nodiscard]] bool IsGroupCallAdmin(
not_null<PeerData*> peer,
not_null<PeerData*> participantPeer) {
@ -447,20 +456,29 @@ GroupCall::GroupCall(
GroupCall::~GroupCall() {
// Tear down both tgcalls instances (main call and screencast) first.
destroyController();
destroyScreencast();
}
bool GroupCall::isScreenSharing() const {
bool GroupCall::isSharingScreen() const {
return _screenOutgoing
&& (_screenOutgoing->state() == Webrtc::VideoState::Active);
}
bool GroupCall::isCameraSharing() const {
const std::string &GroupCall::screenSharingEndpoint() const {
return isSharingScreen() ? _screenEndpoint : EmptyString();
}
bool GroupCall::isSharingCamera() const {
return _cameraOutgoing
&& (_cameraOutgoing->state() == Webrtc::VideoState::Active);
}
const std::string &GroupCall::cameraSharingEndpoint() const {
return isSharingCamera() ? _cameraEndpoint : EmptyString();
}
QString GroupCall::screenSharingDeviceId() const {
return isScreenSharing() ? _screenDeviceId : QString();
return isSharingScreen() ? _screenDeviceId : QString();
}
void GroupCall::toggleVideo(bool active) {
@ -506,26 +524,11 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
setScheduledDate(date);
}, _lifetime);
const auto emptyEndpoint = std::string();
real->participantsSliceAdded(
real->participantsReloaded(
) | rpl::start_with_next([=] {
const auto &participants = real->participants();
for (const auto &participant : participants) {
const auto camera = participant.cameraEndpoint();
const auto screen = participant.screenEndpoint();
if (!camera.empty()
&& _activeVideoEndpoints.emplace(camera).second
&& _incomingVideoEndpoints.contains(camera)) {
_streamsVideoUpdated.fire({ camera, true });
}
if (!screen.empty()
&& _activeVideoEndpoints.emplace(screen).second
&& _incomingVideoEndpoints.contains(screen)) {
_streamsVideoUpdated.fire({ screen, true });
}
}
fillActiveVideoEndpoints();
}, _lifetime);
fillActiveVideoEndpoints();
using Update = Data::GroupCall::ParticipantUpdate;
real->participantUpdated(
@ -533,6 +536,14 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
auto newLarge = _videoEndpointLarge.current();
auto updateCameraNotStreams = std::string();
auto updateScreenNotStreams = std::string();
const auto regularEndpoint = [&](const std::string &endpoint)
-> const std::string & {
return (endpoint.empty()
|| endpoint == _cameraEndpoint
|| endpoint == _screenEndpoint)
? EmptyString()
: endpoint;
};
const auto guard = gsl::finally([&] {
if (newLarge.empty()) {
newLarge = chooseLargeVideoEndpoint();
@ -549,14 +560,16 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
});
const auto &wasCameraEndpoint = (data.was && data.was->videoParams)
? data.was->videoParams->camera.endpoint
: emptyEndpoint;
? regularEndpoint(data.was->videoParams->camera.endpoint)
: EmptyString();
const auto &nowCameraEndpoint = (data.now && data.now->videoParams)
? data.now->videoParams->camera.endpoint
: emptyEndpoint;
? regularEndpoint(data.now->videoParams->camera.endpoint)
: EmptyString();
if (wasCameraEndpoint != nowCameraEndpoint) {
if (!nowCameraEndpoint.empty()
&& _activeVideoEndpoints.emplace(nowCameraEndpoint).second
&& _activeVideoEndpoints.emplace(
nowCameraEndpoint,
EndpointType::Camera).second
&& _incomingVideoEndpoints.contains(nowCameraEndpoint)) {
_streamsVideoUpdated.fire({ nowCameraEndpoint, true });
}
@ -565,19 +578,21 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
&& _incomingVideoEndpoints.contains(wasCameraEndpoint)) {
updateCameraNotStreams = wasCameraEndpoint;
if (newLarge == wasCameraEndpoint) {
newLarge = std::string();
_videoEndpointPinned = newLarge = std::string();
}
}
}
const auto &wasScreenEndpoint = (data.was && data.was->videoParams)
? data.was->videoParams->screen.endpoint
: emptyEndpoint;
: EmptyString();
const auto &nowScreenEndpoint = (data.now && data.now->videoParams)
? data.now->videoParams->screen.endpoint
: emptyEndpoint;
: EmptyString();
if (wasScreenEndpoint != nowScreenEndpoint) {
if (!nowScreenEndpoint.empty()
&& _activeVideoEndpoints.emplace(nowScreenEndpoint).second
&& _activeVideoEndpoints.emplace(
nowScreenEndpoint,
EndpointType::Screen).second
&& _incomingVideoEndpoints.contains(nowScreenEndpoint)) {
_streamsVideoUpdated.fire({ nowScreenEndpoint, true });
}
@ -586,7 +601,7 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
&& _incomingVideoEndpoints.contains(wasScreenEndpoint)) {
updateScreenNotStreams = wasScreenEndpoint;
if (newLarge == wasScreenEndpoint) {
newLarge = std::string();
_videoEndpointPinned = newLarge = std::string();
}
}
}
@ -600,7 +615,8 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
return;
}
if (nowScreenEndpoint != newLarge
&& streamsVideo(nowScreenEndpoint)) {
&& streamsVideo(nowScreenEndpoint)
&& activeVideoEndpointType(newLarge) != EndpointType::Screen) {
newLarge = nowScreenEndpoint;
}
const auto &participants = real->participants();
@ -639,8 +655,8 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
newLarge = soundingEndpoint;
}
} else if ((nowSpeaking || nowSounding)
&& (nowScreenEndpoint != newLarge)
&& (nowCameraEndpoint != newLarge)
&& (activeVideoEndpointType(newLarge) != EndpointType::Screen)
&& streamsVideo(nowCameraEndpoint)) {
const auto participant = real->participantByEndpoint(newLarge);
const auto screen = participant
@ -701,6 +717,7 @@ void GroupCall::setState(State state) {
// Destroy controller before destroying Call Panel,
// so that the panel hide animation is smooth.
destroyController();
destroyScreencast();
}
switch (state) {
case State::HangingUp:
@ -833,6 +850,90 @@ void GroupCall::join(const MTPInputGroupCall &inputCall) {
});
}
// Registers or removes one of my own outgoing endpoints in the
// incoming/active endpoint sets, firing streams-video updates and
// adjusting the large-video selection accordingly.
//
// type == EndpointType::None means the endpoint stopped streaming.
void GroupCall::setMyEndpointType(
const std::string &endpoint,
EndpointType type) {
if (endpoint.empty()) {
return;
} else if (type == EndpointType::None) {
// Endpoint went away: drop it from both sets; if it was shown
// large, clear the pin and choose another large endpoint.
const auto was1 = _incomingVideoEndpoints.remove(endpoint);
const auto was2 = _activeVideoEndpoints.remove(endpoint);
if (was1 && was2) {
auto newLarge = _videoEndpointLarge.current();
if (newLarge == endpoint) {
_videoEndpointPinned = std::string();
_videoEndpointLarge = chooseLargeVideoEndpoint();
}
_streamsVideoUpdated.fire({ endpoint, false });
}
} else {
// Endpoint appeared: add it to both sets and notify only when it
// is newly present in both (i.e. actually starts streaming).
const auto now1 = _incomingVideoEndpoints.emplace(endpoint).second;
const auto now2 = _activeVideoEndpoints.emplace(
endpoint,
type).second;
if (now1 && now2) {
_streamsVideoUpdated.fire({ endpoint, true });
}
// With no explicit pin: a screen share replaces any non-screen
// large video, a camera is promoted only when nothing is large.
const auto nowLarge = activeVideoEndpointType(
_videoEndpointLarge.current());
if (_videoEndpointPinned.empty()
&& ((type == EndpointType::Screen
&& nowLarge != EndpointType::Screen)
|| (type == EndpointType::Camera
&& nowLarge == EndpointType::None))) {
_videoEndpointLarge = endpoint;
}
}
}
// Updates my own screencast endpoint id. The previous endpoint is
// unregistered before the new one is stored; the call instance is told
// to ignore the new id (so my own screencast is not rendered as an
// incoming stream) and the endpoint is registered again if a screen
// share is currently active.
void GroupCall::setScreenEndpoint(std::string endpoint) {
if (_screenEndpoint == endpoint) {
return;
}
if (!_screenEndpoint.empty()) {
setMyEndpointType(_screenEndpoint, EndpointType::None);
}
_screenEndpoint = std::move(endpoint);
if (_screenEndpoint.empty()) {
return;
}
if (_instance) {
_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
}
if (isSharingScreen()) {
setMyEndpointType(_screenEndpoint, EndpointType::Screen);
}
}
// Updates my own camera endpoint id: unregisters the previous value,
// stores the new one and registers it again if the camera is currently
// active. Mirrors setScreenEndpoint(), minus the ignore-list step.
void GroupCall::setCameraEndpoint(std::string endpoint) {
if (_cameraEndpoint == endpoint) {
return;
}
if (!_cameraEndpoint.empty()) {
setMyEndpointType(_cameraEndpoint, EndpointType::None);
}
_cameraEndpoint = std::move(endpoint);
if (_cameraEndpoint.empty()) {
return;
}
if (isSharingCamera()) {
setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
}
}
// Routes a video sink to the right producer: my own camera or screen
// capture for my endpoints, otherwise the call instance for a remote
// participant's incoming stream.
void GroupCall::addVideoOutput(
const std::string &endpoint,
SinkPointer sink) {
if (_cameraEndpoint == endpoint) {
_cameraCapture->setOutput(sink.data);
} else if (_screenEndpoint == endpoint) {
_screenCapture->setOutput(sink.data);
} else {
Assert(_instance != nullptr);
_instance->addIncomingVideoOutput(endpoint, sink.data);
}
}
void GroupCall::rejoin() {
rejoin(_joinAs);
}
@ -913,6 +1014,10 @@ void GroupCall::rejoin(not_null<PeerData*> as) {
applyQueuedSelfUpdates();
checkFirstTimeJoined();
sendSelfUpdate(SendUpdateType::VideoMuted);
if (_screenSsrc && isSharingScreen()) {
LOG(("Call Info: Screen rejoin after rejoin()."));
rejoinPresentation();
}
}).fail([=](const MTP::Error &error) {
const auto type = error.type();
LOG(("Call Error: Could not join, error: %1").arg(type));
@ -970,22 +1075,16 @@ void GroupCall::rejoinPresentation() {
const auto type = error.type();
LOG(("Call Error: "
"Could not screen join, error: %1").arg(type));
if (type == u"GROUPCALL_SSRC_DUPLICATE_MUCH") {
rejoinPresentation();
return;
} else if (type == u"GROUPCALL_JOIN_MISSING"_q
|| type == u"GROUPCALL_FORBIDDEN"_q) {
_screenSsrc = ssrc;
rejoin();
} else {
_screenSsrc = 0;
setScreenEndpoint(std::string());
}
//hangup();
//Ui::ShowMultilineToast({
// .text = { type == u"GROUPCALL_ANONYMOUS_FORBIDDEN"_q
// ? tr::lng_group_call_no_anonymous(tr::now)
// : type == u"GROUPCALL_PARTICIPANTS_TOO_MUCH"_q
// ? tr::lng_group_call_too_many(tr::now)
// : type == u"GROUPCALL_FORBIDDEN"_q
// ? tr::lng_group_not_accessible(tr::now)
// : Lang::Hard::ServerError() },
//});
}).send();
});
});
@ -999,12 +1098,14 @@ void GroupCall::leavePresentation() {
inputCall()
)).done([=](const MTPUpdates &updates) {
_screenSsrc = 0;
setScreenEndpoint(std::string());
_peer->session().api().applyUpdates(updates);
}).fail([=](const MTP::Error &error) {
const auto type = error.type();
LOG(("Call Error: "
"Could not screen leave, error: %1").arg(type));
_screenSsrc = 0;
setScreenEndpoint(std::string());
}).send();
}
@ -1233,11 +1334,9 @@ void GroupCall::toggleScheduleStartSubscribed(bool subscribed) {
}
void GroupCall::addVideoOutput(
const std::string &endpointId,
const std::string &endpoint,
not_null<Webrtc::VideoTrack*> track) {
if (_instance) {
_instance->addIncomingVideoOutput(endpointId, track->sink());
}
addVideoOutput(endpoint, { track->sink() });
}
void GroupCall::setMuted(MuteState mute) {
@ -1314,16 +1413,15 @@ void GroupCall::handlePossibleCreateOrJoinResponse(
setScreenInstanceMode(InstanceMode::Rtc);
data.vparams().match([&](const MTPDdataJSON &data) {
const auto json = data.vdata().v;
_screenEndpoint = ParseVideoEndpoint(json);
if (!_screenEndpoint.empty() && _instance) {
_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
}
setScreenEndpoint(ParseVideoEndpoint(json));
_screenInstance->setJoinResponsePayload(json.toStdString());
});
} else {
setInstanceMode(InstanceMode::Rtc);
data.vparams().match([&](const MTPDdataJSON &data) {
_instance->setJoinResponsePayload(data.vdata().v.toStdString());
const auto json = data.vdata().v;
setCameraEndpoint(ParseVideoEndpoint(json));
_instance->setJoinResponsePayload(json.toStdString());
});
checkMediaChannelDescriptions();
}
@ -1534,7 +1632,6 @@ void GroupCall::ensureOutgoingVideo() {
if (!_cameraCapture) {
_cameraCapture = _delegate->groupCallGetVideoCapture(
_cameraInputId);
_cameraCapture->setOutput(_cameraOutgoing->sink());
} else {
_cameraCapture->switchToDevice(_cameraInputId.toStdString());
}
@ -1542,8 +1639,12 @@ void GroupCall::ensureOutgoingVideo() {
_instance->setVideoCapture(_cameraCapture);
}
_cameraCapture->setState(tgcalls::VideoState::Active);
} else if (_cameraCapture) {
_cameraCapture->setState(tgcalls::VideoState::Inactive);
setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
} else {
if (_cameraCapture) {
_cameraCapture->setState(tgcalls::VideoState::Inactive);
}
setMyEndpointType(_cameraEndpoint, EndpointType::None);
}
sendSelfUpdate(SendUpdateType::VideoMuted);
applyMeInCallLocally();
@ -1559,7 +1660,6 @@ void GroupCall::ensureOutgoingVideo() {
tgcalls::VideoCaptureInterface::Create(
tgcalls::StaticThreads::getThreads(),
_screenDeviceId.toStdString()));
_screenCapture->setOutput(_screenOutgoing->sink());
} else {
_screenCapture->switchToDevice(_screenDeviceId.toStdString());
}
@ -1567,8 +1667,12 @@ void GroupCall::ensureOutgoingVideo() {
_screenInstance->setVideoCapture(_screenCapture);
}
_screenCapture->setState(tgcalls::VideoState::Active);
} else if (_screenCapture) {
_screenCapture->setState(tgcalls::VideoState::Inactive);
setMyEndpointType(_screenEndpoint, EndpointType::Screen);
} else {
if (_screenCapture) {
_screenCapture->setState(tgcalls::VideoState::Inactive);
}
setMyEndpointType(_screenEndpoint, EndpointType::None);
}
joinLeavePresentation();
}, _lifetime);
@ -1657,7 +1761,7 @@ void GroupCall::ensureControllerCreated() {
.incomingVideoSourcesUpdated = [=](
std::vector<std::string> endpointIds) {
crl::on_main(weak, [=, endpoints = std::move(endpointIds)] {
setIncomingVideoStreams(endpoints);
setIncomingVideoEndpoints(endpoints);
});
},
.requestBroadcastPart = [=](
@ -1722,14 +1826,15 @@ void GroupCall::ensureControllerCreated() {
}
_videoLargeTrackWrap->sink = Webrtc::CreateProxySink(
_videoLargeTrackWrap->track.sink());
_instance->addIncomingVideoOutput(
endpoint,
_videoLargeTrackWrap->sink);
addVideoOutput(endpoint, { _videoLargeTrackWrap->sink });
}, _lifetime);
updateInstanceMuteState();
updateInstanceVolumes();
if (!_screenEndpoint.empty()) {
_instance->setIgnoreVideoEndpointIds({ _screenEndpoint });
}
//raw->setAudioOutputDuckingEnabled(settings.callAudioDuckingEnabled());
}
@ -1909,10 +2014,10 @@ bool GroupCall::mediaChannelDescriptionsFill(
} else if (const auto byScreen
= real->participantPeerByScreenSsrc(ssrc)) {
addVideoChannel(byScreen, &ParticipantVideoParams::screen);
} else if (resolved(ssrc)) {
add(std::nullopt);
} else if (!resolved) {
_unresolvedSsrcs.emplace(ssrc);
} else if (resolved(ssrc)) {
add(std::nullopt);
}
}
return result;
@ -1926,25 +2031,33 @@ void GroupCall::mediaChannelDescriptionsCancel(
}
}
void GroupCall::setIncomingVideoStreams(
void GroupCall::setIncomingVideoEndpoints(
const std::vector<std::string> &endpoints) {
const auto large = _videoEndpointLarge.current();
auto newLarge = large;
if (!large.empty() && !ranges::contains(endpoints, large)) {
newLarge = _videoEndpointPinned = std::string();
}
auto newLarge = _videoEndpointLarge.current();
auto newLargeFound = false;
auto removed = _incomingVideoEndpoints;
for (const auto &endpoint : endpoints) {
const auto i = removed.find(endpoint);
const auto videoActive = _activeVideoEndpoints.contains(endpoint);
if (i != end(removed)) {
removed.erase(i);
} else {
const auto feedOne = [&](const std::string &endpoint) {
if (endpoint.empty()) {
return;
} else if (endpoint == newLarge) {
newLargeFound = true;
}
if (!removed.remove(endpoint)) {
_incomingVideoEndpoints.emplace(endpoint);
if (videoActive) {
if (_activeVideoEndpoints.contains(endpoint)) {
_streamsVideoUpdated.fire({ endpoint, true });
}
}
};
for (const auto &endpoint : endpoints) {
if (endpoint != _cameraEndpoint && endpoint != _screenEndpoint) {
feedOne(endpoint);
}
}
feedOne(cameraSharingEndpoint());
feedOne(screenSharingEndpoint());
if (!newLarge.empty() && !newLargeFound) {
newLarge = _videoEndpointPinned = std::string();
}
if (newLarge.empty()) {
_videoEndpointLarge = chooseLargeVideoEndpoint();
@ -1956,6 +2069,65 @@ void GroupCall::setIncomingVideoStreams(
}
}
// Rebuilds _activeVideoEndpoints from the real call's participants
// list plus my own sharing endpoints, firing streams-video updates for
// endpoints that appeared or disappeared and re-choosing the large
// video if the current one went away.
void GroupCall::fillActiveVideoEndpoints() {
const auto real = lookupReal();
Assert(real != nullptr);
const auto &participants = real->participants();
auto newLarge = _videoEndpointLarge.current();
auto newLargeFound = false;
// Snapshot of the previous set; entries still here after the fill
// are the ones that disappeared.
auto removed = _activeVideoEndpoints;
const auto feedOne = [&](
const std::string &endpoint,
EndpointType type) {
if (endpoint.empty()) {
return;
} else if (endpoint == newLarge) {
newLargeFound = true;
}
if (!removed.remove(endpoint)) {
// New endpoint: notify only if it is already incoming.
_activeVideoEndpoints.emplace(endpoint, type);
if (_incomingVideoEndpoints.contains(endpoint)) {
_streamsVideoUpdated.fire({ endpoint, true });
}
}
};
for (const auto &participant : participants) {
// Skip my own endpoints here; they are fed explicitly below via
// the sharing accessors, which respect the active/inactive state.
const auto camera = participant.cameraEndpoint();
if (camera != _cameraEndpoint && camera != _screenEndpoint) {
feedOne(camera, EndpointType::Camera);
}
const auto screen = participant.screenEndpoint();
if (screen != _cameraEndpoint && screen != _screenEndpoint) {
feedOne(screen, EndpointType::Screen);
}
}
feedOne(cameraSharingEndpoint(), EndpointType::Camera);
feedOne(screenSharingEndpoint(), EndpointType::Screen);
// The large endpoint vanished: drop the pin and choose a new one.
if (!newLarge.empty() && !newLargeFound) {
newLarge = _videoEndpointPinned = std::string();
}
if (newLarge.empty()) {
_videoEndpointLarge = chooseLargeVideoEndpoint();
}
for (const auto &[endpoint, type] : removed) {
if (_activeVideoEndpoints.remove(endpoint)) {
_streamsVideoUpdated.fire({ endpoint, false });
}
}
}
// Returns which kind of video (camera / screen) an active endpoint
// carries, or EndpointType::None for empty or unknown endpoints.
GroupCall::EndpointType GroupCall::activeVideoEndpointType(
		const std::string &endpoint) const {
	if (endpoint.empty()) {
		return EndpointType::None;
	}
	const auto found = _activeVideoEndpoints.find(endpoint);
	if (found == end(_activeVideoEndpoints)) {
		return EndpointType::None;
	}
	return found->second;
}
std::string GroupCall::chooseLargeVideoEndpoint() const {
const auto real = lookupReal();
if (!real) {
@ -1965,9 +2137,13 @@ std::string GroupCall::chooseLargeVideoEndpoint() const {
auto screenEndpoint = std::string();
auto speakingEndpoint = std::string();
auto soundingEndpoint = std::string();
const auto &myCameraEndpoint = cameraSharingEndpoint();
const auto &myScreenEndpoint = screenSharingEndpoint();
const auto &participants = real->participants();
for (const auto &endpoint : _incomingVideoEndpoints) {
if (!_activeVideoEndpoints.contains(endpoint)) {
if (!_activeVideoEndpoints.contains(endpoint)
|| endpoint == _cameraEndpoint
|| endpoint == _screenEndpoint) {
continue;
}
if (const auto participant = real->participantByEndpoint(endpoint)) {
@ -1989,11 +2165,17 @@ std::string GroupCall::chooseLargeVideoEndpoint() const {
}
return !screenEndpoint.empty()
? screenEndpoint
: streamsVideo(myScreenEndpoint)
? myScreenEndpoint
: !speakingEndpoint.empty()
? speakingEndpoint
: !soundingEndpoint.empty()
? soundingEndpoint
: anyEndpoint;
: !anyEndpoint.empty()
? anyEndpoint
: streamsVideo(myCameraEndpoint)
? myCameraEndpoint
: std::string();
}
void GroupCall::updateInstanceMuteState() {
@ -2124,25 +2306,22 @@ void GroupCall::checkJoined() {
if (!ranges::contains(result.v, MTP_int(_mySsrc))) {
LOG(("Call Info: Rejoin after no _mySsrc in checkGroupCall."));
rejoin();
} else if (state() == State::Connecting) {
_checkJoinedTimer.callOnce(kCheckJoinedTimeout);
}
if (_screenSsrc
&& !ranges::contains(result.v, MTP_int(_screenSsrc))
&& isScreenSharing()) {
LOG(("Call Info: "
"Screen rejoin after _screenSsrc not found."));
rejoinPresentation();
} else {
if (state() == State::Connecting) {
_checkJoinedTimer.callOnce(kCheckJoinedTimeout);
}
if (_screenSsrc
&& !ranges::contains(result.v, MTP_int(_screenSsrc))
&& isSharingScreen()) {
LOG(("Call Info: "
"Screen rejoin after _screenSsrc not found."));
rejoinPresentation();
}
}
}).fail([=](const MTP::Error &error) {
LOG(("Call Info: Full rejoin after error '%1' in checkGroupCall."
).arg(error.type()));
rejoin();
if (_screenSsrc && isScreenSharing()) {
LOG(("Call Info: "
"Full screen rejoin after _screenSsrc not found."));
rejoinPresentation();
}
}).send();
}
@ -2493,4 +2672,12 @@ void GroupCall::destroyController() {
}
}
// Tears down the screencast tgcalls instance, if one was created.
void GroupCall::destroyScreencast() {
	if (!_screenInstance) {
		return;
	}
	DEBUG_LOG(("Call Info: Destroying call screen controller.."));
	_screenInstance.reset();
	DEBUG_LOG(("Call Info: Call screen controller destroyed."));
}
} // namespace Calls

View file

@ -232,7 +232,7 @@ public:
[[nodiscard]] bool streamsVideo(const std::string &endpoint) const {
return !endpoint.empty()
&& _incomingVideoEndpoints.contains(endpoint)
&& _activeVideoEndpoints.contains(endpoint);
&& activeVideoEndpointType(endpoint) != EndpointType::None;
}
[[nodiscard]] const std::string &videoEndpointPinned() const {
return _videoEndpointPinned;
@ -265,8 +265,10 @@ public:
void setCurrentAudioDevice(bool input, const QString &deviceId);
void setCurrentVideoDevice(const QString &deviceId);
[[nodiscard]] bool isScreenSharing() const;
[[nodiscard]] bool isCameraSharing() const;
[[nodiscard]] bool isSharingScreen() const;
[[nodiscard]] const std::string &screenSharingEndpoint() const;
[[nodiscard]] bool isSharingCamera() const;
[[nodiscard]] const std::string &cameraSharingEndpoint() const;
[[nodiscard]] QString screenSharingDeviceId() const;
void toggleVideo(bool active);
void toggleScreenSharing(std::optional<QString> uniqueId);
@ -300,6 +302,7 @@ public:
private:
using GlobalShortcutValue = base::GlobalShortcutValue;
struct LargeTrack;
struct SinkPointer;
struct LoadingPart {
std::shared_ptr<LoadPartTask> task;
@ -321,6 +324,11 @@ private:
RaiseHand,
VideoMuted,
};
enum class EndpointType {
None,
Camera,
Screen,
};
[[nodiscard]] bool mediaChannelDescriptionsFill(
not_null<MediaChannelDescriptionsTask*> task,
@ -370,8 +378,12 @@ private:
void stopConnectingSound();
void playConnectingSoundOnce();
void setIncomingVideoStreams(const std::vector<std::string> &endpoints);
void setIncomingVideoEndpoints(
const std::vector<std::string> &endpoints);
void fillActiveVideoEndpoints();
[[nodiscard]] std::string chooseLargeVideoEndpoint() const;
[[nodiscard]] EndpointType activeVideoEndpointType(
const std::string &endpoint) const;
void editParticipant(
not_null<PeerData*> participantPeer,
@ -387,6 +399,10 @@ private:
void setupMediaDevices();
void ensureOutgoingVideo();
void setMyEndpointType(const std::string &endpoint, EndpointType type);
void setScreenEndpoint(std::string endpoint);
void setCameraEndpoint(std::string endpoint);
void addVideoOutput(const std::string &endpoint, SinkPointer sink);
[[nodiscard]] MTPInputGroupCall inputCall() const;
@ -423,6 +439,8 @@ private:
uint64 _accessHash = 0;
uint32 _mySsrc = 0;
uint32 _screenSsrc = 0;
std::string _cameraEndpoint;
std::string _screenEndpoint;
TimeId _scheduleDate = 0;
base::flat_set<uint32> _mySsrcs;
mtpRequestId _createRequestId = 0;
@ -443,12 +461,11 @@ private:
std::shared_ptr<tgcalls::VideoCaptureInterface> _screenCapture;
std::unique_ptr<Webrtc::VideoTrack> _screenOutgoing;
QString _screenDeviceId;
std::string _screenEndpoint;
rpl::event_stream<LevelUpdate> _levelUpdates;
rpl::event_stream<StreamsVideoUpdate> _streamsVideoUpdated;
base::flat_set<std::string> _incomingVideoEndpoints;
base::flat_set<std::string> _activeVideoEndpoints;
base::flat_map<std::string, EndpointType> _activeVideoEndpoints;
rpl::variable<std::string> _videoEndpointLarge;
std::string _videoEndpointPinned;
std::unique_ptr<LargeTrack> _videoLargeTrackWrap;

View file

@ -385,7 +385,13 @@ private:
Row *findRow(not_null<PeerData*> participantPeer) const;
const Data::GroupCallParticipant *findParticipant(
const std::string &endpoint) const;
Row *findRow(const std::string &endpoint) const;
const std::string &computeScreenEndpoint(
not_null<const Data::GroupCallParticipant*> participant) const;
const std::string &computeCameraEndpoint(
not_null<const Data::GroupCallParticipant*> participant) const;
void setRowVideoEndpoint(
not_null<Row*> row,
const std::string &endpoint);
void appendInvitedUsers();
void scheduleRaisedHandStatusRemove();
@ -408,6 +414,7 @@ private:
base::Timer _raisedHandStatusRemoveTimer;
base::flat_map<uint32, not_null<Row*>> _soundingRowBySsrc;
base::flat_map<std::string, not_null<Row*>> _videoEndpoints;
Ui::Animations::Basic _soundingAnimation;
crl::time _soundingAnimationHideLastTime = 0;
@ -1036,6 +1043,7 @@ const std::string &Row::videoTrackEndpoint() const {
void Row::clearVideoTrack() {
_videoTrackLifetime.destroy();
_videoTrackEndpoint = std::string();
_videoTrackShown = nullptr;
_videoTrack = nullptr;
_delegate->rowUpdateRow(this);
@ -1139,6 +1147,25 @@ MembersController::~MembersController() {
base::take(_menu);
}
// Switches the video endpoint shown on a member row, keeping the
// endpoint -> row index (_videoEndpoints) in sync and creating or
// clearing the row's video track. An empty endpoint clears the video.
void MembersController::setRowVideoEndpoint(
not_null<Row*> row,
const std::string &endpoint) {
const auto was = row->videoTrackEndpoint();
if (was != endpoint) {
if (!was.empty()) {
_videoEndpoints.remove(was);
}
if (!endpoint.empty()) {
_videoEndpoints.emplace(endpoint, row);
}
}
if (endpoint.empty()) {
row->clearVideoTrack();
} else {
_call->addVideoOutput(endpoint, row->createVideoTrack(endpoint));
}
}
void MembersController::setupListChangeViewers() {
_call->real(
) | rpl::start_with_next([=](not_null<Data::GroupCall*> real) {
@ -1164,12 +1191,15 @@ void MembersController::setupListChangeViewers() {
) | rpl::filter([=](const std::string &largeEndpoint) {
return (_largeEndpoint != largeEndpoint);
}) | rpl::start_with_next([=](const std::string &largeEndpoint) {
if (const auto participant = findParticipant(_largeEndpoint)) {
if (participant->cameraEndpoint() == _largeEndpoint) {
if (_call->streamsVideo(_largeEndpoint)) {
if (const auto participant = findParticipant(_largeEndpoint)) {
if (const auto row = findRow(participant->peer)) {
_call->addVideoOutput(
_largeEndpoint,
row->createVideoTrack(_largeEndpoint));
const auto current = row->videoTrackEndpoint();
if (current.empty()
|| (computeScreenEndpoint(participant) == _largeEndpoint
&& computeCameraEndpoint(participant) == current)) {
setRowVideoEndpoint(row, _largeEndpoint);
}
}
}
}
@ -1177,7 +1207,17 @@ void MembersController::setupListChangeViewers() {
if (const auto participant = findParticipant(_largeEndpoint)) {
if (const auto row = findRow(participant->peer)) {
if (row->videoTrackEndpoint() == _largeEndpoint) {
row->clearVideoTrack();
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (_largeEndpoint == camera
&& _call->streamsVideo(screen)) {
setRowVideoEndpoint(row, screen);
} else if (_largeEndpoint == screen
&& _call->streamsVideo(camera)) {
setRowVideoEndpoint(row, camera);
} else {
setRowVideoEndpoint(row, std::string());
}
}
}
}
@ -1186,20 +1226,47 @@ void MembersController::setupListChangeViewers() {
_call->streamsVideoUpdates(
) | rpl::start_with_next([=](StreamsVideoUpdate update) {
Assert(update.endpoint != _largeEndpoint);
if (const auto participant = findParticipant(update.endpoint)) {
if (update.streams) {
if (participant->cameraEndpoint() == update.endpoint
|| !_call->streamsVideo(participant->cameraEndpoint())) {
if (const auto row = findRow(participant->peer)) {
_call->addVideoOutput(
update.endpoint,
row->createVideoTrack(update.endpoint));
if (update.streams) {
if (const auto participant = findParticipant(update.endpoint)) {
if (const auto row = findRow(participant->peer)) {
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (update.endpoint == camera
&& (!_call->streamsVideo(screen)
|| _largeEndpoint == screen)) {
setRowVideoEndpoint(row, camera);
} else if (update.endpoint == screen
&& (_largeEndpoint != screen)) {
setRowVideoEndpoint(row, screen);
}
}
} else {
if (const auto row = findRow(participant->peer)) {
if (row->videoTrackEndpoint() == update.endpoint) {
row->clearVideoTrack();
}
} else {
const auto i = _videoEndpoints.find(update.endpoint);
if (i != end(_videoEndpoints)) {
const auto row = i->second;
const auto real = _call->lookupReal();
Assert(real != nullptr);
const auto &participants = real->participants();
const auto j = ranges::find(
participants,
row->peer(),
&Data::GroupCallParticipant::peer);
if (j == end(participants)) {
setRowVideoEndpoint(row, std::string());
} else {
const auto &camera = computeCameraEndpoint(&*j);
const auto &screen = computeScreenEndpoint(&*j);
if (update.endpoint == camera
&& (_largeEndpoint != screen)
&& _call->streamsVideo(screen)) {
setRowVideoEndpoint(row, screen);
} else if (update.endpoint == screen
&& (_largeEndpoint != camera)
&& _call->streamsVideo(camera)) {
setRowVideoEndpoint(row, camera);
} else {
setRowVideoEndpoint(row, std::string());
}
}
}
@ -1225,7 +1292,7 @@ void MembersController::setupListChangeViewers() {
void MembersController::subscribeToChanges(not_null<Data::GroupCall*> real) {
_fullCount = real->fullCountValue();
real->participantsSliceAdded(
real->participantsReloaded(
) | rpl::start_with_next([=] {
prepareRows(real);
}, _lifetime);
@ -1531,12 +1598,33 @@ const Data::GroupCallParticipant *MembersController::findParticipant(
return nullptr;
}
const auto real = _call->lookupReal();
return real ? real->participantByEndpoint(endpoint) : nullptr;
if (!real) {
return nullptr;
} else if (endpoint == _call->screenSharingEndpoint()
|| endpoint == _call->cameraSharingEndpoint()) {
const auto &participants = real->participants();
const auto i = ranges::find(
participants,
_call->joinAs(),
&Data::GroupCallParticipant::peer);
return (i != end(participants)) ? &*i : nullptr;
} else {
return real->participantByEndpoint(endpoint);
}
}
Row *MembersController::findRow(const std::string &endpoint) const {
const auto participant = findParticipant(endpoint);
return participant ? findRow(participant->peer) : nullptr;
// For my own row the screen endpoint comes from the call's local
// sharing state; other participants report theirs in participant data.
const std::string &MembersController::computeScreenEndpoint(
		not_null<const Data::GroupCallParticipant*> participant) const {
	if (participant->peer == _call->joinAs()) {
		return _call->screenSharingEndpoint();
	}
	return participant->screenEndpoint();
}
// For my own row the camera endpoint comes from the call's local
// sharing state; other participants report theirs in participant data.
const std::string &MembersController::computeCameraEndpoint(
		not_null<const Data::GroupCallParticipant*> participant) const {
	if (participant->peer == _call->joinAs()) {
		return _call->cameraSharingEndpoint();
	}
	return participant->cameraEndpoint();
}
Main::Session &MembersController::session() const {
@ -1894,8 +1982,8 @@ base::unique_qptr<Ui::PopupMenu> MembersController::createRowContextMenu(
participantPeer,
&Data::GroupCallParticipant::peer);
if (i != end(participants)) {
const auto camera = i->cameraEndpoint();
const auto screen = i->screenEndpoint();
const auto &camera = computeCameraEndpoint(&*i);
const auto &screen = computeScreenEndpoint(&*i);
const auto streamsScreen = _call->streamsVideo(screen);
if (streamsScreen || _call->streamsVideo(camera)) {
const auto callback = [=] {

View file

@ -722,7 +722,7 @@ void Panel::refreshLeftButton() {
&st::groupCallVideoActiveSmall);
_video->show();
_video->setClickedCallback([=] {
_call->toggleVideo(!_call->isCameraSharing());
_call->toggleVideo(!_call->isSharingCamera());
});
_video->setText(tr::lng_group_call_video());
_video->setColorOverrides(_mute->colorOverrides());

View file

@ -113,42 +113,50 @@ void GroupCall::requestParticipants() {
: _nextOffset),
MTP_int(kRequestPerPage)
)).done([=](const MTPphone_GroupParticipants &result) {
_participantsRequestId = 0;
processSavedFullCall();
result.match([&](const MTPDphone_groupParticipants &data) {
_participantsRequestId = 0;
const auto reloaded = processSavedFullCall();
_nextOffset = qs(data.vnext_offset());
_peer->owner().processUsers(data.vusers());
_peer->owner().processChats(data.vchats());
applyParticipantsSlice(
data.vparticipants().v,
ApplySliceSource::SliceLoaded);
(reloaded
? ApplySliceSource::FullReloaded
: ApplySliceSource::SliceLoaded));
setServerParticipantsCount(data.vcount().v);
if (data.vparticipants().v.isEmpty()) {
_allParticipantsLoaded = true;
}
finishParticipantsSliceRequest();
if (reloaded) {
_participantsReloaded.fire({});
}
});
}).fail([=](const MTP::Error &error) {
_participantsRequestId = 0;
processSavedFullCall();
const auto reloaded = processSavedFullCall();
setServerParticipantsCount(_participants.size());
_allParticipantsLoaded = true;
finishParticipantsSliceRequest();
if (reloaded) {
_participantsReloaded.fire({});
}
}).send();
}
void GroupCall::processSavedFullCall() {
bool GroupCall::processSavedFullCall() {
if (!_savedFull) {
return;
return false;
}
_reloadRequestId = 0;
processFullCallFields(*base::take(_savedFull));
return true;
}
void GroupCall::finishParticipantsSliceRequest() {
computeParticipantsCount();
processQueuedUpdates();
_participantsSliceAdded.fire({});
}
void GroupCall::setServerParticipantsCount(int count) {
@ -237,8 +245,8 @@ const GroupCallParticipant *GroupCall::participantByEndpoint(
return nullptr;
}
rpl::producer<> GroupCall::participantsSliceAdded() {
return _participantsSliceAdded.events();
rpl::producer<> GroupCall::participantsReloaded() {
return _participantsReloaded.events();
}
auto GroupCall::participantUpdated() const
@ -348,7 +356,7 @@ void GroupCall::processFullCallFields(const MTPphone_GroupCall &call) {
applyParticipantsSlice(
participants,
ApplySliceSource::SliceLoaded);
ApplySliceSource::FullReloaded);
_nextOffset = nextOffset;
applyCallFields(data);
@ -362,6 +370,7 @@ void GroupCall::processFullCall(const MTPphone_GroupCall &call) {
processFullCallUsersChats(call);
processFullCallFields(call);
finishParticipantsSliceRequest();
_participantsReloaded.fire({});
}
void GroupCall::applyCallFields(const MTPDgroupCall &data) {
@ -540,7 +549,7 @@ void GroupCall::applyParticipantsSlice(
eraseVideoSsrcs(*i);
_speakingByActiveFinishes.remove(participantPeer);
_participants.erase(i);
if (sliceSource != ApplySliceSource::SliceLoaded) {
if (sliceSource != ApplySliceSource::FullReloaded) {
_participantUpdates.fire(std::move(update));
}
}
@ -578,24 +587,18 @@ void GroupCall::applyParticipantsSlice(
&& (!was || was->onlyMinLoaded);
const auto raisedHandRating
= data.vraise_hand_rating().value_or_empty();
const auto hasVideoParamsInformation = true/*(sliceSource
!= ApplySliceSource::UpdateConstructed)*/;
const auto value = Participant{
.peer = participantPeer,
.videoParams = (hasVideoParamsInformation
? Calls::ParseVideoParams(
(data.vvideo()
? data.vvideo()->c_dataJSON().vdata().v
: QByteArray()),
(data.vpresentation()
? data.vpresentation()->c_dataJSON().vdata().v
: QByteArray()),
(i != end(_participants)
? i->videoParams
: nullptr))
: (i != end(_participants))
? i->videoParams
: nullptr),
.videoParams = Calls::ParseVideoParams(
(data.vvideo()
? data.vvideo()->c_dataJSON().vdata().v
: QByteArray()),
(data.vpresentation()
? data.vpresentation()->c_dataJSON().vdata().v
: QByteArray()),
(i != end(_participants)
? i->videoParams
: nullptr)),
.date = data.vdate().v,
.lastActive = lastActive,
.raisedHandRating = raisedHandRating,
@ -633,7 +636,7 @@ void GroupCall::applyParticipantsSlice(
if (data.is_just_joined()) {
++_serverParticipantsCount;
}
if (sliceSource != ApplySliceSource::SliceLoaded) {
if (sliceSource != ApplySliceSource::FullReloaded) {
_participantUpdates.fire({
.was = was,
.now = value,

View file

@ -111,7 +111,7 @@ public:
[[nodiscard]] const Participant *participantByEndpoint(
const std::string &endpoint) const;
[[nodiscard]] rpl::producer<> participantsSliceAdded();
[[nodiscard]] rpl::producer<> participantsReloaded();
[[nodiscard]] rpl::producer<ParticipantUpdate> participantUpdated() const;
void enqueueUpdate(const MTPUpdate &update);
@ -146,6 +146,7 @@ public:
private:
enum class ApplySliceSource {
FullReloaded,
SliceLoaded,
UnknownLoaded,
UpdateReceived,
@ -175,7 +176,7 @@ private:
void processFullCallFields(const MTPphone_GroupCall &call);
[[nodiscard]] bool requestParticipantsAfterReload(
const MTPphone_GroupCall &call) const;
void processSavedFullCall();
[[nodiscard]] bool processSavedFullCall();
void finishParticipantsSliceRequest();
void emplaceVideoSsrcs(const Participant &participant);
@ -218,7 +219,7 @@ private:
mtpRequestId _unknownParticipantPeersRequestId = 0;
rpl::event_stream<ParticipantUpdate> _participantUpdates;
rpl::event_stream<> _participantsSliceAdded;
rpl::event_stream<> _participantsReloaded;
bool _joinMuted = false;
bool _canChangeJoinMuted = true;

View file

@ -309,7 +309,7 @@ rpl::producer<Ui::GroupCallBarContent> GroupCallTracker::ContentByCall(
}
}, lifetime);
call->participantsSliceAdded(
call->participantsReloaded(
) | rpl::filter([=] {
return RegenerateUserpics(state, call, userpicSize);
}) | rpl::start_with_next(pushNext, lifetime);