Track peer together with video endpoint.
parent 909a3cef9b
commit 8001efe6ab

3 changed files with 143 additions and 84 deletions
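For orientation before the hunks: the core of this commit is a small value type that pairs the owning peer with the video endpoint id, replacing the bare std::string endpoints used before. The snippet below is a condensed sketch assembled from the header hunk further down, not the real source file: PeerData is stubbed out only so it compiles standalone, and only operator== of the comparison operators is reproduced.

// Standalone sketch of the VideoEndpoint value type added by this commit.
// PeerData is a stand-in here; in the real tree it is the usual peer class
// and VideoEndpoint lives in the changed group-call header shown below.
#include <cassert>
#include <string>

struct PeerData {}; // stub, only so the sketch compiles on its own

struct VideoEndpoint {
	PeerData *peer = nullptr;
	std::string endpoint;

	[[nodiscard]] bool empty() const noexcept {
		return !peer;
	}
	[[nodiscard]] explicit operator bool() const noexcept {
		return !empty();
	}
};

inline bool operator==(const VideoEndpoint &a, const VideoEndpoint &b) noexcept {
	return (a.peer == b.peer) && (a.endpoint == b.endpoint);
}

int main() {
	PeerData somePeer;
	// Call code now carries the owner together with the endpoint id, so the
	// "large video" / pinning logic can resolve the participant directly.
	VideoEndpoint large{ &somePeer, "camera-endpoint-id" };
	assert(large && !large.empty());
	assert(large == (VideoEndpoint{ &somePeer, "camera-endpoint-id" }));
	assert(VideoEndpoint().empty()); // default value means "no large video"
}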
Changed file 1 of 3 (file path not captured in this view):

@@ -563,7 +563,7 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 : endpoint;
 };
 const auto guard = gsl::finally([&] {
-if (newLarge.empty()) {
+if (!newLarge) {
 newLarge = chooseLargeVideoEndpoint();
 }
 if (_videoEndpointLarge.current() != newLarge) {
@@ -577,6 +577,7 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 }
 });

+const auto peer = data.was ? data.was->peer : data.now->peer;
 const auto &wasCameraEndpoint = (data.was && data.was->videoParams)
 ? regularEndpoint(data.was->videoParams->camera.endpoint)
 : EmptyString();
@@ -595,8 +596,9 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 && _activeVideoEndpoints.remove(wasCameraEndpoint)
 && _incomingVideoEndpoints.contains(wasCameraEndpoint)) {
 updateCameraNotStreams = wasCameraEndpoint;
-if (newLarge == wasCameraEndpoint) {
-_videoEndpointPinned = newLarge = std::string();
+if (newLarge.endpoint == wasCameraEndpoint) {
+newLarge = VideoEndpoint();
+_videoEndpointPinned = false;
 }
 }
 }
@@ -618,8 +620,9 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 && _activeVideoEndpoints.remove(wasScreenEndpoint)
 && _incomingVideoEndpoints.contains(wasScreenEndpoint)) {
 updateScreenNotStreams = wasScreenEndpoint;
-if (newLarge == wasScreenEndpoint) {
-_videoEndpointPinned = newLarge = std::string();
+if (newLarge.endpoint == wasScreenEndpoint) {
+newLarge = VideoEndpoint();
+_videoEndpointPinned = false;
 }
 }
 }
@@ -629,62 +632,67 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
 const auto wasSounding = data.was && data.was->sounding;
 if (nowSpeaking == wasSpeaking && nowSounding == wasSounding) {
 return;
-} else if (!_videoEndpointPinned.current().empty()) {
+} else if (_videoEndpointPinned.current()) {
 return;
 }
-if (nowScreenEndpoint != newLarge
+if (nowScreenEndpoint != newLarge.endpoint
 && streamsVideo(nowScreenEndpoint)
-&& activeVideoEndpointType(newLarge) != EndpointType::Screen) {
-newLarge = nowScreenEndpoint;
+&& (activeVideoEndpointType(newLarge.endpoint)
+!= EndpointType::Screen)) {
+newLarge = { peer, nowScreenEndpoint };
 }
 const auto &participants = real->participants();
 if (!nowSpeaking
 && (wasSpeaking || wasSounding)
-&& (wasCameraEndpoint == newLarge)) {
-auto screenEndpoint = std::string();
-auto speakingEndpoint = std::string();
-auto soundingEndpoint = std::string();
+&& (wasCameraEndpoint == newLarge.endpoint)) {
+auto screenEndpoint = VideoEndpoint();
+auto speakingEndpoint = VideoEndpoint();
+auto soundingEndpoint = VideoEndpoint();
 for (const auto &participant : participants) {
 const auto params = participant.videoParams.get();
 if (!params) {
 continue;
 }
+const auto peer = participant.peer;
 if (streamsVideo(params->screen.endpoint)) {
-screenEndpoint = params->screen.endpoint;
+screenEndpoint = { peer, params->screen.endpoint };
 break;
 } else if (participant.speaking
-&& speakingEndpoint.empty()) {
+&& !speakingEndpoint) {
 if (streamsVideo(params->camera.endpoint)) {
-speakingEndpoint = params->camera.endpoint;
+speakingEndpoint = { peer, params->camera.endpoint };
 }
 } else if (!nowSounding
 && participant.sounding
-&& soundingEndpoint.empty()) {
+&& !soundingEndpoint) {
 if (streamsVideo(params->camera.endpoint)) {
-soundingEndpoint = params->camera.endpoint;
+soundingEndpoint = { peer, params->camera.endpoint };
 }
 }
 }
-if (!screenEndpoint.empty()) {
+if (screenEndpoint) {
 newLarge = screenEndpoint;
-} else if (!speakingEndpoint.empty()) {
+} else if (speakingEndpoint) {
 newLarge = speakingEndpoint;
-} else if (!soundingEndpoint.empty()) {
+} else if (soundingEndpoint) {
 newLarge = soundingEndpoint;
 }
 } else if ((nowSpeaking || nowSounding)
-&& (nowCameraEndpoint != newLarge)
-&& (activeVideoEndpointType(newLarge) != EndpointType::Screen)
+&& (nowCameraEndpoint != newLarge.endpoint)
+&& (activeVideoEndpointType(newLarge.endpoint)
+!= EndpointType::Screen)
 && streamsVideo(nowCameraEndpoint)) {
-const auto participant = real->participantByEndpoint(newLarge);
+const auto participant = real->participantByEndpoint(
+newLarge.endpoint);
 const auto screen = participant
-&& (participant->videoParams->screen.endpoint == newLarge);
+&& (participant->videoParams->screen.endpoint
+== newLarge.endpoint);
 const auto speaking = participant && participant->speaking;
 const auto sounding = participant && participant->sounding;
 if (!screen
 && ((nowSpeaking && !speaking)
 || (nowSounding && !sounding))) {
-newLarge = nowCameraEndpoint;
+newLarge = { peer, nowCameraEndpoint };
 }
 }
 }, _lifetime);
@@ -878,8 +886,8 @@ void GroupCall::setMyEndpointType(
 const auto was2 = _activeVideoEndpoints.remove(endpoint);
 if (was1 && was2) {
 auto newLarge = _videoEndpointLarge.current();
-if (newLarge == endpoint) {
-_videoEndpointPinned = std::string();
+if (newLarge.endpoint == endpoint) {
+_videoEndpointPinned = false;
 _videoEndpointLarge = chooseLargeVideoEndpoint();
 }
 _streamsVideoUpdated.fire({ endpoint, false });
@@ -893,13 +901,13 @@ void GroupCall::setMyEndpointType(
 _streamsVideoUpdated.fire({ endpoint, true });
 }
 const auto nowLarge = activeVideoEndpointType(
-_videoEndpointLarge.current());
-if (_videoEndpointPinned.current().empty()
+_videoEndpointLarge.current().endpoint);
+if (!_videoEndpointPinned.current()
 && ((type == EndpointType::Screen
 && nowLarge != EndpointType::Screen)
 || (type == EndpointType::Camera
 && nowLarge == EndpointType::None))) {
-_videoEndpointLarge = endpoint;
+_videoEndpointLarge = VideoEndpoint{ _joinAs, endpoint };
 }
 }
 }
@@ -1837,8 +1845,8 @@ void GroupCall::ensureControllerCreated() {
 std::move(descriptor));

 _videoEndpointLarge.changes(
-) | rpl::start_with_next([=](const std::string &endpoint) {
-_instance->setFullSizeVideoEndpointId(endpoint);
+) | rpl::start_with_next([=](const VideoEndpoint &endpoint) {
+_instance->setFullSizeVideoEndpointId(endpoint.endpoint);
 _videoLargeTrack = nullptr;
 _videoLargeTrackWrap = nullptr;
 if (endpoint.empty()) {
@@ -1850,7 +1858,7 @@ void GroupCall::ensureControllerCreated() {
 }
 _videoLargeTrackWrap->sink = Webrtc::CreateProxySink(
 _videoLargeTrackWrap->track.sink());
-addVideoOutput(endpoint, { _videoLargeTrackWrap->sink });
+addVideoOutput(endpoint.endpoint, { _videoLargeTrackWrap->sink });
 }, _lifetime);

 updateInstanceMuteState();
@@ -2063,7 +2071,7 @@ void GroupCall::setIncomingVideoEndpoints(
 const auto feedOne = [&](const std::string &endpoint) {
 if (endpoint.empty()) {
 return;
-} else if (endpoint == newLarge) {
+} else if (endpoint == newLarge.endpoint) {
 newLargeFound = true;
 }
 if (!removed.remove(endpoint)) {
@@ -2080,8 +2088,9 @@ void GroupCall::setIncomingVideoEndpoints(
 }
 feedOne(cameraSharingEndpoint());
 feedOne(screenSharingEndpoint());
-if (!newLarge.empty() && !newLargeFound) {
-_videoEndpointPinned = newLarge = std::string();
+if (newLarge && !newLargeFound) {
+_videoEndpointPinned = false;
+newLarge = VideoEndpoint();
 }
 if (newLarge.empty()) {
 _videoEndpointLarge = chooseLargeVideoEndpoint();
@@ -2106,7 +2115,7 @@ void GroupCall::fillActiveVideoEndpoints() {
 EndpointType type) {
 if (endpoint.empty()) {
 return;
-} else if (endpoint == newLarge) {
+} else if (endpoint == newLarge.endpoint) {
 newLargeFound = true;
 }
 if (!removed.remove(endpoint)) {
@@ -2129,9 +2138,10 @@ void GroupCall::fillActiveVideoEndpoints() {
 feedOne(cameraSharingEndpoint(), EndpointType::Camera);
 feedOne(screenSharingEndpoint(), EndpointType::Screen);
 if (!newLarge.empty() && !newLargeFound) {
-_videoEndpointPinned = newLarge = std::string();
+_videoEndpointPinned = false;
+newLarge = VideoEndpoint();
 }
-if (newLarge.empty()) {
+if (!newLarge) {
 _videoEndpointLarge = chooseLargeVideoEndpoint();
 }
 for (const auto &[endpoint, type] : removed) {
@@ -2152,15 +2162,15 @@ GroupCall::EndpointType GroupCall::activeVideoEndpointType(
 : EndpointType::None;
 }

-std::string GroupCall::chooseLargeVideoEndpoint() const {
+VideoEndpoint GroupCall::chooseLargeVideoEndpoint() const {
 const auto real = lookupReal();
 if (!real) {
-return std::string();
+return VideoEndpoint();
 }
-auto anyEndpoint = std::string();
-auto screenEndpoint = std::string();
-auto speakingEndpoint = std::string();
-auto soundingEndpoint = std::string();
+auto anyEndpoint = VideoEndpoint();
+auto screenEndpoint = VideoEndpoint();
+auto speakingEndpoint = VideoEndpoint();
+auto soundingEndpoint = VideoEndpoint();
 const auto &myCameraEndpoint = cameraSharingEndpoint();
 const auto &myScreenEndpoint = screenSharingEndpoint();
 const auto &participants = real->participants();
@@ -2171,35 +2181,36 @@ std::string GroupCall::chooseLargeVideoEndpoint() const {
 continue;
 }
 if (const auto participant = real->participantByEndpoint(endpoint)) {
+const auto peer = participant->peer;
 if (screenEndpoint.empty()
 && participant->videoParams->screen.endpoint == endpoint) {
-screenEndpoint = endpoint;
+screenEndpoint = { peer, endpoint };
 break;
 }
 if (speakingEndpoint.empty() && participant->speaking) {
-speakingEndpoint = endpoint;
+speakingEndpoint = { peer, endpoint };
 }
 if (soundingEndpoint.empty() && participant->sounding) {
-soundingEndpoint = endpoint;
+soundingEndpoint = { peer, endpoint };
 }
 if (anyEndpoint.empty()) {
-anyEndpoint = endpoint;
+anyEndpoint = { peer, endpoint };
 }
 }
 }
-return !screenEndpoint.empty()
+return screenEndpoint
 ? screenEndpoint
 : streamsVideo(myScreenEndpoint)
-? myScreenEndpoint
-: !speakingEndpoint.empty()
+? VideoEndpoint{ _joinAs, myScreenEndpoint }
+: speakingEndpoint
 ? speakingEndpoint
-: !soundingEndpoint.empty()
+: soundingEndpoint
 ? soundingEndpoint
-: !anyEndpoint.empty()
+: anyEndpoint
 ? anyEndpoint
 : streamsVideo(myCameraEndpoint)
-? myCameraEndpoint
-: std::string();
+? VideoEndpoint{ _joinAs, myCameraEndpoint }
+: VideoEndpoint();
 }

 void GroupCall::updateInstanceMuteState() {
@@ -2491,13 +2502,13 @@ void GroupCall::sendSelfUpdate(SendUpdateType type) {
 }).send();
 }

-void GroupCall::pinVideoEndpoint(const std::string &endpoint) {
-if (endpoint.empty()) {
-_videoEndpointPinned = endpoint;
-} else if (streamsVideo(endpoint)) {
-_videoEndpointPinned = std::string();
+void GroupCall::pinVideoEndpoint(const VideoEndpoint &endpoint) {
+if (!endpoint) {
+_videoEndpointPinned = false;
+} else if (streamsVideo(endpoint.endpoint)) {
+_videoEndpointPinned = false;
 _videoEndpointLarge = endpoint;
-_videoEndpointPinned = endpoint;
+_videoEndpointPinned = true;
 }
 }

Changed file 2 of 3 (file path not captured in this view):

@@ -74,6 +74,55 @@ struct LevelUpdate {
 bool me = false;
 };

+struct VideoEndpoint {
+PeerData *peer = nullptr;
+std::string endpoint;
+
+[[nodiscard]] bool empty() const noexcept {
+return !peer;
+}
+[[nodiscard]] explicit operator bool() const noexcept {
+return !empty();
+}
+};
+
+inline bool operator==(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return (a.peer == b.peer) && (a.endpoint == b.endpoint);
+}
+
+inline bool operator!=(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return !(a == b);
+}
+
+inline bool operator<(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return (a.peer < b.peer)
+|| (a.peer == b.peer && a.endpoint < b.endpoint);
+}
+
+inline bool operator>(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return (b < a);
+}
+
+inline bool operator<=(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return !(b < a);
+}
+
+inline bool operator>=(
+const VideoEndpoint &a,
+const VideoEndpoint &b) noexcept {
+return !(a < b);
+}
+
 struct StreamsVideoUpdate {
 std::string endpoint;
 bool streams = false;
@@ -234,18 +283,18 @@ public:
 && _incomingVideoEndpoints.contains(endpoint)
 && activeVideoEndpointType(endpoint) != EndpointType::None;
 }
-[[nodiscard]] const std::string &videoEndpointPinned() const {
+[[nodiscard]] bool videoEndpointPinned() const {
 return _videoEndpointPinned.current();
 }
-[[nodiscard]] rpl::producer<std::string> videoEndpointPinnedValue() const {
+[[nodiscard]] rpl::producer<bool> videoEndpointPinnedValue() const {
 return _videoEndpointPinned.value();
 }
-void pinVideoEndpoint(const std::string &endpoint);
-[[nodiscard]] const std::string &videoEndpointLarge() const {
+void pinVideoEndpoint(const VideoEndpoint &endpoint);
+[[nodiscard]] const VideoEndpoint &videoEndpointLarge() const {
 return _videoEndpointLarge.current();
 }
 [[nodiscard]] auto videoEndpointLargeValue() const
--> rpl::producer<std::string> {
+-> rpl::producer<VideoEndpoint> {
 return _videoEndpointLarge.value();
 }
 [[nodiscard]] Webrtc::VideoTrack *videoLargeTrack() const {
@@ -386,7 +435,7 @@ private:
 void setIncomingVideoEndpoints(
 const std::vector<std::string> &endpoints);
 void fillActiveVideoEndpoints();
-[[nodiscard]] std::string chooseLargeVideoEndpoint() const;
+[[nodiscard]] VideoEndpoint chooseLargeVideoEndpoint() const;
 [[nodiscard]] EndpointType activeVideoEndpointType(
 const std::string &endpoint) const;

@@ -473,8 +522,8 @@ private:
 rpl::event_stream<StreamsVideoUpdate> _streamsVideoUpdated;
 base::flat_set<std::string> _incomingVideoEndpoints;
 base::flat_map<std::string, EndpointType> _activeVideoEndpoints;
-rpl::variable<std::string> _videoEndpointLarge;
-rpl::variable<std::string> _videoEndpointPinned;
+rpl::variable<VideoEndpoint> _videoEndpointLarge;
+rpl::variable<bool> _videoEndpointPinned;
 std::unique_ptr<LargeTrack> _videoLargeTrackWrap;
 rpl::variable<Webrtc::VideoTrack*> _videoLargeTrack;
 base::flat_map<uint32, Data::LastSpokeTimes> _lastSpoke;
Changed file 3 of 3 (file path not captured in this view):

@@ -1228,9 +1228,9 @@ void MembersController::setupListChangeViewers() {
 }, _lifetime);

 _call->videoEndpointLargeValue(
-) | rpl::filter([=](const std::string &largeEndpoint) {
-return (_largeEndpoint != largeEndpoint);
-}) | rpl::start_with_next([=](const std::string &largeEndpoint) {
+) | rpl::filter([=](const VideoEndpoint &largeEndpoint) {
+return (_largeEndpoint != largeEndpoint.endpoint);
+}) | rpl::start_with_next([=](const VideoEndpoint &largeEndpoint) {
 if (_call->streamsVideo(_largeEndpoint)) {
 if (const auto participant = findParticipant(_largeEndpoint)) {
 if (const auto row = findRow(participant->peer)) {
@@ -1243,7 +1243,7 @@ void MembersController::setupListChangeViewers() {
 }
 }
 }
-_largeEndpoint = largeEndpoint;
+_largeEndpoint = largeEndpoint.endpoint;
 if (const auto participant = findParticipant(_largeEndpoint)) {
 if (const auto row = findRow(participant->peer)) {
 if (row->videoTrackEndpoint() == _largeEndpoint) {
@@ -2013,12 +2013,14 @@ base::unique_qptr<Ui::PopupMenu> MembersController::createRowContextMenu(
 });

 if (const auto real = _call->lookupReal()) {
-const auto pinnedEndpoint = _call->videoEndpointPinned();
+const auto pinnedEndpoint = _call->videoEndpointPinned()
+? _call->videoEndpointLarge().endpoint
+: std::string();
 const auto participant = real->participantByEndpoint(pinnedEndpoint);
 if (participant && participant->peer == participantPeer) {
 result->addAction(
 tr::lng_group_call_context_unpin_camera(tr::now),
-[=] { _call->pinVideoEndpoint(std::string()); });
+[=] { _call->pinVideoEndpoint(VideoEndpoint()); });
 } else {
 const auto &participants = real->participants();
 const auto i = ranges::find(
@@ -2031,9 +2033,9 @@ base::unique_qptr<Ui::PopupMenu> MembersController::createRowContextMenu(
 const auto streamsScreen = _call->streamsVideo(screen);
 if (streamsScreen || _call->streamsVideo(camera)) {
 const auto callback = [=] {
-_call->pinVideoEndpoint(streamsScreen
-? screen
-: camera);
+_call->pinVideoEndpoint(VideoEndpoint{
+participantPeer,
+streamsScreen ? screen : camera });
 };
 result->addAction(
 tr::lng_group_call_context_pin_camera(tr::now),
@@ -2438,10 +2440,7 @@ void Members::setupPinnedVideo() {
 _mode.changes() | rpl::filter(
 _1 == PanelMode::Default
 ) | rpl::to_empty,
-_call->videoEndpointLargeValue(
-) | rpl::filter([=](const std::string &endpoint) {
-return endpoint == _call->videoEndpointPinned();
-}) | rpl::to_empty
+_call->videoEndpointPinnedValue() | rpl::filter(_1) | rpl::to_empty
 ) | rpl::start_with_next([=] {
 _scroll->scrollToY(0);
 }, _scroll->lifetime());
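Taken together, the hunks also change how pinning is stored: _videoEndpointPinned becomes a plain bool sitting next to the VideoEndpoint "large" slot, instead of a sentinel std::string. Below is a minimal behavioural model of that state machine, reusing the VideoEndpoint sketch above. It is an illustration, not the real GroupCall: the real code additionally checks streamsVideo() before pinning, re-picks the large endpoint via chooseLargeVideoEndpoint(), and keeps both fields in rpl::variable<> so the UI can observe them.

// Minimal model of the new pin/unpin state, assuming the VideoEndpoint sketch
// from the beginning of this page is in scope. Illustrative only.
struct GroupCallPinModel {
	VideoEndpoint large;  // models _videoEndpointLarge (now rpl::variable<VideoEndpoint>)
	bool pinned = false;  // models _videoEndpointPinned (now rpl::variable<bool>)

	void pinVideoEndpoint(const VideoEndpoint &endpoint) {
		if (!endpoint) {
			pinned = false;   // empty argument means "unpin"
		} else {
			// Real code also requires streamsVideo(endpoint.endpoint) here.
			pinned = false;   // drop the flag first...
			large = endpoint; // ...swap the large endpoint...
			pinned = true;    // ...then mark it pinned
		}
	}
	void endpointStoppedStreaming(const std::string &endpoint) {
		if (large.endpoint == endpoint) {
			pinned = false;
			large = VideoEndpoint(); // real code then re-picks a large endpoint
		}
	}
};

In the real pinVideoEndpoint() the flag is lowered before _videoEndpointLarge is reassigned and only raised afterwards, presumably so observers of the two variables never see the previous large endpoint reported as pinned.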