First version of tiled layout.

John Preston 2021-05-12 19:02:45 +04:00
parent bd83ed8130
commit 7f739065e8
12 changed files with 762 additions and 706 deletions
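A minimal sketch of the data model this commit moves to, using simplified stand-in types (PeerData is only forward-declared here, and the Webrtc::VideoTrack / rpl plumbing is omitted): the single "large video" endpoint is replaced by a map of active video tracks keyed by VideoEndpoint, with a per-tile VideoQuality that the UI requests based on tile size.

```cpp
#include <map>
#include <string>

// Simplified stand-ins for the real types declared in calls_group_call.h below.
struct PeerData;                                      // forward-declared only
enum class VideoQuality { Thumbnail, Medium, Full };

struct VideoEndpoint {
	PeerData *peer = nullptr;
	std::string id;                                   // webrtc endpoint id
	bool operator<(const VideoEndpoint &o) const { return id < o.id; }
};

struct VideoTrackState {
	VideoQuality quality = VideoQuality::Thumbnail;
	// The real struct also owns a std::unique_ptr<Webrtc::VideoTrack>.
};

// One entry per endpoint that currently streams video.
using ActiveVideoTracks = std::map<VideoEndpoint, VideoTrackState>;
```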

View file

@ -709,7 +709,7 @@ groupCallTitleLabel: FlatLabel(groupCallSubtitleLabel) {
}
}
groupCallAddButtonPosition: point(10px, 7px);
groupCallMembersWidthMax: 360px;
groupCallMembersWidthMax: 480px;
groupCallRecordingMark: 6px;
groupCallRecordingMarkSkip: 4px;
groupCallRecordingMarkTop: 8px;
@ -1220,3 +1220,6 @@ groupCallLargeVideoPin: CrossLineAnimation {
}
groupCallVideoEnlarge: icon {{ "calls/voice_enlarge", mediaviewPipControlsFgOver }};
groupCallVideoMinimize: icon {{ "calls/voice_minimize", groupCallVideoSubTextFg }};
groupCallVideoSmallSkip: 4px;
groupCallVideoLargeSkip: 6px;

View file

@ -110,6 +110,12 @@ constexpr auto kFixLargeVideoDuration = 5 * crl::time(1000);
} // namespace
//GroupCall::VideoTrack::VideoTrack() = default;
//GroupCall::VideoTrack::VideoTrack(VideoTrack &&other) = default;
//GroupCall::VideoTrack &GroupCall::VideoTrack::operator=(
// VideoTrack &&other) = default;
//GroupCall::VideoTrack::~VideoTrack() = default;
//
class GroupCall::LoadPartTask final : public tgcalls::BroadcastPartTask {
public:
LoadPartTask(
@ -166,7 +172,7 @@ private:
};
struct GroupCall::SinkPointer {
std::shared_ptr<Webrtc::SinkInterface> data;
std::weak_ptr<Webrtc::SinkInterface> data;
};
[[nodiscard]] bool IsGroupCallAdmin(
@ -538,10 +544,7 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
using Update = Data::GroupCall::ParticipantUpdate;
real->participantUpdated(
) | rpl::start_with_next([=](const Update &data) {
auto changed = false;
auto newLarge = _videoEndpointLarge.current();
auto updateCameraNotStreams = std::string();
auto updateScreenNotStreams = std::string();
const auto &pinned = _videoEndpointPinned.current();
const auto regularEndpoint = [&](const std::string &endpoint)
-> const std::string & {
return (endpoint.empty()
@ -550,143 +553,30 @@ void GroupCall::subscribeToReal(not_null<Data::GroupCall*> real) {
? EmptyString()
: endpoint;
};
const auto guard = gsl::finally([&] {
if (!newLarge) {
newLarge = chooseLargeVideoEndpoint();
}
if (_videoEndpointLarge.current() != newLarge) {
setVideoEndpointLarge(newLarge);
} else if (changed) {
updateRequestedVideoChannelsDelayed();
}
if (!updateCameraNotStreams.empty()) {
_streamsVideoUpdated.fire({ updateCameraNotStreams, false });
}
if (!updateScreenNotStreams.empty()) {
_streamsVideoUpdated.fire({ updateScreenNotStreams, false });
}
});
const auto peer = data.was ? data.was->peer : data.now->peer;
const auto &wasCameraEndpoint = (data.was && data.was->videoParams)
? regularEndpoint(data.was->videoParams->camera.endpoint)
if (peer == _joinAs) {
return;
}
const auto &wasCameraEndpoint = data.was
? regularEndpoint(data.was->cameraEndpoint())
: EmptyString();
const auto &nowCameraEndpoint = (data.now && data.now->videoParams)
? regularEndpoint(data.now->videoParams->camera.endpoint)
const auto &nowCameraEndpoint = data.now
? regularEndpoint(data.now->cameraEndpoint())
: EmptyString();
if (wasCameraEndpoint != nowCameraEndpoint) {
if (!nowCameraEndpoint.empty()
&& _activeVideoEndpoints.emplace(
nowCameraEndpoint,
EndpointType::Camera).second) {
changed = true;
_streamsVideoUpdated.fire({ nowCameraEndpoint, true });
}
if (!wasCameraEndpoint.empty()
&& _activeVideoEndpoints.remove(wasCameraEndpoint)) {
changed = true;
updateCameraNotStreams = wasCameraEndpoint;
if (newLarge.endpoint == wasCameraEndpoint) {
newLarge = VideoEndpoint();
_videoEndpointPinned = false;
}
}
markEndpointActive({ peer, nowCameraEndpoint }, true);
markEndpointActive({ peer, wasCameraEndpoint }, false);
}
const auto &wasScreenEndpoint = (data.was && data.was->videoParams)
? data.was->videoParams->screen.endpoint
const auto &wasScreenEndpoint = data.was
? regularEndpoint(data.was->screenEndpoint())
: EmptyString();
const auto &nowScreenEndpoint = (data.now && data.now->videoParams)
? data.now->videoParams->screen.endpoint
const auto &nowScreenEndpoint = data.now
? regularEndpoint(data.now->screenEndpoint())
: EmptyString();
if (wasScreenEndpoint != nowScreenEndpoint) {
if (!nowScreenEndpoint.empty()
&& _activeVideoEndpoints.emplace(
nowScreenEndpoint,
EndpointType::Screen).second) {
changed = true;
_streamsVideoUpdated.fire({ nowScreenEndpoint, true });
}
if (!wasScreenEndpoint.empty()
&& _activeVideoEndpoints.remove(wasScreenEndpoint)) {
changed = true;
updateScreenNotStreams = wasScreenEndpoint;
if (newLarge.endpoint == wasScreenEndpoint) {
newLarge = VideoEndpoint();
_videoEndpointPinned = false;
}
}
}
const auto nowSpeaking = data.now && data.now->speaking;
const auto nowSounding = data.now && data.now->sounding;
const auto wasSpeaking = data.was && data.was->speaking;
const auto wasSounding = data.was && data.was->sounding;
if (nowSpeaking == wasSpeaking && nowSounding == wasSounding) {
return;
} else if (_videoEndpointPinned.current()
|| (_videoLargeShowTime
&& _videoLargeShowTime + kFixLargeVideoDuration
> crl::now())) {
return;
}
if (nowScreenEndpoint != newLarge.endpoint
&& streamsVideo(nowScreenEndpoint)
&& (activeVideoEndpointType(newLarge.endpoint)
!= EndpointType::Screen)) {
newLarge = { peer, nowScreenEndpoint };
}
const auto &participants = real->participants();
if (!nowSpeaking
&& (wasSpeaking || wasSounding)
&& (wasCameraEndpoint == newLarge.endpoint)) {
auto screenEndpoint = VideoEndpoint();
auto speakingEndpoint = VideoEndpoint();
auto soundingEndpoint = VideoEndpoint();
for (const auto &participant : participants) {
const auto params = participant.videoParams.get();
if (!params) {
continue;
}
const auto peer = participant.peer;
if (streamsVideo(params->screen.endpoint)) {
screenEndpoint = { peer, params->screen.endpoint };
break;
} else if (participant.speaking
&& !speakingEndpoint) {
if (streamsVideo(params->camera.endpoint)) {
speakingEndpoint = { peer, params->camera.endpoint };
}
} else if (!nowSounding
&& participant.sounding
&& !soundingEndpoint) {
if (streamsVideo(params->camera.endpoint)) {
soundingEndpoint = { peer, params->camera.endpoint };
}
}
}
if (screenEndpoint) {
newLarge = screenEndpoint;
} else if (speakingEndpoint) {
newLarge = speakingEndpoint;
} else if (soundingEndpoint) {
newLarge = soundingEndpoint;
}
} else if ((nowSpeaking || nowSounding)
&& (nowCameraEndpoint != newLarge.endpoint)
&& (activeVideoEndpointType(newLarge.endpoint)
!= EndpointType::Screen)
&& streamsVideo(nowCameraEndpoint)) {
const auto participant = real->participantByEndpoint(
newLarge.endpoint);
const auto screen = participant
&& (participant->videoParams->screen.endpoint
== newLarge.endpoint);
const auto speaking = participant && participant->speaking;
const auto sounding = participant && participant->sounding;
if (!screen
&& ((nowSpeaking && !speaking)
|| (nowSounding && !sounding))) {
newLarge = { peer, nowCameraEndpoint };
}
markEndpointActive({ peer, nowScreenEndpoint }, true);
markEndpointActive({ peer, wasScreenEndpoint }, false);
}
}, _lifetime);
@ -869,53 +759,19 @@ void GroupCall::join(const MTPInputGroupCall &inputCall) {
});
}
void GroupCall::setMyEndpointType(
const std::string &endpoint,
EndpointType type) {
if (endpoint.empty()) {
return;
} else if (type == EndpointType::None) {
const auto was = _activeVideoEndpoints.remove(endpoint);
if (was) {
auto newLarge = _videoEndpointLarge.current();
if (newLarge.endpoint == endpoint) {
_videoEndpointPinned = false;
setVideoEndpointLarge(chooseLargeVideoEndpoint());
}
_streamsVideoUpdated.fire({ endpoint, false });
}
} else {
const auto now = _activeVideoEndpoints.emplace(
endpoint,
type).second;
if (now) {
_streamsVideoUpdated.fire({ endpoint, true });
}
const auto nowLarge = activeVideoEndpointType(
_videoEndpointLarge.current().endpoint);
if (!_videoEndpointPinned.current()
&& ((type == EndpointType::Screen
&& nowLarge != EndpointType::Screen)
|| (type == EndpointType::Camera
&& nowLarge == EndpointType::None))) {
setVideoEndpointLarge(VideoEndpoint{ _joinAs, endpoint });
}
}
}
void GroupCall::setScreenEndpoint(std::string endpoint) {
if (_screenEndpoint == endpoint) {
return;
}
if (!_screenEndpoint.empty()) {
setMyEndpointType(_screenEndpoint, EndpointType::None);
markEndpointActive({ _joinAs, _screenEndpoint }, false);
}
_screenEndpoint = std::move(endpoint);
if (_screenEndpoint.empty()) {
return;
}
if (isSharingScreen()) {
setMyEndpointType(_screenEndpoint, EndpointType::Screen);
markEndpointActive({ _joinAs, _screenEndpoint }, true);
}
}
@ -924,14 +780,14 @@ void GroupCall::setCameraEndpoint(std::string endpoint) {
return;
}
if (!_cameraEndpoint.empty()) {
setMyEndpointType(_cameraEndpoint, EndpointType::None);
markEndpointActive({ _joinAs, _cameraEndpoint }, false);
}
_cameraEndpoint = std::move(endpoint);
if (_cameraEndpoint.empty()) {
return;
}
if (isSharingCamera()) {
setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
markEndpointActive({ _joinAs, _cameraEndpoint }, true);
}
}
@ -939,12 +795,42 @@ void GroupCall::addVideoOutput(
const std::string &endpoint,
SinkPointer sink) {
if (_cameraEndpoint == endpoint) {
_cameraCapture->setOutput(sink.data);
if (auto strong = sink.data.lock()) {
_cameraCapture->setOutput(std::move(strong));
}
} else if (_screenEndpoint == endpoint) {
_screenCapture->setOutput(sink.data);
if (auto strong = sink.data.lock()) {
_screenCapture->setOutput(std::move(strong));
}
} else if (_instance) {
_instance->addIncomingVideoOutput(endpoint, std::move(sink.data));
} else {
Assert(_instance != nullptr);
_instance->addIncomingVideoOutput(endpoint, sink.data);
_pendingVideoOutputs.emplace(endpoint, std::move(sink));
}
}
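A minimal self-contained sketch of the pattern addVideoOutput() adopts here: sinks are held as std::weak_ptr, and outputs requested before the tgcalls instance exists are parked in a pending map that is flushed once the instance is created. FrameSink, Instance and instanceCreated() are hypothetical stand-ins for Webrtc::SinkInterface, the tgcalls group instance and ensureControllerCreated(); the local camera/screen branch that locks the weak pointer before setOutput() is omitted.

```cpp
#include <map>
#include <memory>
#include <string>
#include <utility>

struct FrameSink {};

struct Instance {
	void addIncomingVideoOutput(
			const std::string &endpoint,
			std::weak_ptr<FrameSink> sink) {
		// The real instance wires the sink into the incoming video stream.
		(void)endpoint; (void)sink;
	}
};

struct SinkPointer { std::weak_ptr<FrameSink> data; };

class Call {
public:
	void addVideoOutput(const std::string &endpoint, SinkPointer sink) {
		if (_instance) {
			_instance->addIncomingVideoOutput(endpoint, std::move(sink.data));
		} else {
			_pendingVideoOutputs[endpoint] = std::move(sink); // flushed later
		}
	}
	void instanceCreated(Instance &instance) {
		_instance = &instance;
		for (auto &[endpoint, sink] : _pendingVideoOutputs) {
			_instance->addIncomingVideoOutput(endpoint, std::move(sink.data));
		}
		_pendingVideoOutputs.clear();
	}

private:
	Instance *_instance = nullptr;
	std::map<std::string, SinkPointer> _pendingVideoOutputs;
};
```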
void GroupCall::markEndpointActive(VideoEndpoint endpoint, bool active) {
if (!endpoint) {
return;
}
const auto changed = active
? !_activeVideoTracks.contains(endpoint)
: _activeVideoTracks.remove(endpoint);
if (active && changed) {
const auto i = _activeVideoTracks.emplace(
endpoint,
VideoTrack{
.track = std::make_unique<Webrtc::VideoTrack>(
Webrtc::VideoState::Active),
.peer = endpoint.peer,
}).first;
addVideoOutput(i->first.id, { i->second.track->sink() });
} else if (!active && _videoEndpointPinned.current() == endpoint) {
_videoEndpointPinned = VideoEndpoint();
}
updateRequestedVideoChannelsDelayed();
if (changed) {
_videoStreamActiveUpdates.fire(std::move(endpoint));
}
}
@ -1676,12 +1562,12 @@ void GroupCall::ensureOutgoingVideo() {
_instance->setVideoCapture(_cameraCapture);
}
_cameraCapture->setState(tgcalls::VideoState::Active);
setMyEndpointType(_cameraEndpoint, EndpointType::Camera);
markEndpointActive({ _joinAs, _cameraEndpoint }, true);
} else {
if (_cameraCapture) {
_cameraCapture->setState(tgcalls::VideoState::Inactive);
}
setMyEndpointType(_cameraEndpoint, EndpointType::None);
markEndpointActive({ _joinAs, _cameraEndpoint }, false);
}
sendSelfUpdate(SendUpdateType::VideoMuted);
applyMeInCallLocally();
@ -1716,12 +1602,12 @@ void GroupCall::ensureOutgoingVideo() {
_screenInstance->setVideoCapture(_screenCapture);
}
_screenCapture->setState(tgcalls::VideoState::Active);
setMyEndpointType(_screenEndpoint, EndpointType::Screen);
markEndpointActive({ _joinAs, _screenEndpoint }, true);
} else {
if (_screenCapture) {
_screenCapture->setState(tgcalls::VideoState::Inactive);
}
setMyEndpointType(_screenEndpoint, EndpointType::None);
markEndpointActive({ _joinAs, _screenEndpoint }, false);
}
joinLeavePresentation();
}, _lifetime);
@ -1855,26 +1741,11 @@ void GroupCall::ensureControllerCreated() {
_instance = std::make_unique<tgcalls::GroupInstanceCustomImpl>(
std::move(descriptor));
_videoEndpointLarge.value(
) | rpl::start_with_next([=](const VideoEndpoint &endpoint) {
updateRequestedVideoChannels();
_videoLargeTrack = LargeTrack();
_videoLargeTrackWrap = nullptr;
if (!endpoint) {
return;
}
_videoLargeTrackWrap = std::make_unique<Webrtc::VideoTrack>(
Webrtc::VideoState::Active);
_videoLargeTrack = LargeTrack{
_videoLargeTrackWrap.get(),
endpoint.peer
};
addVideoOutput(endpoint.endpoint, { _videoLargeTrackWrap->sink() });
}, _lifetime);
updateInstanceMuteState();
updateInstanceVolumes();
for (auto &[endpoint, sink] : base::take(_pendingVideoOutputs)) {
_instance->addIncomingVideoOutput(endpoint, std::move(sink.data));
}
//raw->setAudioOutputDuckingEnabled(settings.callAudioDuckingEnabled());
}
@ -2077,15 +1948,15 @@ void GroupCall::updateRequestedVideoChannels() {
}
auto channels = std::vector<tgcalls::VideoChannelDescription>();
using Quality = tgcalls::VideoChannelDescription::Quality;
channels.reserve(_activeVideoEndpoints.size());
channels.reserve(_activeVideoTracks.size());
const auto &camera = cameraSharingEndpoint();
const auto &screen = screenSharingEndpoint();
const auto &large = _videoEndpointLarge.current().endpoint;
for (const auto &[endpoint, endpointType] : _activeVideoEndpoints) {
if (endpoint == camera || endpoint == screen) {
for (const auto &[endpoint, video] : _activeVideoTracks) {
const auto &endpointId = endpoint.id;
if (endpointId == camera || endpointId == screen) {
continue;
}
const auto participant = real->participantByEndpoint(endpoint);
const auto participant = real->participantByEndpoint(endpointId);
const auto params = (participant && participant->ssrc)
? participant->videoParams.get()
: nullptr;
@ -2094,11 +1965,13 @@ void GroupCall::updateRequestedVideoChannels() {
}
channels.push_back({
.audioSsrc = participant->ssrc,
.videoInformation = (params->camera.endpoint == endpoint
.videoInformation = (params->camera.endpoint == endpointId
? params->camera.json.toStdString()
: params->screen.json.toStdString()),
.quality = (endpoint == large
.quality = (video.quality == Group::VideoQuality::Full
? Quality::Full
: video.quality == Group::VideoQuality::Medium
? Quality::Medium
: Quality::Thumbnail),
});
}
@ -2122,109 +1995,50 @@ void GroupCall::fillActiveVideoEndpoints() {
Assert(real != nullptr);
const auto &participants = real->participants();
auto newLarge = _videoEndpointLarge.current();
auto newLargeFound = false;
auto removed = _activeVideoEndpoints;
const auto feedOne = [&](
const std::string &endpoint,
EndpointType type) {
const auto &pinned = _videoEndpointPinned.current();
auto pinnedFound = false;
auto endpoints = _activeVideoTracks | ranges::views::transform([](
const auto &pair) {
return pair.first;
});
auto removed = base::flat_set<VideoEndpoint>(
begin(endpoints),
end(endpoints));
const auto feedOne = [&](VideoEndpoint endpoint) {
if (endpoint.empty()) {
return;
} else if (endpoint == newLarge.endpoint) {
newLargeFound = true;
} else if (endpoint == pinned) {
pinnedFound = true;
}
if (!removed.remove(endpoint)) {
_activeVideoEndpoints.emplace(endpoint, type);
_streamsVideoUpdated.fire({ endpoint, true });
markEndpointActive(std::move(endpoint), true);
}
};
for (const auto &participant : participants) {
const auto camera = participant.cameraEndpoint();
if (camera != _cameraEndpoint && camera != _screenEndpoint) {
feedOne(camera, EndpointType::Camera);
if (camera != _cameraEndpoint
&& camera != _screenEndpoint
&& participant.peer != _joinAs) {
feedOne({ participant.peer, camera });
}
const auto screen = participant.screenEndpoint();
if (screen != _cameraEndpoint && screen != _screenEndpoint) {
feedOne(screen, EndpointType::Screen);
if (screen != _cameraEndpoint
&& screen != _screenEndpoint
&& participant.peer != _joinAs) {
feedOne({ participant.peer, screen });
}
}
feedOne(cameraSharingEndpoint(), EndpointType::Camera);
feedOne(screenSharingEndpoint(), EndpointType::Screen);
if (!newLarge.empty() && !newLargeFound) {
_videoEndpointPinned = false;
newLarge = VideoEndpoint();
feedOne({ _joinAs, cameraSharingEndpoint() });
feedOne({ _joinAs, screenSharingEndpoint() });
if (pinned && !pinnedFound) {
_videoEndpointPinned = VideoEndpoint();
}
if (!newLarge) {
setVideoEndpointLarge(chooseLargeVideoEndpoint());
}
for (const auto &[endpoint, type] : removed) {
if (_activeVideoEndpoints.remove(endpoint)) {
_streamsVideoUpdated.fire({ endpoint, false });
}
for (const auto &endpoint : removed) {
markEndpointActive(endpoint, false);
}
updateRequestedVideoChannels();
}
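fillActiveVideoEndpoints() above reconciles the set of active tracks against what the participants list says should be streaming. A minimal sketch of that reconciliation pattern with generic types (reconcile() is a hypothetical helper, not part of the codebase):

```cpp
#include <set>
#include <vector>

// Snapshot the currently active keys, feed every key that should stay active
// (activating the new ones), then deactivate whatever was not fed again.
template <typename Key, typename Fn>
void reconcile(
		std::set<Key> &active,
		const std::vector<Key> &wanted,
		Fn &&markActive) {           // markActive(key, bool nowActive)
	auto removed = active;           // candidates for deactivation
	for (const auto &key : wanted) {
		if (!removed.erase(key)) {   // not previously active -> activate
			active.insert(key);
			markActive(key, true);
		}
	}
	for (const auto &key : removed) {
		active.erase(key);
		markActive(key, false);
	}
}
```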
GroupCall::EndpointType GroupCall::activeVideoEndpointType(
const std::string &endpoint) const {
if (endpoint.empty()) {
return EndpointType::None;
}
const auto i = _activeVideoEndpoints.find(endpoint);
return (i != end(_activeVideoEndpoints))
? i->second
: EndpointType::None;
}
VideoEndpoint GroupCall::chooseLargeVideoEndpoint() const {
const auto real = lookupReal();
if (!real) {
return VideoEndpoint();
}
auto anyEndpoint = VideoEndpoint();
auto screenEndpoint = VideoEndpoint();
auto speakingEndpoint = VideoEndpoint();
auto soundingEndpoint = VideoEndpoint();
const auto &myCameraEndpoint = cameraSharingEndpoint();
const auto &myScreenEndpoint = screenSharingEndpoint();
const auto &participants = real->participants();
for (const auto &[endpoint, endpointType] : _activeVideoEndpoints) {
if (endpoint == _cameraEndpoint || endpoint == _screenEndpoint) {
continue;
}
if (const auto participant = real->participantByEndpoint(endpoint)) {
const auto peer = participant->peer;
if (screenEndpoint.empty()
&& participant->videoParams->screen.endpoint == endpoint) {
screenEndpoint = { peer, endpoint };
break;
}
if (speakingEndpoint.empty() && participant->speaking) {
speakingEndpoint = { peer, endpoint };
}
if (soundingEndpoint.empty() && participant->sounding) {
soundingEndpoint = { peer, endpoint };
}
if (anyEndpoint.empty()) {
anyEndpoint = { peer, endpoint };
}
}
}
return screenEndpoint
? screenEndpoint
: streamsVideo(myScreenEndpoint)
? VideoEndpoint{ _joinAs, myScreenEndpoint }
: speakingEndpoint
? speakingEndpoint
: soundingEndpoint
? soundingEndpoint
: anyEndpoint
? anyEndpoint
: streamsVideo(myCameraEndpoint)
? VideoEndpoint{ _joinAs, myCameraEndpoint }
: VideoEndpoint();
}
void GroupCall::updateInstanceMuteState() {
Expects(_instance != nullptr);
@ -2515,27 +2329,21 @@ void GroupCall::sendSelfUpdate(SendUpdateType type) {
}
void GroupCall::pinVideoEndpoint(VideoEndpoint endpoint) {
if (!endpoint) {
_videoEndpointPinned = false;
} else if (streamsVideo(endpoint.endpoint)) {
_videoEndpointPinned = false;
setVideoEndpointLarge(std::move(endpoint));
_videoEndpointPinned = true;
}
_videoEndpointPinned = endpoint;
}
void GroupCall::showVideoEndpointLarge(VideoEndpoint endpoint) {
if (!streamsVideo(endpoint.endpoint)) {
void GroupCall::requestVideoQuality(
const VideoEndpoint &endpoint,
Group::VideoQuality quality) {
if (!endpoint) {
return;
}
_videoEndpointPinned = false;
setVideoEndpointLarge(std::move(endpoint));
_videoLargeShowTime = crl::now();
}
void GroupCall::setVideoEndpointLarge(VideoEndpoint endpoint) {
_videoEndpointLarge = endpoint;
_videoLargeShowTime = 0;
const auto i = _activeVideoTracks.find(endpoint);
if (i == end(_activeVideoTracks) || i->second.quality == quality) {
return;
}
i->second.quality = quality;
updateRequestedVideoChannelsDelayed();
}
void GroupCall::setCurrentAudioDevice(bool input, const QString &deviceId) {

View file

@ -47,6 +47,7 @@ struct VolumeRequest;
struct ParticipantState;
struct JoinInfo;
struct RejoinEvent;
enum class VideoQuality;
} // namespace Group
enum class MuteState {
@ -76,10 +77,10 @@ struct LevelUpdate {
struct VideoEndpoint {
PeerData *peer = nullptr;
std::string endpoint;
std::string id;
[[nodiscard]] bool empty() const noexcept {
return !peer;
return id.empty();
}
[[nodiscard]] explicit operator bool() const noexcept {
return !empty();
@ -89,7 +90,7 @@ struct VideoEndpoint {
inline bool operator==(
const VideoEndpoint &a,
const VideoEndpoint &b) noexcept {
return (a.peer == b.peer) && (a.endpoint == b.endpoint);
return (a.id == b.id);
}
inline bool operator!=(
@ -102,7 +103,7 @@ inline bool operator<(
const VideoEndpoint &a,
const VideoEndpoint &b) noexcept {
return (a.peer < b.peer)
|| (a.peer == b.peer && a.endpoint < b.endpoint);
|| (a.peer == b.peer && a.id < b.id);
}
inline bool operator>(
@ -123,11 +124,6 @@ inline bool operator>=(
return !(a < b);
}
struct StreamsVideoUpdate {
std::string endpoint;
bool streams = false;
};
struct VideoParams {
base::flat_set<uint32> ssrcs;
std::string endpoint;
@ -274,49 +270,44 @@ public:
[[nodiscard]] rpl::producer<LevelUpdate> levelUpdates() const {
return _levelUpdates.events();
}
[[nodiscard]] auto streamsVideoUpdates() const
-> rpl::producer<StreamsVideoUpdate> {
return _streamsVideoUpdated.events();
}
[[nodiscard]] bool streamsVideo(const std::string &endpoint) const {
return !endpoint.empty()
&& activeVideoEndpointType(endpoint) != EndpointType::None;
}
[[nodiscard]] bool videoEndpointPinned() const {
return _videoEndpointPinned.current();
}
[[nodiscard]] rpl::producer<bool> videoEndpointPinnedValue() const {
return _videoEndpointPinned.value();
[[nodiscard]] auto videoStreamActiveUpdates() const
-> rpl::producer<VideoEndpoint> {
return _videoStreamActiveUpdates.events();
}
void pinVideoEndpoint(VideoEndpoint endpoint);
[[nodiscard]] const VideoEndpoint &videoEndpointLarge() const {
return _videoEndpointLarge.current();
void requestVideoQuality(
const VideoEndpoint &endpoint,
Group::VideoQuality quality);
[[nodiscard]] const VideoEndpoint &videoEndpointPinned() const {
return _videoEndpointPinned.current();
}
[[nodiscard]] auto videoEndpointLargeValue() const
[[nodiscard]] auto videoEndpointPinnedValue() const
-> rpl::producer<VideoEndpoint> {
return _videoEndpointLarge.value();
return _videoEndpointPinned.value();
}
void showVideoEndpointLarge(VideoEndpoint endpoint);
struct LargeTrack {
Webrtc::VideoTrack *track = nullptr;
struct VideoTrack {
//VideoTrack();
//VideoTrack(VideoTrack &&other);
//VideoTrack &operator=(VideoTrack &&other);
//~VideoTrack();
std::unique_ptr<Webrtc::VideoTrack> track;
PeerData *peer = nullptr;
Group::VideoQuality quality = Group::VideoQuality();
[[nodiscard]] explicit operator bool() const {
return (track != nullptr);
}
[[nodiscard]] bool operator==(LargeTrack other) const {
[[nodiscard]] bool operator==(const VideoTrack &other) const {
return (track == other.track) && (peer == other.peer);
}
[[nodiscard]] bool operator!=(LargeTrack other) const {
[[nodiscard]] bool operator!=(const VideoTrack &other) const {
return !(*this == other);
}
};
[[nodiscard]] LargeTrack videoLargeTrack() const {
return _videoLargeTrack.current();
}
[[nodiscard]] auto videoLargeTrackValue() const
-> rpl::producer<LargeTrack> {
return _videoLargeTrack.value();
[[nodiscard]] auto activeVideoTracks() const
-> const base::flat_map<VideoEndpoint, VideoTrack> & {
return _activeVideoTracks;
}
[[nodiscard]] rpl::producer<Group::RejoinEvent> rejoinEvents() const {
return _rejoinEvents.events();
@ -391,11 +382,6 @@ private:
RaiseHand,
VideoMuted,
};
enum class EndpointType {
None,
Camera,
Screen,
};
[[nodiscard]] bool mediaChannelDescriptionsFill(
not_null<MediaChannelDescriptionsTask*> task,
@ -448,10 +434,6 @@ private:
void updateRequestedVideoChannels();
void updateRequestedVideoChannelsDelayed();
void fillActiveVideoEndpoints();
[[nodiscard]] VideoEndpoint chooseLargeVideoEndpoint() const;
[[nodiscard]] EndpointType activeVideoEndpointType(
const std::string &endpoint) const;
void setVideoEndpointLarge(VideoEndpoint endpoint);
void editParticipant(
not_null<PeerData*> participantPeer,
@ -467,11 +449,12 @@ private:
void setupMediaDevices();
void ensureOutgoingVideo();
void setMyEndpointType(const std::string &endpoint, EndpointType type);
void setScreenEndpoint(std::string endpoint);
void setCameraEndpoint(std::string endpoint);
void addVideoOutput(const std::string &endpoint, SinkPointer sink);
void markEndpointActive(VideoEndpoint endpoint, bool active);
[[nodiscard]] MTPInputGroupCall inputCall() const;
const not_null<Delegate*> _delegate;
@ -523,6 +506,7 @@ private:
base::has_weak_ptr _instanceGuard;
std::shared_ptr<tgcalls::VideoCaptureInterface> _cameraCapture;
std::unique_ptr<Webrtc::VideoTrack> _cameraOutgoing;
base::flat_map<std::string, SinkPointer> _pendingVideoOutputs;
rpl::variable<InstanceState> _screenInstanceState
= InstanceState::Disconnected;
@ -536,13 +520,9 @@ private:
bool _videoInited = false;
rpl::event_stream<LevelUpdate> _levelUpdates;
rpl::event_stream<StreamsVideoUpdate> _streamsVideoUpdated;
base::flat_map<std::string, EndpointType> _activeVideoEndpoints;
rpl::variable<VideoEndpoint> _videoEndpointLarge;
rpl::variable<bool> _videoEndpointPinned;
std::unique_ptr<Webrtc::VideoTrack> _videoLargeTrackWrap;
rpl::variable<LargeTrack> _videoLargeTrack;
crl::time _videoLargeShowTime = 0;
rpl::event_stream<VideoEndpoint> _videoStreamActiveUpdates;
base::flat_map<VideoEndpoint, VideoTrack> _activeVideoTracks;
rpl::variable<VideoEndpoint> _videoEndpointPinned;
base::flat_map<uint32, Data::LastSpokeTimes> _lastSpoke;
rpl::event_stream<Group::RejoinEvent> _rejoinEvents;
rpl::event_stream<> _allowedToSpeakNotifications;

View file

@ -53,4 +53,10 @@ enum class PanelMode {
Wide,
};
enum class VideoQuality {
Thumbnail,
Medium,
Full,
};
} // namespace Calls::Group

View file

@ -7,6 +7,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
*/
#include "calls/group/calls_group_large_video.h"
#include "calls/group/calls_group_common.h"
#include "calls/group/calls_group_members_row.h"
#include "media/view/media_view_pip.h"
#include "webrtc/webrtc_video_track.h"
@ -50,6 +51,15 @@ void LargeVideo::setVisible(bool visible) {
void LargeVideo::setGeometry(int x, int y, int width, int height) {
_content.setGeometry(x, y, width, height);
if (width > 0 && height > 0) {
const auto kMedium = style::ConvertScale(380);
const auto kSmall = style::ConvertScale(200);
_requestedQuality = (width > kMedium || height > kMedium)
? VideoQuality::Full
: (width > kSmall || height > kSmall)
? VideoQuality::Medium
: VideoQuality::Thumbnail;
}
}
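The width/height thresholds above are what drive the per-tile quality request. A standalone sketch of the same mapping, with style::ConvertScale() replaced by an explicit scale factor so the snippet compiles on its own (chooseQuality is a hypothetical helper name; 380 and 200 are the logical-pixel thresholds used above):

```cpp
enum class VideoQuality { Thumbnail, Medium, Full };

// Maps a tile's on-screen size to the stream quality it should request.
VideoQuality chooseQuality(int width, int height, double scale = 1.) {
	const auto medium = static_cast<int>(380 * scale); // "big tile" threshold
	const auto small = static_cast<int>(200 * scale);  // "tiny tile" threshold
	if (width > medium || height > medium) {
		return VideoQuality::Full;
	} else if (width > small || height > small) {
		return VideoQuality::Medium;
	}
	return VideoQuality::Thumbnail;
}
```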
void LargeVideo::setControlsShown(bool shown) {
@ -77,10 +87,24 @@ rpl::producer<float64> LargeVideo::controlsShown() const {
return _controlsShownRatio.value();
}
QSize LargeVideo::trackSize() const {
return _trackSize.current();
}
rpl::producer<QSize> LargeVideo::trackSizeValue() const {
return _trackSize.value();
}
rpl::producer<VideoQuality> LargeVideo::requestedQuality() const {
using namespace rpl::mappers;
return rpl::combine(
_content.shownValue(),
_requestedQuality.value()
) | rpl::filter([=](bool shown, auto) {
return shown;
}) | rpl::map(_2);
}
void LargeVideo::setup(
rpl::producer<LargeVideoTrack> track,
rpl::producer<bool> pinned) {

View file

@ -27,6 +27,7 @@ class VideoTrack;
namespace Calls::Group {
class MembersRow;
enum class VideoQuality;
struct LargeVideoTrack {
Webrtc::VideoTrack *track = nullptr;
@ -66,11 +67,14 @@ public:
[[nodiscard]] rpl::producer<bool> pinToggled() const;
[[nodiscard]] rpl::producer<> minimizeClicks() const;
[[nodiscard]] rpl::producer<float64> controlsShown() const;
[[nodiscard]] rpl::producer<QSize> trackSizeValue() const;
[[nodiscard]] rpl::producer<> clicks() const {
return _clicks.events();
}
[[nodiscard]] QSize trackSize() const;
[[nodiscard]] rpl::producer<QSize> trackSizeValue() const;
[[nodiscard]] rpl::producer<VideoQuality> requestedQuality() const;
[[nodiscard]] rpl::lifetime &lifetime() {
return _content.lifetime();
}
@ -124,6 +128,7 @@ private:
bool _toggleControlsScheduled = false;
rpl::variable<float64> _controlsShownRatio = 1.;
rpl::variable<QSize> _trackSize;
rpl::variable<VideoQuality> _requestedQuality;
rpl::lifetime _trackLifetime;
};

View file

@ -46,6 +46,11 @@ using Row = MembersRow;
} // namespace
struct Members::VideoTile {
std::unique_ptr<LargeVideo> video;
VideoEndpoint endpoint;
};
class Members::Controller final
: public PeerListController
, public MembersRowDelegate
@ -156,13 +161,12 @@ private:
not_null<const Data::GroupCallParticipant*> participant) const;
const std::string &computeCameraEndpoint(
not_null<const Data::GroupCallParticipant*> participant) const;
void setRowVideoEndpoint(
not_null<Row*> row,
const std::string &endpoint);
//void setRowVideoEndpoint(
// not_null<Row*> row,
// const std::string &endpoint);
bool toggleRowVideo(not_null<PeerListRow*> row);
void showRowMenu(not_null<PeerListRow*> row);
void generateNarrowShadow();
void appendInvitedUsers();
void scheduleRaisedHandStatusRemove();
@ -277,24 +281,24 @@ Members::Controller::~Controller() {
base::take(_menu);
}
void Members::Controller::setRowVideoEndpoint(
not_null<Row*> row,
const std::string &endpoint) {
const auto was = row->videoTrackEndpoint();
if (was != endpoint) {
if (!was.empty()) {
_videoEndpoints.remove(was);
}
if (!endpoint.empty()) {
_videoEndpoints.emplace(endpoint, row);
}
}
if (endpoint.empty()) {
row->clearVideoTrack();
} else {
_call->addVideoOutput(endpoint, row->createVideoTrack(endpoint));
}
}
//void Members::Controller::setRowVideoEndpoint(
// not_null<Row*> row,
// const std::string &endpoint) {
// const auto was = row->videoTrackEndpoint();
// if (was != endpoint) {
// if (!was.empty()) {
// _videoEndpoints.remove(was);
// }
// if (!endpoint.empty()) {
// _videoEndpoints.emplace(endpoint, row);
// }
// }
// if (endpoint.empty()) {
// row->clearVideoTrack();
// } else {
// _call->addVideoOutput(endpoint, row->createVideoTrack(endpoint));
// }
//}
void Members::Controller::setupListChangeViewers() {
_call->real(
@ -302,13 +306,6 @@ void Members::Controller::setupListChangeViewers() {
subscribeToChanges(real);
}, _lifetime);
//_call->stateValue(
//) | rpl::start_with_next([=] {
// if (const auto real = _call->lookupReal()) {
// updateRow(channel->session().user());
// }
//}, _lifetime);
_call->levelUpdates(
) | rpl::start_with_next([=](const LevelUpdate &update) {
const auto i = _soundingRowBySsrc.find(update.ssrc);
@ -317,88 +314,88 @@ void Members::Controller::setupListChangeViewers() {
}
}, _lifetime);
_call->videoEndpointLargeValue(
) | rpl::filter([=](const VideoEndpoint &largeEndpoint) {
return (_largeEndpoint != largeEndpoint.endpoint);
}) | rpl::start_with_next([=](const VideoEndpoint &largeEndpoint) {
if (_call->streamsVideo(_largeEndpoint)) {
if (const auto participant = findParticipant(_largeEndpoint)) {
if (const auto row = findRow(participant->peer)) {
const auto current = row->videoTrackEndpoint();
if (current.empty()
|| (computeScreenEndpoint(participant) == _largeEndpoint
&& computeCameraEndpoint(participant) == current)) {
setRowVideoEndpoint(row, _largeEndpoint);
}
}
}
}
_largeEndpoint = largeEndpoint.endpoint;
if (const auto participant = findParticipant(_largeEndpoint)) {
if (const auto row = findRow(participant->peer)) {
if (row->videoTrackEndpoint() == _largeEndpoint) {
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (_largeEndpoint == camera
&& _call->streamsVideo(screen)) {
setRowVideoEndpoint(row, screen);
} else if (_largeEndpoint == screen
&& _call->streamsVideo(camera)) {
setRowVideoEndpoint(row, camera);
} else {
setRowVideoEndpoint(row, std::string());
}
}
}
}
}, _lifetime);
//_call->videoEndpointLargeValue(
//) | rpl::filter([=](const VideoEndpoint &largeEndpoint) {
// return (_largeEndpoint != largeEndpoint.endpoint);
//}) | rpl::start_with_next([=](const VideoEndpoint &largeEndpoint) {
// if (_call->streamsVideo(_largeEndpoint)) {
// if (const auto participant = findParticipant(_largeEndpoint)) {
// if (const auto row = findRow(participant->peer)) {
// const auto current = row->videoTrackEndpoint();
// if (current.empty()
// || (computeScreenEndpoint(participant) == _largeEndpoint
// && computeCameraEndpoint(participant) == current)) {
// setRowVideoEndpoint(row, _largeEndpoint);
// }
// }
// }
// }
// _largeEndpoint = largeEndpoint.endpoint;
// if (const auto participant = findParticipant(_largeEndpoint)) {
// if (const auto row = findRow(participant->peer)) {
// if (row->videoTrackEndpoint() == _largeEndpoint) {
// const auto &camera = computeCameraEndpoint(participant);
// const auto &screen = computeScreenEndpoint(participant);
// if (_largeEndpoint == camera
// && _call->streamsVideo(screen)) {
// setRowVideoEndpoint(row, screen);
// } else if (_largeEndpoint == screen
// && _call->streamsVideo(camera)) {
// setRowVideoEndpoint(row, camera);
// } else {
// setRowVideoEndpoint(row, std::string());
// }
// }
// }
// }
//}, _lifetime);
_call->streamsVideoUpdates(
) | rpl::start_with_next([=](StreamsVideoUpdate update) {
Assert(update.endpoint != _largeEndpoint);
if (update.streams) {
if (const auto participant = findParticipant(update.endpoint)) {
if (const auto row = findRow(participant->peer)) {
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (update.endpoint == camera
&& (!_call->streamsVideo(screen)
|| _largeEndpoint == screen)) {
setRowVideoEndpoint(row, camera);
} else if (update.endpoint == screen
&& (_largeEndpoint != screen)) {
setRowVideoEndpoint(row, screen);
}
}
}
} else {
const auto i = _videoEndpoints.find(update.endpoint);
if (i != end(_videoEndpoints)) {
const auto row = i->second;
const auto real = _call->lookupReal();
Assert(real != nullptr);
const auto participant = real->participantByPeer(
row->peer());
if (!participant) {
setRowVideoEndpoint(row, std::string());
} else {
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (update.endpoint == camera
&& (_largeEndpoint != screen)
&& _call->streamsVideo(screen)) {
setRowVideoEndpoint(row, screen);
} else if (update.endpoint == screen
&& (_largeEndpoint != camera)
&& _call->streamsVideo(camera)) {
setRowVideoEndpoint(row, camera);
} else {
setRowVideoEndpoint(row, std::string());
}
}
}
}
}, _lifetime);
//_call->streamsVideoUpdates(
//) | rpl::start_with_next([=](StreamsVideoUpdate update) {
// Assert(update.endpoint != _largeEndpoint);
// if (update.streams) {
// if (const auto participant = findParticipant(update.endpoint)) {
// if (const auto row = findRow(participant->peer)) {
// const auto &camera = computeCameraEndpoint(participant);
// const auto &screen = computeScreenEndpoint(participant);
// if (update.endpoint == camera
// && (!_call->streamsVideo(screen)
// || _largeEndpoint == screen)) {
// setRowVideoEndpoint(row, camera);
// } else if (update.endpoint == screen
// && (_largeEndpoint != screen)) {
// setRowVideoEndpoint(row, screen);
// }
// }
// }
// } else {
// const auto i = _videoEndpoints.find(update.endpoint);
// if (i != end(_videoEndpoints)) {
// const auto row = i->second;
// const auto real = _call->lookupReal();
// Assert(real != nullptr);
// const auto participant = real->participantByPeer(
// row->peer());
// if (!participant) {
// setRowVideoEndpoint(row, std::string());
// } else {
// const auto &camera = computeCameraEndpoint(participant);
// const auto &screen = computeScreenEndpoint(participant);
// if (update.endpoint == camera
// && (_largeEndpoint != screen)
// && _call->streamsVideo(screen)) {
// setRowVideoEndpoint(row, screen);
// } else if (update.endpoint == screen
// && (_largeEndpoint != camera)
// && _call->streamsVideo(camera)) {
// setRowVideoEndpoint(row, camera);
// } else {
// setRowVideoEndpoint(row, std::string());
// }
// }
// }
// }
//}, _lifetime);
_call->rejoinEvents(
) | rpl::start_with_next([=](const Group::RejoinEvent &event) {
@ -996,18 +993,18 @@ void Members::Controller::rowPaintNarrowBorder(
int x,
int y,
not_null<Row*> row) {
if (_call->videoEndpointLarge().peer != row->peer().get()) {
return;
}
auto hq = PainterHighQualityEnabler(p);
p.setBrush(Qt::NoBrush);
auto pen = st::groupCallMemberActiveIcon->p;
pen.setWidthF(st::groupCallNarrowOutline);
p.setPen(pen);
p.drawRoundedRect(
QRect{ QPoint(x, y), st::groupCallNarrowSize },
st::roundRadiusLarge,
st::roundRadiusLarge);
//if (_call->videoEndpointLarge().peer != row->peer().get()) {
// return;
//}
//auto hq = PainterHighQualityEnabler(p);
//p.setBrush(Qt::NoBrush);
//auto pen = st::groupCallMemberActiveIcon->p;
//pen.setWidthF(st::groupCallNarrowOutline);
//p.setPen(pen);
//p.drawRoundedRect(
// QRect{ QPoint(x, y), st::groupCallNarrowSize },
// st::roundRadiusLarge,
// st::roundRadiusLarge);
}
void Members::Controller::rowPaintNarrowShadow(
@ -1096,45 +1093,46 @@ void Members::Controller::showRowMenu(not_null<PeerListRow*> row) {
}
bool Members::Controller::toggleRowVideo(not_null<PeerListRow*> row) {
const auto real = _call->lookupReal();
if (!real) {
return false;
}
const auto participantPeer = row->peer();
const auto isMe = (participantPeer == _call->joinAs());
const auto participant = real->participantByPeer(participantPeer);
if (!participant) {
return false;
}
const auto params = participant->videoParams.get();
const auto empty = std::string();
const auto &camera = isMe
? _call->cameraSharingEndpoint()
: (params && _call->streamsVideo(params->camera.endpoint))
? params->camera.endpoint
: empty;
const auto &screen = isMe
? _call->screenSharingEndpoint()
: (params && _call->streamsVideo(params->screen.endpoint))
? params->screen.endpoint
: empty;
const auto &large = _call->videoEndpointLarge().endpoint;
const auto show = [&] {
if (!screen.empty() && large != screen) {
return screen;
} else if (!camera.empty() && large != camera) {
return camera;
}
return std::string();
}();
if (show.empty()) {
return false;
} else if (_call->videoEndpointPinned()) {
_call->pinVideoEndpoint({ participantPeer, show });
} else {
_call->showVideoEndpointLarge({ participantPeer, show });
}
return true;
return false;
//const auto real = _call->lookupReal();
//if (!real) {
// return false;
//}
//const auto participantPeer = row->peer();
//const auto isMe = (participantPeer == _call->joinAs());
//const auto participant = real->participantByPeer(participantPeer);
//if (!participant) {
// return false;
//}
//const auto params = participant->videoParams.get();
//const auto empty = std::string();
//const auto &camera = isMe
// ? _call->cameraSharingEndpoint()
// : (params && _call->streamsVideo(params->camera.endpoint))
// ? params->camera.endpoint
// : empty;
//const auto &screen = isMe
// ? _call->screenSharingEndpoint()
// : (params && _call->streamsVideo(params->screen.endpoint))
// ? params->screen.endpoint
// : empty;
//const auto &large = _call->videoEndpointLarge().endpoint;
//const auto show = [&] {
// if (!screen.empty() && large != screen) {
// return screen;
// } else if (!camera.empty() && large != camera) {
// return camera;
// }
// return std::string();
//}();
//if (show.empty()) {
// return false;
//} else if (_call->videoEndpointPinned()) {
// _call->pinVideoEndpoint({ participantPeer, show });
//} else {
// _call->showVideoEndpointLarge({ participantPeer, show });
//}
//return true;
}
void Members::Controller::rowActionClicked(
@ -1223,13 +1221,11 @@ base::unique_qptr<Ui::PopupMenu> Members::Controller::createRowContextMenu(
if (const auto real = _call->lookupReal()) {
const auto participant = real->participantByPeer(participantPeer);
if (participant) {
const auto pinnedEndpoint = _call->videoEndpointPinned()
? _call->videoEndpointLarge().endpoint
: std::string();
const auto &pinned = _call->videoEndpointPinned();
const auto &camera = computeCameraEndpoint(participant);
const auto &screen = computeScreenEndpoint(participant);
if (_call->streamsVideo(camera)) {
if (pinnedEndpoint == camera) {
if (!camera.empty()) {
if (pinned.id == camera) {
result->addAction(
tr::lng_group_call_context_unpin_camera(tr::now),
[=] { _call->pinVideoEndpoint(VideoEndpoint()); });
@ -1241,8 +1237,8 @@ base::unique_qptr<Ui::PopupMenu> Members::Controller::createRowContextMenu(
camera }); });
}
}
if (_call->streamsVideo(screen)) {
if (pinnedEndpoint == screen) {
if (!screen.empty()) {
if (pinned.id == screen) {
result->addAction(
tr::lng_group_call_context_unpin_screen(tr::now),
[=] { _call->pinVideoEndpoint(VideoEndpoint()); });
@ -1445,14 +1441,13 @@ std::unique_ptr<Row> Members::Controller::createRow(
const Data::GroupCallParticipant &participant) {
auto result = std::make_unique<Row>(this, participant.peer);
updateRow(result.get(), &participant);
const auto &camera = computeCameraEndpoint(&participant);
const auto &screen = computeScreenEndpoint(&participant);
if (!screen.empty() && _largeEndpoint != screen) {
setRowVideoEndpoint(result.get(), screen);
} else if (!camera.empty() && _largeEndpoint != camera) {
setRowVideoEndpoint(result.get(), camera);
}
//const auto &camera = computeCameraEndpoint(&participant);
//const auto &screen = computeScreenEndpoint(&participant);
//if (!screen.empty() && _largeEndpoint != screen) {
// setRowVideoEndpoint(result.get(), screen);
//} else if (!camera.empty() && _largeEndpoint != camera) {
// setRowVideoEndpoint(result.get(), camera);
//}
return result;
}
@ -1612,7 +1607,7 @@ void Members::setupAddMember(not_null<GroupCall*> call) {
}
rpl::producer<> Members::enlargeVideo() const {
return _pinnedVideo->clicks();
return _enlargeVideoClicks.events();
}
Row *Members::lookupRow(not_null<PeerData*> peer) const {
@ -1624,7 +1619,9 @@ void Members::setMode(PanelMode mode) {
return;
}
_mode = mode;
_pinnedVideo->setVisible(mode == PanelMode::Default);
for (const auto &tile : _videoTiles) {
tile.video->setVisible(mode == PanelMode::Default);
}
_list->setMode((mode == PanelMode::Wide)
? PeerListContent::Mode::Custom
: PeerListContent::Mode::Default);
@ -1655,29 +1652,121 @@ void Members::setupList() {
updateControlsGeometry();
}
void Members::refreshTilesGeometry() {
const auto width = _layout->width();
if (_videoTiles.empty()
|| !width
|| _mode.current() == PanelMode::Wide) {
_pinnedVideoWrap->resize(width, 0);
return;
}
auto sizes = base::flat_map<not_null<LargeVideo*>, QSize>();
sizes.reserve(_videoTiles.size());
for (const auto &tile : _videoTiles) {
const auto video = tile.video.get();
const auto size = video->trackSize();
if (size.isEmpty()) {
video->setGeometry(0, 0, width, 0);
} else {
sizes.emplace(video, size);
}
}
if (sizes.empty()) {
_pinnedVideoWrap->resize(width, 0);
return;
} else if (sizes.size() == 1) {
const auto size = sizes.front().second;
const auto heightMin = (width * 9) / 16;
const auto heightMax = (width * 3) / 4;
const auto scaled = size.scaled(
QSize(width, heightMax),
Qt::KeepAspectRatio);
const auto height = std::max(scaled.height(), heightMin);
sizes.front().first->setGeometry(0, 0, width, height);
_pinnedVideoWrap->resize(width, height);
return;
}
const auto square = (width - st::groupCallVideoSmallSkip) / 2;
const auto skip = (width - 2 * square);
const auto put = [&](not_null<LargeVideo*> video, int column, int row) {
video->setGeometry(
(column == 2) ? 0 : column ? (width - square) : 0,
row * (square + skip),
(column == 2) ? width : square,
square);
};
const auto rows = (sizes.size() + 1) / 2;
if (sizes.size() == 3) {
put(sizes.front().first, 2, 0);
put((sizes.begin() + 1)->first, 0, 1);
put((sizes.begin() + 2)->first, 1, 1);
} else {
auto row = 0;
auto column = 0;
for (const auto &[video, endpoint] : sizes) {
put(video, column, row);
if (column) {
++row;
column = (row + 1 == rows && sizes.size() % 2) ? 2 : 0;
} else {
column = 1;
}
}
}
_pinnedVideoWrap->resize(width, rows * (square + skip) - skip);
}
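A standalone sketch of the narrow-panel arithmetic above: tiles become square cells laid out two per row, separated by groupCallVideoSmallSkip (4px in the style file above), and an odd trailing tile spans the full width. QRect is replaced by a plain struct, and the special case that promotes the first of exactly three tiles to a full-width row is left out for brevity; narrowLayout is a hypothetical helper name.

```cpp
#include <vector>

struct TileRect { int x = 0, y = 0, w = 0, h = 0; };

// Lay out `count` tiles as square cells, two per row; an odd trailing tile
// gets the whole width. `smallSkip` mirrors st::groupCallVideoSmallSkip.
std::vector<TileRect> narrowLayout(int width, int count, int smallSkip = 4) {
	const auto square = (width - smallSkip) / 2;
	const auto skip = width - 2 * square; // recomputed to absorb rounding
	auto result = std::vector<TileRect>();
	result.reserve(count);
	for (auto index = 0; index != count; ++index) {
		const auto row = index / 2;
		const auto fullWidth = (index == count - 1) && (count % 2 != 0);
		result.push_back({
			(!fullWidth && index % 2) ? (width - square) : 0,
			row * (square + skip),
			fullWidth ? width : square,
			square,
		});
	}
	return result;
}
```

The wrap is then resized to rows * (square + skip) - skip, as in the code above.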
void Members::setupPinnedVideo() {
using namespace rpl::mappers;
_pinnedVideo = std::make_unique<LargeVideo>(
_pinnedVideoWrap.get(),
st::groupCallLargeVideoNarrow,
true,
_call->videoLargeTrackValue(
) | rpl::map([=](GroupCall::LargeTrack track) {
const auto row = track ? lookupRow(track.peer) : nullptr;
Assert(!track || row != nullptr);
return LargeVideoTrack{ row ? track.track : nullptr, row };
}),
_call->videoEndpointPinnedValue());
const auto setupTile = [=](
const VideoEndpoint &endpoint,
const GroupCall::VideoTrack &track) {
const auto row = lookupRow(track.peer);
Assert(row != nullptr);
auto video = std::make_unique<LargeVideo>(
_pinnedVideoWrap.get(),
st::groupCallLargeVideoNarrow,
(_mode.current() == PanelMode::Default),
rpl::single(LargeVideoTrack{ track.track.get(), row }),
_call->videoEndpointPinnedValue() | rpl::map(_1 == endpoint));
_pinnedVideo->pinToggled(
) | rpl::start_with_next([=](bool pinned) {
if (!pinned) {
_call->pinVideoEndpoint(VideoEndpoint{});
} else if (const auto &large = _call->videoEndpointLarge()) {
_call->pinVideoEndpoint(large);
video->pinToggled(
) | rpl::start_with_next([=](bool pinned) {
_call->pinVideoEndpoint(pinned ? endpoint : VideoEndpoint{});
}, video->lifetime());
video->requestedQuality(
) | rpl::start_with_next([=](VideoQuality quality) {
_call->requestVideoQuality(endpoint, quality);
}, video->lifetime());
video->trackSizeValue(
) | rpl::start_with_next([=] {
refreshTilesGeometry();
}, video->lifetime());
return VideoTile{
.video = std::move(video),
.endpoint = endpoint,
};
};
for (const auto &[endpoint, track] : _call->activeVideoTracks()) {
_videoTiles.push_back(setupTile(endpoint, track));
}
_call->videoStreamActiveUpdates(
) | rpl::start_with_next([=](const VideoEndpoint &endpoint) {
const auto &tracks = _call->activeVideoTracks();
const auto i = tracks.find(endpoint);
if (i != end(tracks)) {
_videoTiles.push_back(setupTile(endpoint, i->second));
} else {
_videoTiles.erase(
ranges::remove(_videoTiles, endpoint, &VideoTile::endpoint),
end(_videoTiles));
refreshTilesGeometry();
}
}, _pinnedVideo->lifetime());
}, _pinnedVideoWrap->lifetime());
// New video was pinned or mode changed.
rpl::merge(
@ -1689,23 +1778,9 @@ void Members::setupPinnedVideo() {
_scroll->scrollToY(0);
}, _scroll->lifetime());
rpl::combine(
_layout->widthValue(),
_pinnedVideo->trackSizeValue()
) | rpl::start_with_next([=](int width, QSize size) {
if (size.isEmpty() || !width) {
_pinnedVideoWrap->resize(width, 0);
return;
}
const auto heightMin = (width * 9) / 16;
const auto heightMax = (width * 3) / 4;
const auto scaled = size.scaled(
QSize(width, heightMax),
Qt::KeepAspectRatio);
const auto height = std::max(scaled.height(), heightMin);
_pinnedVideo->setGeometry(0, 0, width, height);
_pinnedVideoWrap->resize(width, height);
}, _pinnedVideo->lifetime());
_layout->widthValue() | rpl::start_with_next([=] {
refreshTilesGeometry();
}, _pinnedVideoWrap->lifetime());
}
void Members::resizeEvent(QResizeEvent *e) {

View file

@ -61,6 +61,7 @@ public:
private:
class Controller;
struct VideoTile;
using ListWidget = PeerListContent;
void resizeEvent(QResizeEvent *e) override;
@ -87,6 +88,7 @@ private:
void setupFakeRoundCorners();
void updateControlsGeometry();
void refreshTilesGeometry();
const not_null<GroupCall*> _call;
rpl::variable<PanelMode> _mode = PanelMode();
@ -94,7 +96,8 @@ private:
std::unique_ptr<Controller> _listController;
not_null<Ui::VerticalLayout*> _layout;
const not_null<Ui::RpWidget*> _pinnedVideoWrap;
std::unique_ptr<LargeVideo> _pinnedVideo;
std::vector<VideoTile> _videoTiles;
rpl::event_stream<> _enlargeVideoClicks;
rpl::variable<Ui::RpWidget*> _addMemberButton = nullptr;
ListWidget *_list = nullptr;
rpl::event_stream<> _addMemberRequests;

View file

@ -377,38 +377,39 @@ bool MembersRow::paintVideo(
int sizew,
int sizeh,
PanelMode mode) {
if (!_videoTrackShown) {
return false;
}
const auto guard = gsl::finally([&] {
_videoTrackShown->markFrameShown();
});
const auto videoSize = _videoTrackShown->frameSize();
if (videoSize.isEmpty()
|| _videoTrackShown->state() != Webrtc::VideoState::Active) {
return false;
}
const auto videow = videoSize.width();
const auto videoh = videoSize.height();
const auto resize = (videow * sizeh > videoh * sizew)
? QSize(videow * sizeh / videoh, sizeh)
: QSize(sizew, videoh * sizew / videow);
const auto request = Webrtc::FrameRequest{
.resize = resize * cIntRetinaFactor(),
.outer = QSize(sizew, sizeh) * cIntRetinaFactor(),
};
const auto frame = _videoTrackShown->frame(request);
auto copy = frame; // #TODO calls optimize.
copy.detach();
if (mode == PanelMode::Default) {
Images::prepareCircle(copy);
} else {
Images::prepareRound(copy, ImageRoundRadius::Large);
}
p.drawImage(
QRect(QPoint(x, y), copy.size() / cIntRetinaFactor()),
copy);
return true;
return false;
//if (!_videoTrackShown) {
// return false;
//}
//const auto guard = gsl::finally([&] {
// _videoTrackShown->markFrameShown();
//});
//const auto videoSize = _videoTrackShown->frameSize();
//if (videoSize.isEmpty()
// || _videoTrackShown->state() != Webrtc::VideoState::Active) {
// return false;
//}
//const auto videow = videoSize.width();
//const auto videoh = videoSize.height();
//const auto resize = (videow * sizeh > videoh * sizew)
// ? QSize(videow * sizeh / videoh, sizeh)
// : QSize(sizew, videoh * sizew / videow);
//const auto request = Webrtc::FrameRequest{
// .resize = resize * cIntRetinaFactor(),
// .outer = QSize(sizew, sizeh) * cIntRetinaFactor(),
//};
//const auto frame = _videoTrackShown->frame(request);
//auto copy = frame; // #TODO calls optimize.
//copy.detach();
//if (mode == PanelMode::Default) {
// Images::prepareCircle(copy);
//} else {
// Images::prepareRound(copy, ImageRoundRadius::Large);
//}
//p.drawImage(
// QRect(QPoint(x, y), copy.size() / cIntRetinaFactor()),
// copy);
//return true;
}
std::tuple<int, int, int> MembersRow::UserpicInNarrowMode(
@ -860,40 +861,40 @@ void MembersRow::refreshStatus() {
_speaking);
}
not_null<Webrtc::VideoTrack*> MembersRow::createVideoTrack(
const std::string &endpoint) {
_videoTrackShown = nullptr;
_videoTrackEndpoint = endpoint;
_videoTrack = std::make_unique<Webrtc::VideoTrack>(
Webrtc::VideoState::Active);
setVideoTrack(_videoTrack.get());
return _videoTrack.get();
}
const std::string &MembersRow::videoTrackEndpoint() const {
return _videoTrackEndpoint;
}
void MembersRow::clearVideoTrack() {
_videoTrackLifetime.destroy();
_videoTrackEndpoint = std::string();
_videoTrackShown = nullptr;
_videoTrack = nullptr;
_delegate->rowUpdateRow(this);
}
void MembersRow::setVideoTrack(not_null<Webrtc::VideoTrack*> track) {
_videoTrackLifetime.destroy();
_videoTrackShown = track;
_videoTrackShown->renderNextFrame(
) | rpl::start_with_next([=] {
_delegate->rowUpdateRow(this);
if (_videoTrackShown->frameSize().isEmpty()) {
_videoTrackShown->markFrameShown();
}
}, _videoTrackLifetime);
_delegate->rowUpdateRow(this);
}
//not_null<Webrtc::VideoTrack*> MembersRow::createVideoTrack(
// const std::string &endpoint) {
// _videoTrackShown = nullptr;
// _videoTrackEndpoint = endpoint;
// _videoTrack = std::make_unique<Webrtc::VideoTrack>(
// Webrtc::VideoState::Active);
// setVideoTrack(_videoTrack.get());
// return _videoTrack.get();
//}
//
//const std::string &MembersRow::videoTrackEndpoint() const {
// return _videoTrackEndpoint;
//}
//
//void MembersRow::clearVideoTrack() {
// _videoTrackLifetime.destroy();
// _videoTrackEndpoint = std::string();
// _videoTrackShown = nullptr;
// _videoTrack = nullptr;
// _delegate->rowUpdateRow(this);
//}
//
//void MembersRow::setVideoTrack(not_null<Webrtc::VideoTrack*> track) {
// _videoTrackLifetime.destroy();
// _videoTrackShown = track;
// _videoTrackShown->renderNextFrame(
// ) | rpl::start_with_next([=] {
// _delegate->rowUpdateRow(this);
// if (_videoTrackShown->frameSize().isEmpty()) {
// _videoTrackShown->markFrameShown();
// }
// }, _videoTrackLifetime);
// _delegate->rowUpdateRow(this);
//}
void MembersRow::addActionRipple(QPoint point, Fn<void()> updateCallback) {
if (!_actionRipple) {

View file

@ -13,9 +13,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
class PeerData;
class Painter;
namespace Webrtc {
class VideoTrack;
} // namespace Webrtc
//namespace Webrtc {
//class VideoTrack;
//} // namespace Webrtc
namespace Data {
struct GroupCallParticipant;
@ -115,11 +115,11 @@ public:
return _raisedHandRating;
}
[[nodiscard]] not_null<Webrtc::VideoTrack*> createVideoTrack(
const std::string &endpoint);
void clearVideoTrack();
[[nodiscard]] const std::string &videoTrackEndpoint() const;
void setVideoTrack(not_null<Webrtc::VideoTrack*> track);
//[[nodiscard]] not_null<Webrtc::VideoTrack*> createVideoTrack(
// const std::string &endpoint);
//void clearVideoTrack();
//[[nodiscard]] const std::string &videoTrackEndpoint() const;
//void setVideoTrack(not_null<Webrtc::VideoTrack*> track);
void addActionRipple(QPoint point, Fn<void()> updateCallback) override;
void stopLastActionRipple() override;
@ -236,10 +236,10 @@ private:
std::unique_ptr<Ui::RippleAnimation> _actionRipple;
std::unique_ptr<BlobsAnimation> _blobsAnimation;
std::unique_ptr<StatusIcon> _statusIcon;
std::unique_ptr<Webrtc::VideoTrack> _videoTrack;
Webrtc::VideoTrack *_videoTrackShown = nullptr;
std::string _videoTrackEndpoint;
rpl::lifetime _videoTrackLifetime; // #TODO calls move to unique_ptr.
//std::unique_ptr<Webrtc::VideoTrack> _videoTrack;
//Webrtc::VideoTrack *_videoTrackShown = nullptr;
//std::string _videoTrackEndpoint;
//rpl::lifetime _videoTrackLifetime; // #TODO calls move to unique_ptr.
Ui::Animations::Simple _speakingAnimation; // For gray-red/green icon.
Ui::Animations::Simple _mutedAnimation; // For gray/red icon.
Ui::Animations::Simple _activeAnimation; // For icon cross animation.

View file

@ -379,6 +379,11 @@ std::unique_ptr<PeerListRow> InviteContactsController::createRow(
} // namespace
struct Panel::VideoTile {
std::unique_ptr<LargeVideo> video;
VideoEndpoint endpoint;
};
Panel::Panel(not_null<GroupCall*> call)
: _call(call)
, _peer(call->peer())
@ -978,7 +983,7 @@ void Panel::setupMembers() {
}, _callLifetime);
_call->videoEndpointPinnedValue(
) | rpl::filter([=](bool pinned) {
) | rpl::filter([=](const VideoEndpoint &pinned) {
return pinned && (_mode == PanelMode::Default);
}) | rpl::start_with_next([=] {
enlargeVideo();
@ -1072,50 +1077,190 @@ void Panel::raiseControls() {
_mute->raise();
}
void Panel::setupPinnedVideo() {
auto track = _call->videoLargeTrackValue(
) | rpl::map([=](GroupCall::LargeTrack track) {
const auto row = track ? _members->lookupRow(track.peer) : nullptr;
Assert(!track || row != nullptr);
return LargeVideoTrack{
row ? track.track : nullptr,
row
};
});
const auto visible = (_mode == PanelMode::Wide);
_pinnedVideo = std::make_unique<LargeVideo>(
widget(),
st::groupCallLargeVideoWide,
visible,
std::move(track),
_call->videoEndpointPinnedValue());
_pinnedVideo->minimizeClicks(
) | rpl::start_with_next([=] {
minimizeVideo();
}, _pinnedVideo->lifetime());
_pinnedVideo->pinToggled(
) | rpl::start_with_next([=](bool pinned) {
if (!pinned) {
_call->pinVideoEndpoint(VideoEndpoint{});
} else if (const auto &large = _call->videoEndpointLarge()) {
_call->pinVideoEndpoint(large);
void Panel::refreshTilesGeometry() {
const auto outer = _pinnedVideoWrap->size();
if (_videoTiles.empty()
|| outer.isEmpty()
|| _mode == PanelMode::Default) {
return;
}
struct Geometry {
QSize size;
QRect columns;
QRect rows;
};
auto sizes = base::flat_map<not_null<LargeVideo*>, Geometry>();
sizes.reserve(_videoTiles.size());
for (const auto &tile : _videoTiles) {
const auto video = tile.video.get();
const auto size = video->trackSize();
if (size.isEmpty()) {
video->setGeometry(0, 0, outer.width(), 0);
} else {
sizes.emplace(video, Geometry{ size });
}
}, _pinnedVideo->lifetime());
_pinnedVideo->controlsShown(
) | rpl::filter([=](float64 shown) {
return (_pinnedVideoControlsShown != shown);
}) | rpl::start_with_next([=](float64 shown) {
const auto hiding = (shown <= _pinnedVideoControlsShown);
_pinnedVideoControlsShown = shown;
if (_mode == PanelMode::Wide) {
if (hiding && _trackControlsLifetime) {
_trackControlsLifetime.destroy();
} else if (!hiding && !_trackControlsLifetime) {
trackControls();
}
if (sizes.empty()) {
return;
} else if (sizes.size() == 1) {
sizes.front().first->setGeometry(0, 0, outer.width(), outer.height());
return;
}
auto columnsBlack = uint64();
auto rowsBlack = uint64();
const auto count = int(sizes.size());
const auto skip = st::groupCallVideoLargeSkip;
const auto slices = int(std::ceil(std::sqrt(float64(count))));
{
auto index = 0;
const auto columns = slices;
const auto sizew = (outer.width() + skip) / float64(columns);
for (auto column = 0; column != columns; ++column) {
const auto left = int(std::round(column * sizew));
const auto width = int(std::round(column * sizew + sizew - skip))
- left;
const auto rows = int(std::round((count - index)
/ float64(columns - column)));
const auto sizeh = (outer.height() + skip) / float64(rows);
for (auto row = 0; row != rows; ++row) {
const auto top = int(std::round(row * sizeh));
const auto height = int(std::round(
row * sizeh + sizeh - skip)) - top;
auto &geometry = (sizes.begin() + index)->second;
geometry.columns = {
left,
top,
width,
height };
const auto scaled = geometry.size.scaled(
width,
height,
Qt::KeepAspectRatio);
columnsBlack += (scaled.width() < width)
? (width - scaled.width()) * height
: (height - scaled.height()) * width;
++index;
}
updateButtonsGeometry();
}
}, _pinnedVideo->lifetime());
}
{
auto index = 0;
const auto rows = slices;
const auto sizeh = (outer.height() + skip) / float64(rows);
for (auto row = 0; row != rows; ++row) {
const auto top = int(std::round(row * sizeh));
const auto height = int(std::round(row * sizeh + sizeh - skip))
- top;
const auto columns = int(std::round((count - index)
/ float64(rows - row)));
const auto sizew = (outer.width() + skip) / float64(columns);
for (auto column = 0; column != columns; ++column) {
const auto left = int(std::round(column * sizew));
const auto width = int(std::round(
column * sizew + sizew - skip)) - left;
auto &geometry = (sizes.begin() + index)->second;
geometry.rows = {
left,
top,
width,
height };
const auto scaled = geometry.size.scaled(
width,
height,
Qt::KeepAspectRatio);
rowsBlack += (scaled.width() < width)
? (width - scaled.width()) * height
: (height - scaled.height()) * width;
++index;
}
}
}
for (const auto &[video, geometry] : sizes) {
const auto &rect = (columnsBlack < rowsBlack)
? geometry.columns
: geometry.rows;
video->setGeometry(rect.x(), rect.y(), rect.width(), rect.height());
}
}
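The wide-panel packing above builds two candidate grids from slices = ceil(sqrt(count)), one filling column by column and one row by row, and keeps whichever leaves less letterboxed ("black") area after KeepAspectRatio scaling. A standalone sketch of the same idea with plain structs in place of QSize/QRect (wideLayout and blackArea are hypothetical helper names; skip mirrors groupCallVideoLargeSkip, 6px in the style file above; track sizes are assumed non-empty, as in the real code):

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

struct TrackSize { int w = 0, h = 0; };
struct TileRect { int x = 0, y = 0, w = 0, h = 0; };

// Area of a w x h cell left uncovered after fitting `track` into it while
// keeping its aspect ratio (the "black bars").
static std::uint64_t blackArea(TrackSize track, int w, int h) {
	const auto scale = std::min(double(w) / track.w, double(h) / track.h);
	const auto sw = int(std::round(track.w * scale));
	const auto sh = int(std::round(track.h * scale));
	return std::uint64_t(w) * h - std::uint64_t(sw) * sh;
}

std::vector<TileRect> wideLayout(
		int outerWidth,
		int outerHeight,
		const std::vector<TrackSize> &tracks,
		int skip = 6) {
	const auto count = int(tracks.size());
	const auto slices = int(std::ceil(std::sqrt(double(count))));
	// Build one grid; `fixColumns` decides whether the outer loop walks
	// columns (left to right) or rows (top to bottom).
	const auto build = [&](bool fixColumns) {
		auto rects = std::vector<TileRect>(count);
		auto black = std::uint64_t(0);
		auto index = 0;
		const auto outerMajor = fixColumns ? outerWidth : outerHeight;
		const auto sizeMajor = (outerMajor + skip) / double(slices);
		for (auto major = 0; major != slices; ++major) {
			const auto from = int(std::round(major * sizeMajor));
			const auto extent = int(std::round(
				major * sizeMajor + sizeMajor - skip)) - from;
			// Spread the remaining tiles evenly over the remaining slices.
			const auto minorCount = int(std::round(
				(count - index) / double(slices - major)));
			const auto outerMinor = fixColumns ? outerHeight : outerWidth;
			const auto sizeMinor = (outerMinor + skip) / double(minorCount);
			for (auto minor = 0; minor != minorCount; ++minor, ++index) {
				const auto mfrom = int(std::round(minor * sizeMinor));
				const auto mextent = int(std::round(
					minor * sizeMinor + sizeMinor - skip)) - mfrom;
				rects[index] = fixColumns
					? TileRect{ from, mfrom, extent, mextent }
					: TileRect{ mfrom, from, mextent, extent };
				black += blackArea(
					tracks[index],
					rects[index].w,
					rects[index].h);
			}
		}
		return std::make_pair(std::move(rects), black);
	};
	const auto byColumns = build(true);
	const auto byRows = build(false);
	return (byColumns.second < byRows.second)
		? byColumns.first
		: byRows.first;
}
```

With three tracks, for instance, one pass produces a 2+1 column split and the other a 2+1 row split, and the track aspect ratios decide which of the two wastes less area.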
void Panel::setupPinnedVideo() {
using namespace rpl::mappers;
_pinnedVideoWrap = std::make_unique<Ui::RpWidget>(widget());
const auto setupTile = [=](
const VideoEndpoint &endpoint,
const GroupCall::VideoTrack &track) {
const auto row = _members->lookupRow(track.peer);
Assert(row != nullptr);
auto video = std::make_unique<LargeVideo>(
_pinnedVideoWrap.get(),
st::groupCallLargeVideoNarrow,
(_mode == PanelMode::Wide),
rpl::single(LargeVideoTrack{ track.track.get(), row }),
_call->videoEndpointPinnedValue() | rpl::map(_1 == endpoint));
video->pinToggled(
) | rpl::start_with_next([=](bool pinned) {
_call->pinVideoEndpoint(pinned ? endpoint : VideoEndpoint{});
}, video->lifetime());
video->requestedQuality(
) | rpl::start_with_next([=](VideoQuality quality) {
_call->requestVideoQuality(endpoint, quality);
}, video->lifetime());
video->minimizeClicks(
) | rpl::start_with_next([=] {
minimizeVideo();
}, video->lifetime());
video->trackSizeValue(
) | rpl::start_with_next([=] {
refreshTilesGeometry();
}, video->lifetime());
video->controlsShown(
) | rpl::filter([=](float64 shown) {
return (_pinnedVideoControlsShown != shown);
}) | rpl::start_with_next([=](float64 shown) {
const auto hiding = (shown <= _pinnedVideoControlsShown);
_pinnedVideoControlsShown = shown;
if (_mode == PanelMode::Wide) {
if (hiding && _trackControlsLifetime) {
_trackControlsLifetime.destroy();
} else if (!hiding && !_trackControlsLifetime) {
trackControls();
}
updateButtonsGeometry();
}
}, video->lifetime());
return VideoTile{
.video = std::move(video),
.endpoint = endpoint,
};
};
for (const auto &[endpoint, track] : _call->activeVideoTracks()) {
_videoTiles.push_back(setupTile(endpoint, track));
}
_call->videoStreamActiveUpdates(
) | rpl::start_with_next([=](const VideoEndpoint &endpoint) {
const auto &tracks = _call->activeVideoTracks();
const auto i = tracks.find(endpoint);
if (i != end(tracks)) {
_videoTiles.push_back(setupTile(endpoint, i->second));
} else {
_videoTiles.erase(
ranges::remove(_videoTiles, endpoint, &VideoTile::endpoint),
end(_videoTiles));
refreshTilesGeometry();
}
}, _pinnedVideoWrap->lifetime());
_pinnedVideoWrap->sizeValue() | rpl::start_with_next([=] {
refreshTilesGeometry();
}, _pinnedVideoWrap->lifetime());
raiseControls();
}
@ -1626,8 +1771,11 @@ bool Panel::updateMode() {
if (_members) {
_members->setMode(mode);
}
if (_pinnedVideo) {
_pinnedVideo->setVisible(mode == PanelMode::Wide);
if (_pinnedVideoWrap) {
_pinnedVideoWrap->setVisible(mode == PanelMode::Wide);
for (const auto &tile : _videoTiles) {
tile.video->setVisible(mode == PanelMode::Wide);
}
}
refreshControlsBackground();
updateControlsGeometry();
@ -1669,11 +1817,11 @@ void Panel::trackControls() {
if (widget) {
widget->events(
) | rpl::start_with_next([=](not_null<QEvent*> e) {
if (e->type() == QEvent::Enter) {
_pinnedVideo->setControlsShown(true);
} else if (e->type() == QEvent::Leave) {
_pinnedVideo->setControlsShown(false);
}
//if (e->type() == QEvent::Enter) {
// _pinnedVideo->setControlsShown(true);
//} else if (e->type() == QEvent::Leave) {
// _pinnedVideo->setControlsShown(false);
//}
}, _trackControlsLifetime);
}
};
@ -1834,7 +1982,7 @@ void Panel::updateMembersGeometry() {
top,
membersWidth,
std::min(desiredHeight, widget()->height()));
_pinnedVideo->setGeometry(
_pinnedVideoWrap->setGeometry(
membersWidth,
top,
widget()->width() - membersWidth - skip,

View file

@ -70,6 +70,7 @@ public:
private:
using State = GroupCall::State;
struct VideoTile;
[[nodiscard]] not_null<Ui::RpWidget*> widget() const;
@ -103,6 +104,7 @@ private:
void refreshControlsBackground();
void showControls();
void refreshLeftButton();
void refreshTilesGeometry();
void endCall();
@ -146,8 +148,9 @@ private:
object_ptr<Ui::DropdownMenu> _menu = { nullptr };
object_ptr<Ui::AbstractButton> _joinAsToggle = { nullptr };
object_ptr<Members> _members = { nullptr };
std::unique_ptr<LargeVideo> _pinnedVideo;
std::unique_ptr<Ui::RpWidget> _pinnedVideoWrap;
float64 _pinnedVideoControlsShown = 1.;
std::vector<VideoTile> _videoTiles;
rpl::lifetime _trackControlsLifetime;
object_ptr<Ui::FlatLabel> _startsIn = { nullptr };
object_ptr<Ui::RpWidget> _countdown = { nullptr };