From 0dca5568436e97ad94ef5fa91934c47c4da3b643 Mon Sep 17 00:00:00 2001 From: John Preston Date: Tue, 22 Mar 2022 17:56:43 +0400 Subject: [PATCH] Use FFmpeg hardware acceleration in media viewer / PiP. --- .../SourceFiles/ffmpeg/ffmpeg_utility.cpp | 107 +++++++++++++++++- Telegram/SourceFiles/ffmpeg/ffmpeg_utility.h | 7 +- .../streaming/media_streaming_audio_track.cpp | 12 +- .../media/streaming/media_streaming_common.h | 1 + .../media/streaming/media_streaming_file.cpp | 57 +++++++--- .../media/streaming/media_streaming_file.h | 10 +- .../streaming/media_streaming_player.cpp | 2 +- .../streaming/media_streaming_utility.cpp | 37 ++++-- .../media/streaming/media_streaming_utility.h | 9 +- .../streaming/media_streaming_video_track.cpp | 76 ++++++++++--- .../streaming/media_streaming_video_track.h | 1 + .../media/view/media_view_overlay_widget.cpp | 1 + .../SourceFiles/media/view/media_view_pip.cpp | 1 + Telegram/build/prepare/prepare.py | 8 +- 14 files changed, 272 insertions(+), 57 deletions(-) diff --git a/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.cpp b/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.cpp index a01b9fe61..513231ffa 100644 --- a/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.cpp +++ b/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.cpp @@ -31,6 +31,15 @@ constexpr auto kAvioBlockSize = 4096; constexpr auto kTimeUnknown = std::numeric_limits::min(); constexpr auto kDurationMax = crl::time(std::numeric_limits::max()); +using GetFormatMethod = enum AVPixelFormat(*)( + struct AVCodecContext *s, + const enum AVPixelFormat *fmt); + +struct HwAccelDescriptor { + GetFormatMethod getFormat = nullptr; + AVPixelFormat format = AV_PIX_FMT_NONE; +}; + void AlignedImageBufferCleanupHandler(void* data) { const auto buffer = static_cast(data); delete[] buffer; @@ -76,6 +85,69 @@ void PremultiplyLine(uchar *dst, const uchar *src, int intsCount) { #endif // LIB_FFMPEG_USE_QT_PRIVATE_API } +template +enum AVPixelFormat GetFormatImplementation( + AVCodecContext *ctx, + const enum AVPixelFormat *pix_fmts) { + const enum AVPixelFormat *p = nullptr; + for (p = pix_fmts; *p != -1; p++) { + if (*p == Required) { + return *p; + } + } + return AV_PIX_FMT_NONE; +} + +template +[[nodiscard]] HwAccelDescriptor HwAccelByFormat() { + return { + .getFormat = GetFormatImplementation, + .format = Format, + }; +} + +[[nodiscard]] HwAccelDescriptor ResolveHwAccel( + not_null decoder, + AVHWDeviceType type) { + Expects(type != AV_HWDEVICE_TYPE_NONE); + + const auto format = [&] { + for (auto i = 0;; i++) { + const auto config = avcodec_get_hw_config(decoder, i); + if (!config) { + break; + } else if (config->device_type == type + && (config->methods + & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) { + return config->pix_fmt; + } + } + return AV_PIX_FMT_NONE; + }(); + + switch (format) { +#ifdef Q_OS_WIN + case AV_PIX_FMT_D3D11: + return HwAccelByFormat(); + case AV_PIX_FMT_DXVA2_VLD: + return HwAccelByFormat(); + case AV_PIX_FMT_D3D11VA_VLD: + return HwAccelByFormat(); +#elif defined Q_OS_MAC // Q_OS_WIN + case AV_PIX_FMT_VIDEOTOOLBOX: + return HwAccelByFormat(); +#else // Q_OS_WIN || Q_OS_MAC + case AV_PIX_FMT_VAAPI: + return HwAccelByFormat(); + case AV_PIX_FMT_VDPAU: + return HwAccelByFormat(); +#endif // Q_OS_WIN || Q_OS_MAC + case AV_PIX_FMT_CUDA: + return HwAccelByFormat(); + } + return {}; +} + } // namespace IOPointer MakeIOPointer( @@ -161,7 +233,7 @@ const AVCodec *FindDecoder(not_null context) { : avcodec_find_decoder(context->codec_id); } -CodecPointer MakeCodecPointer(not_null stream) { +CodecPointer 
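// Hardware decoding is negotiated through AVCodecContext::get_format: FFmpeg
// calls that callback with the candidate pixel formats for the stream, and the
// callback returns the one the application accepts (here, the format matched
// to the chosen AVHWDeviceType by ResolveHwAccel above). A minimal standalone
// sketch of that pattern, assuming VA-API is the wanted format; PickVaapiFormat
// is an illustrative name, not part of this patch.

extern "C" {
#include <libavcodec/avcodec.h>
} // extern "C"

enum AVPixelFormat PickVaapiFormat(
		AVCodecContext *context,
		const enum AVPixelFormat *formats) {
	// Walk the candidate list FFmpeg offers for this codec configuration.
	for (auto format = formats; *format != AV_PIX_FMT_NONE; ++format) {
		if (*format == AV_PIX_FMT_VAAPI) {
			return *format; // Accept the hardware surface format.
		}
	}
	// Nothing acceptable: decoding will fail and the caller can retry
	// with a plain software codec context.
	return AV_PIX_FMT_NONE;
}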
MakeCodecPointer(CodecDescriptor descriptor) { auto error = AvErrorWrap(); auto result = CodecPointer(avcodec_alloc_context3(nullptr)); @@ -170,6 +242,7 @@ CodecPointer MakeCodecPointer(not_null stream) { LogError(qstr("avcodec_alloc_context3")); return {}; } + const auto stream = descriptor.stream; error = avcodec_parameters_to_context(context, stream->codecpar); if (error) { LogError(qstr("avcodec_parameters_to_context"), error); @@ -183,7 +256,37 @@ CodecPointer MakeCodecPointer(not_null stream) { if (!codec) { LogError(qstr("avcodec_find_decoder"), context->codec_id); return {}; - } else if ((error = avcodec_open2(context, codec, nullptr))) { + } + + if (descriptor.type != AV_HWDEVICE_TYPE_NONE) { + const auto hw = ResolveHwAccel(codec, descriptor.type); + if (!hw.getFormat) { + return {}; + } + context->get_format = hw.getFormat; + auto hwDeviceContext = (AVBufferRef*)nullptr; + error = av_hwdevice_ctx_create( + &hwDeviceContext, + descriptor.type, + nullptr, + nullptr, + 0); + if (error || !hwDeviceContext) { + LogError(qstr("av_hwdevice_ctx_create"), error); + return {}; + } + DEBUG_LOG(("Video Info: " + "Using \"%1\" hardware acceleration for \"%2\" decoder." + ).arg(av_hwdevice_get_type_name(descriptor.type) + ).arg(codec->name)); + context->hw_device_ctx = av_buffer_ref(hwDeviceContext); + av_buffer_unref(&hwDeviceContext); + } else { + DEBUG_LOG(("Video Info: Using software \"%2\" decoder." + ).arg(codec->name)); + } + + if ((error = avcodec_open2(context, codec, nullptr))) { LogError(qstr("avcodec_open2"), error); return {}; } diff --git a/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.h b/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.h index 6f502dcaa..1a117961d 100644 --- a/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.h +++ b/Telegram/SourceFiles/ffmpeg/ffmpeg_utility.h @@ -125,7 +125,12 @@ struct CodecDeleter { void operator()(AVCodecContext *value); }; using CodecPointer = std::unique_ptr; -[[nodiscard]] CodecPointer MakeCodecPointer(not_null stream); + +struct CodecDescriptor { + not_null stream; + AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE; +}; +[[nodiscard]] CodecPointer MakeCodecPointer(CodecDescriptor descriptor); struct FrameDeleter { void operator()(AVFrame *value); diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_audio_track.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_audio_track.cpp index dc573c06c..7b538ca24 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_audio_track.cpp +++ b/Telegram/SourceFiles/media/streaming/media_streaming_audio_track.cpp @@ -86,7 +86,7 @@ bool AudioTrack::tryReadFirstFrame(FFmpeg::Packet &&packet) { return false; } // Return the last valid frame if we seek too far. - _stream.frame = std::move(_initialSkippingFrame); + _stream.decodedFrame = std::move(_initialSkippingFrame); return processFirstFrame(); } else if (error.code() != AVERROR(EAGAIN) || _readTillEnd) { return false; @@ -102,15 +102,15 @@ bool AudioTrack::tryReadFirstFrame(FFmpeg::Packet &&packet) { // Seek was with AVSEEK_FLAG_BACKWARD so first we get old frames. // Try skipping frames until one is after the requested position. 
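// av_hwdevice_ctx_create() above is what actually opens the GPU decoding
// device (D3D11, DXVA2, VideoToolbox, VA-API, VDPAU or CUDA) and hands it to
// the codec context. A trimmed-down sketch of the same call sequence, assuming
// the codec context is already filled from the stream parameters;
// AttachHwDevice is an illustrative name, not part of this patch.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
} // extern "C"

bool AttachHwDevice(AVCodecContext *context, AVHWDeviceType type) {
	AVBufferRef *device = nullptr;
	const auto error = av_hwdevice_ctx_create(
		&device,
		type, // e.g. AV_HWDEVICE_TYPE_VAAPI
		nullptr, // default device
		nullptr, // no device options
		0);
	if (error < 0 || !device) {
		return false; // Caller falls back to the next type or to software.
	}
	// The codec context keeps its own reference, the local one is released.
	context->hw_device_ctx = av_buffer_ref(device);
	av_buffer_unref(&device);
	return context->hw_device_ctx != nullptr;
}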
- std::swap(_initialSkippingFrame, _stream.frame); - if (!_stream.frame) { - _stream.frame = FFmpeg::MakeFramePointer(); + std::swap(_initialSkippingFrame, _stream.decodedFrame); + if (!_stream.decodedFrame) { + _stream.decodedFrame = FFmpeg::MakeFramePointer(); } } } bool AudioTrack::processFirstFrame() { - if (!FFmpeg::FrameHasData(_stream.frame.get())) { + if (!FFmpeg::FrameHasData(_stream.decodedFrame.get())) { return false; } mixerInit(); @@ -131,7 +131,7 @@ void AudioTrack::mixerInit() { Expects(!initialized()); auto data = std::make_unique(); - data->frame = std::move(_stream.frame); + data->frame = std::move(_stream.decodedFrame); data->codec = std::move(_stream.codec); data->frequency = _stream.frequency; data->length = (_stream.duration * data->frequency) / 1000LL; diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_common.h b/Telegram/SourceFiles/media/streaming/media_streaming_common.h index 752a737a8..997445783 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_common.h +++ b/Telegram/SourceFiles/media/streaming/media_streaming_common.h @@ -45,6 +45,7 @@ struct PlaybackOptions { AudioMsgId audioId; bool syncVideoByAudio = true; bool waitForMarkAsShown = false; + bool hwAllow = false; bool loop = false; }; diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp index 3d2658c88..403f13001 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp +++ b/Telegram/SourceFiles/media/streaming/media_streaming_file.cpp @@ -148,7 +148,8 @@ void File::Context::logFatal( Stream File::Context::initStream( not_null format, AVMediaType type, - Mode mode) { + Mode mode, + bool hwAllowed) { auto result = Stream(); const auto index = result.index = av_find_best_stream( format, @@ -158,31 +159,56 @@ Stream File::Context::initStream( nullptr, 0); if (index < 0) { - return result; + return {}; } const auto info = format->streams[index]; + const auto tryCreateCodec = [&](AVHWDeviceType type) { + result.codec = FFmpeg::MakeCodecPointer({ + .stream = info, + .type = type, + }); + return (result.codec != nullptr); + }; if (type == AVMEDIA_TYPE_VIDEO) { if (info->disposition & AV_DISPOSITION_ATTACHED_PIC) { // ignore cover streams return Stream(); } + const auto hwAccelTypes = std::array{ +#ifdef Q_OS_WIN + AV_HWDEVICE_TYPE_D3D11VA, + AV_HWDEVICE_TYPE_DXVA2, +#elif defined Q_OS_MAC // Q_OS_WIN + AV_HWDEVICE_TYPE_VIDEOTOOLBOX, +#else // Q_OS_WIN || Q_OS_MAC + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_VDPAU, +#endif // Q_OS_WIN || Q_OS_MAC + AV_HWDEVICE_TYPE_CUDA, + AV_HWDEVICE_TYPE_NONE, + }; + for (const auto type : hwAccelTypes) { + if (tryCreateCodec(type)) { + break; + } + } + if (!result.codec) { + return result; + } result.rotation = FFmpeg::ReadRotationFromMetadata(info); result.aspect = FFmpeg::ValidateAspectRatio(info->sample_aspect_ratio); } else if (type == AVMEDIA_TYPE_AUDIO) { result.frequency = info->codecpar->sample_rate; if (!result.frequency) { return result; + } else if (!tryCreateCodec(AV_HWDEVICE_TYPE_NONE)) { + return result; } } - result.codec = FFmpeg::MakeCodecPointer(info); - if (!result.codec) { - return result; - } - - result.frame = FFmpeg::MakeFramePointer(); - if (!result.frame) { + result.decodedFrame = FFmpeg::MakeFramePointer(); + if (!result.decodedFrame) { result.codec = nullptr; return result; } @@ -260,7 +286,7 @@ std::variant File::Context::readPacket() { return error; } -void File::Context::start(crl::time position) { +void 
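// The hwAccelTypes list above is tried in order and always ends with
// AV_HWDEVICE_TYPE_NONE, so a plain software decoder remains the final
// fallback when no hardware path opens. The same idea in isolation (a sketch;
// OpenVideoDecoder and the tryCreateCodec parameter are illustrative names
// mirroring the lambda above, not additional patch code):

extern "C" {
#include <libavutil/hwcontext.h>
} // extern "C"

#include <array>
#include <functional>

bool OpenVideoDecoder(
		const std::function<bool(AVHWDeviceType)> &tryCreateCodec) {
	const auto types = std::array{
		AV_HWDEVICE_TYPE_VAAPI, // the real list is platform-specific, see above
		AV_HWDEVICE_TYPE_CUDA,
		AV_HWDEVICE_TYPE_NONE, // software decoding as the last resort
	};
	for (const auto type : types) {
		if (tryCreateCodec(type)) {
			return true;
		}
	}
	return false;
}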
File::Context::start(crl::time position, bool hwAllow) { auto error = FFmpeg::AvErrorWrap(); if (unroll()) { @@ -280,12 +306,12 @@ void File::Context::start(crl::time position) { } const auto mode = _delegate->fileOpenMode(); - auto video = initStream(format.get(), AVMEDIA_TYPE_VIDEO, mode); + auto video = initStream(format.get(), AVMEDIA_TYPE_VIDEO, mode, hwAllow); if (unroll()) { return; } - auto audio = initStream(format.get(), AVMEDIA_TYPE_AUDIO, mode); + auto audio = initStream(format.get(), AVMEDIA_TYPE_AUDIO, mode, false); if (unroll()) { return; } @@ -425,7 +451,10 @@ File::File(std::shared_ptr reader) : _reader(std::move(reader)) { } -void File::start(not_null delegate, crl::time position) { +void File::start( + not_null delegate, + crl::time position, + bool hwAllow) { stop(true); _reader->startStreaming(); @@ -433,7 +462,7 @@ void File::start(not_null delegate, crl::time position) { _thread = std::thread([=, context = &*_context] { crl::toggle_fp_exceptions(true); - context->start(position); + context->start(position, hwAllow); while (!context->finished()) { context->readNextPacket(); } diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_file.h b/Telegram/SourceFiles/media/streaming/media_streaming_file.h index 8b75bf86d..e72da47ee 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_file.h +++ b/Telegram/SourceFiles/media/streaming/media_streaming_file.h @@ -28,7 +28,10 @@ public: File(const File &other) = delete; File &operator=(const File &other) = delete; - void start(not_null delegate, crl::time position); + void start( + not_null delegate, + crl::time position, + bool hwAllow); void wake(); void stop(bool stillActive = false); @@ -43,7 +46,7 @@ private: Context(not_null delegate, not_null reader); ~Context(); - void start(crl::time position); + void start(crl::time position, bool hwAllow); void readNextPacket(); void interrupt(); @@ -75,7 +78,8 @@ private: [[nodiscard]] Stream initStream( not_null format, AVMediaType type, - Mode mode); + Mode mode, + bool hwAllowed); void seekToPosition( not_null format, const Stream &stream, diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp index 3a60b39a0..0dce3142f 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp +++ b/Telegram/SourceFiles/media/streaming/media_streaming_player.cpp @@ -544,7 +544,7 @@ void Player::play(const PlaybackOptions &options) { _options.speed = 1.; } _stage = Stage::Initializing; - _file->start(delegate(), _options.position); + _file->start(delegate(), _options.position, _options.hwAllow); } void Player::savePreviousReceivedTill( diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp index e33c4ffba..9a9ec5082 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp +++ b/Telegram/SourceFiles/media/streaming/media_streaming_utility.cpp @@ -20,13 +20,13 @@ constexpr auto kSkipInvalidDataPackets = 10; } // namespace crl::time FramePosition(const Stream &stream) { - const auto pts = !stream.frame + const auto pts = !stream.decodedFrame ? AV_NOPTS_VALUE - : (stream.frame->best_effort_timestamp != AV_NOPTS_VALUE) - ? stream.frame->best_effort_timestamp - : (stream.frame->pts != AV_NOPTS_VALUE) - ? stream.frame->pts - : stream.frame->pkt_dts; + : (stream.decodedFrame->best_effort_timestamp != AV_NOPTS_VALUE) + ? 
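// FramePosition() prefers best_effort_timestamp, then pts, then pkt_dts, and
// converts the chosen value with FFmpeg::PtsToTime(). Turning a stream
// timestamp into milliseconds is a plain rescale against the stream time base;
// a sketch using only public FFmpeg calls (TimestampToMilliseconds is an
// illustrative name, and the millisecond target is an assumption mirroring
// crl::time, not code from this patch):

extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
} // extern "C"

int64_t TimestampToMilliseconds(int64_t pts, AVRational timeBase) {
	if (pts == AV_NOPTS_VALUE) {
		return -1; // unknown position, the caller decides what to do with it
	}
	return av_rescale_q(pts, timeBase, AVRational{ 1, 1000 });
}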
stream.decodedFrame->best_effort_timestamp + : (stream.decodedFrame->pts != AV_NOPTS_VALUE) + ? stream.decodedFrame->pts + : stream.decodedFrame->pkt_dts; return FFmpeg::PtsToTime(pts, stream.timeBase); } @@ -66,14 +66,14 @@ FFmpeg::AvErrorWrap ProcessPacket(Stream &stream, FFmpeg::Packet &&packet) { } FFmpeg::AvErrorWrap ReadNextFrame(Stream &stream) { - Expects(stream.frame != nullptr); + Expects(stream.decodedFrame != nullptr); auto error = FFmpeg::AvErrorWrap(); do { error = avcodec_receive_frame( stream.codec.get(), - stream.frame.get()); + stream.decodedFrame.get()); if (!error || error.code() != AVERROR(EAGAIN) || stream.queue.empty()) { @@ -108,13 +108,27 @@ bool GoodForRequest( && (request.resize == image.size()); } +bool TransferFrame( + Stream &stream, + not_null decodedFrame, + not_null transferredFrame) { + Expects(decodedFrame->hw_frames_ctx != nullptr); + + const auto error = FFmpeg::AvErrorWrap( + av_hwframe_transfer_data(transferredFrame, decodedFrame, 0)); + if (error) { + LogError(qstr("av_hwframe_transfer_data"), error); + return false; + } + FFmpeg::ClearFrameMemory(decodedFrame); + return true; +} + QImage ConvertFrame( Stream &stream, - AVFrame *frame, + not_null frame, QSize resize, QImage storage) { - Expects(frame != nullptr); - const auto frameSize = QSize(frame->width, frame->height); if (frameSize.isEmpty()) { LOG(("Streaming Error: Bad frame size %1,%2" @@ -134,6 +148,7 @@ QImage ConvertFrame( if (!FFmpeg::GoodStorageForFrame(storage, resize)) { storage = FFmpeg::CreateFrameStorage(resize); } + const auto format = AV_PIX_FMT_BGRA; const auto hasDesiredFormat = (frame->format == format); if (frameSize == storage.size() && hasDesiredFormat) { diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_utility.h b/Telegram/SourceFiles/media/streaming/media_streaming_utility.h index b581fee3e..144ae103f 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_utility.h +++ b/Telegram/SourceFiles/media/streaming/media_streaming_utility.h @@ -30,7 +30,8 @@ struct Stream { crl::time duration = kTimeUnknown; AVRational timeBase = FFmpeg::kUniversalTimeBase; FFmpeg::CodecPointer codec; - FFmpeg::FramePointer frame; + FFmpeg::FramePointer decodedFrame; + FFmpeg::FramePointer transferredFrame; std::deque queue; int invalidDataPackets = 0; @@ -54,9 +55,13 @@ struct Stream { bool hasAlpha, int rotation, const FrameRequest &request); +[[nodiscard]] bool TransferFrame( + Stream &stream, + not_null decodedFrame, + not_null transferredFrame); [[nodiscard]] QImage ConvertFrame( Stream &stream, - AVFrame *frame, + not_null frame, QSize resize, QImage storage); [[nodiscard]] FrameYUV420 ExtractYUV420(Stream &stream, AVFrame *frame); diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp index 80d69d33e..b0f69e579 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp +++ b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.cpp @@ -11,6 +11,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL #include "media/audio/media_audio.h" #include "base/concurrent_timer.h" #include "core/crash_reports.h" +#include "base/debug_log.h" namespace Media { namespace Streaming { @@ -373,7 +374,8 @@ auto VideoTrackObject::readFrame(not_null frame) -> FrameResult { fail(Error::InvalidData); return FrameResult::Error; } - std::swap(frame->decoded, _stream.frame); + std::swap(frame->decoded, _stream.decodedFrame); + 
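// TransferFrame() above wraps av_hwframe_transfer_data(): a frame decoded on
// the GPU carries an opaque hardware pixel format plus hw_frames_ctx, and has
// to be downloaded into an ordinary CPU frame (typically NV12) before any
// sws_scale / YUV readout can touch its pixels. A minimal sketch of that
// download, assuming the caller owns both frames; DownloadHwFrame is an
// illustrative name, not part of this patch.

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
} // extern "C"

bool DownloadHwFrame(AVFrame *gpu, AVFrame *cpu) {
	if (!gpu->hw_frames_ctx) {
		return false; // already a software frame, nothing to transfer
	}
	// 0: no special transfer flags.
	if (av_hwframe_transfer_data(cpu, gpu, 0) < 0) {
		return false;
	}
	// Keep timestamps and metadata together with the downloaded pixels.
	av_frame_copy_props(cpu, gpu);
	return true;
}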
std::swap(frame->transferred, _stream.transferredFrame); frame->index = _frameIndex++; frame->position = position; frame->displayed = kTimeUnknown; @@ -427,9 +429,28 @@ void VideoTrackObject::rasterizeFrame(not_null frame) { fillRequests(frame); frame->format = FrameFormat::None; - if (frame->decoded->format == AV_PIX_FMT_YUV420P && !requireARGB32()) { + if (frame->decoded->hw_frames_ctx) { + if (!frame->transferred) { + frame->transferred = FFmpeg::MakeFramePointer(); + } + const auto success = TransferFrame( + _stream, + frame->decoded.get(), + frame->transferred.get()); + if (!success) { + frame->prepared.clear(); + fail(Error::InvalidData); + return; + } + } else { + frame->transferred = nullptr; + } + const auto frameWithData = frame->transferred + ? frame->transferred.get() + : frame->decoded.get(); + if (frameWithData->format == AV_PIX_FMT_YUV420P && !requireARGB32()) { frame->alpha = false; - frame->yuv420 = ExtractYUV420(_stream, frame->decoded.get()); + frame->yuv420 = ExtractYUV420(_stream, frameWithData); if (frame->yuv420.size.isEmpty() || frame->yuv420.chromaSize.isEmpty() || !frame->yuv420.y.data @@ -447,15 +468,15 @@ void VideoTrackObject::rasterizeFrame(not_null frame) { } frame->format = FrameFormat::YUV420; } else { - frame->alpha = (frame->decoded->format == AV_PIX_FMT_BGRA) - || (frame->decoded->format == AV_PIX_FMT_YUVA420P); + frame->alpha = (frameWithData->format == AV_PIX_FMT_BGRA) + || (frameWithData->format == AV_PIX_FMT_YUVA420P); frame->yuv420.size = { - frame->decoded->width, - frame->decoded->height + frameWithData->width, + frameWithData->height }; frame->original = ConvertFrame( _stream, - frame->decoded.get(), + frameWithData, chooseOriginalResize(), std::move(frame->original)); if (frame->original.isNull()) { @@ -587,7 +608,7 @@ bool VideoTrackObject::tryReadFirstFrame(FFmpeg::Packet &&packet) { return false; } // Return the last valid frame if we seek too far. - _stream.frame = std::move(_initialSkippingFrame); + _stream.decodedFrame = std::move(_initialSkippingFrame); return processFirstFrame(); } else if (error.code() != AVERROR(EAGAIN) || _readTillEnd) { return false; @@ -603,22 +624,45 @@ bool VideoTrackObject::tryReadFirstFrame(FFmpeg::Packet &&packet) { // Seek was with AVSEEK_FLAG_BACKWARD so first we get old frames. // Try skipping frames until one is after the requested position. - std::swap(_initialSkippingFrame, _stream.frame); - if (!_stream.frame) { - _stream.frame = FFmpeg::MakeFramePointer(); + std::swap(_initialSkippingFrame, _stream.decodedFrame); + if (!_stream.decodedFrame) { + _stream.decodedFrame = FFmpeg::MakeFramePointer(); } } } bool VideoTrackObject::processFirstFrame() { - if (_stream.frame->width * _stream.frame->height > kMaxFrameArea) { + const auto decodedFrame = _stream.decodedFrame.get(); + if (decodedFrame->width * decodedFrame->height > kMaxFrameArea) { return false; + } else if (decodedFrame->hw_frames_ctx) { + if (!_stream.transferredFrame) { + _stream.transferredFrame = FFmpeg::MakeFramePointer(); + } + const auto success = TransferFrame( + _stream, + decodedFrame, + _stream.transferredFrame.get()); + if (!success) { + LOG(("Video Error: Failed accelerated decoding from format %1." + ).arg(int(decodedFrame->format))); + return false; + } + DEBUG_LOG(("Video Info: " + "Using accelerated decoding from format %1 to format %2." 
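// The DEBUG_LOG() calls around here print AVPixelFormat values as raw
// integers. When reading such logs it can help to translate them into FFmpeg's
// names (AV_PIX_FMT_NV12 -> "nv12", AV_PIX_FMT_VAAPI -> "vaapi", ...). A tiny
// helper built on the public pixdesc API (a sketch, not part of this patch;
// PixelFormatName is an illustrative name):

extern "C" {
#include <libavutil/pixdesc.h>
} // extern "C"

#include <QtCore/QString>

QString PixelFormatName(int format) {
	const auto name = av_get_pix_fmt_name(AVPixelFormat(format));
	return name ? QString::fromLatin1(name) : QStringLiteral("unknown");
}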
+ ).arg(int(decodedFrame->format) + ).arg(int(_stream.transferredFrame->format))); + } else { + _stream.transferredFrame = nullptr; } - const auto alpha = (_stream.frame->format == AV_PIX_FMT_BGRA) - || (_stream.frame->format == AV_PIX_FMT_YUVA420P); + const auto frameWithData = _stream.transferredFrame + ? _stream.transferredFrame.get() + : decodedFrame; + const auto alpha = (frameWithData->format == AV_PIX_FMT_BGRA) + || (frameWithData->format == AV_PIX_FMT_YUVA420P); auto frame = ConvertFrame( _stream, - _stream.frame.get(), + frameWithData, QSize(), QImage()); if (frame.isNull()) { diff --git a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.h b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.h index 98a5fcfed..be3b14d72 100644 --- a/Telegram/SourceFiles/media/streaming/media_streaming_video_track.h +++ b/Telegram/SourceFiles/media/streaming/media_streaming_video_track.h @@ -82,6 +82,7 @@ private: }; struct Frame { FFmpeg::FramePointer decoded = FFmpeg::MakeFramePointer(); + FFmpeg::FramePointer transferred; QImage original; FrameYUV420 yuv420; crl::time position = kTimeUnknown; diff --git a/Telegram/SourceFiles/media/view/media_view_overlay_widget.cpp b/Telegram/SourceFiles/media/view/media_view_overlay_widget.cpp index af526be33..6eac88669 100644 --- a/Telegram/SourceFiles/media/view/media_view_overlay_widget.cpp +++ b/Telegram/SourceFiles/media/view/media_view_overlay_widget.cpp @@ -3124,6 +3124,7 @@ void OverlayWidget::restartAtSeekPosition(crl::time position) { } auto options = Streaming::PlaybackOptions(); options.position = position; + options.hwAllow = true; if (!_streamed->withSound) { options.mode = Streaming::Mode::Video; options.loop = true; diff --git a/Telegram/SourceFiles/media/view/media_view_pip.cpp b/Telegram/SourceFiles/media/view/media_view_pip.cpp index c0739a080..8c1e963de 100644 --- a/Telegram/SourceFiles/media/view/media_view_pip.cpp +++ b/Telegram/SourceFiles/media/view/media_view_pip.cpp @@ -1604,6 +1604,7 @@ void Pip::restartAtSeekPosition(crl::time position) { auto options = Streaming::PlaybackOptions(); options.position = position; + options.hwAllow = true; options.audioId = _instance.player().prepareLegacyState().id; Assert(8 && _delegate->pipPlaybackSpeed() >= 0.5 diff --git a/Telegram/build/prepare/prepare.py b/Telegram/build/prepare/prepare.py index a7e33c54c..3990d6742 100644 --- a/Telegram/build/prepare/prepare.py +++ b/Telegram/build/prepare/prepare.py @@ -400,7 +400,7 @@ if customRunCommand: stage('patches', """ git clone https://github.com/desktop-app/patches.git cd patches - git checkout b0ae34e08f + git checkout 0947a28160 """) stage('depot_tools', """ @@ -685,6 +685,12 @@ depends:yasm/yasm make install """) +stage('nv-codec-headers', """ + git clone https://github.com/FFmpeg/nv-codec-headers.git + cd nv-codec-headers + git checkout n11.1.5.1 +""") + stage('ffmpeg', """ git clone https://github.com/FFmpeg/FFmpeg.git ffmpeg cd ffmpeg
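Hardware decoding stays opt-in per playback: only the media viewer and the PiP window above set it, and the nv-codec-headers checkout gives the FFmpeg build the NVDEC/CUDA headers needed for the AV_HWDEVICE_TYPE_CUDA path. A caller that wants the same behaviour would do roughly the following (a sketch; the _player and position names are illustrative, not from this patch):

	auto options = Streaming::PlaybackOptions();
	options.position = position; // resume from the seek target
	options.hwAllow = true; // allow D3D11VA/DXVA2, VideoToolbox, VA-API/VDPAU or CUDA
	_player->play(options); // Player forwards hwAllow down to File::start()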