Upgrade FFmpeg to 5.1 on macOS.

In this version, videos play fine with hardware acceleration enabled.
John Preston 2023-02-20 13:45:37 +04:00
parent 4ca6834e03
commit 7a090610b9
6 changed files with 150 additions and 54 deletions


@@ -19,8 +19,11 @@ extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
+#include <libavutil/version.h>
 } // extern "C"
 
+#define DA_FFMPEG_NEW_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR >= 59)
+
 class QImage;
 
 namespace FFmpeg {
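For readers unfamiliar with the FFmpeg 5.1 channel-layout migration, here is a minimal, hypothetical probe (not part of this commit) showing what the gate above selects: the struct-based AVChannelLayout API when libavutil is new enough, the legacy bitmask API otherwise. It assumes FFmpeg development headers and a compiler that accepts the AV_CHANNEL_LAYOUT_* designated-initializer macros in C++ (C++20 or GNU extensions), as the code in this commit already does.

```cpp
extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/version.h>
} // extern "C"

#include <cstdint>
#include <cstdio>

// Same gate as in the header hunk above.
#define DA_FFMPEG_NEW_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR >= 59)

int main() {
    std::printf("libavutil %d.%d.%d, new channel-layout API in use: %s\n",
        LIBAVUTIL_VERSION_MAJOR,
        LIBAVUTIL_VERSION_MINOR,
        LIBAVUTIL_VERSION_MICRO,
        DA_FFMPEG_NEW_CHANNEL_LAYOUT ? "yes" : "no");
#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
    const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO; // struct-based API
    std::printf("mono has %d channel(s)\n", mono.nb_channels);
#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
    const uint64_t mono = AV_CH_LAYOUT_MONO; // legacy bitmask API
    std::printf("mono has %d channel(s)\n",
        av_get_channel_layout_nb_channels(mono));
#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
    return 0;
}
```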


@@ -153,6 +153,7 @@ struct Instance::Inner::Private {
 	AVStream *stream = nullptr;
 	const AVCodec *codec = nullptr;
 	AVCodecContext *codecContext = nullptr;
+	int channels = 0;
 	bool opened = false;
 	bool processing = false;
@@ -310,9 +311,14 @@ void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
 	d->codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
 	d->codecContext->bit_rate = 32000;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	d->codecContext->ch_layout = AV_CHANNEL_LAYOUT_MONO;
+	d->channels = d->codecContext->ch_layout.nb_channels;
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	d->codecContext->channel_layout = AV_CH_LAYOUT_MONO;
+	d->channels = d->codecContext->channels = 1;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	d->codecContext->sample_rate = kCaptureFrequency;
-	d->codecContext->channels = 1;
 
 	if (d->fmtContext->oformat->flags & AVFMT_GLOBALHEADER) {
 		d->codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@@ -337,32 +343,47 @@ void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
 	// Using _captured directly
 
 	// Prepare resampling
-	d->swrContext = swr_alloc();
-	if (!d->swrContext) {
-		fprintf(stderr, "Could not allocate resampler context\n");
-		exit(1);
-	}
-
-	av_opt_set_int(d->swrContext, "in_channel_count", d->codecContext->channels, 0);
-	av_opt_set_int(d->swrContext, "in_sample_rate", d->codecContext->sample_rate, 0);
-	av_opt_set_sample_fmt(d->swrContext, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
-	av_opt_set_int(d->swrContext, "out_channel_count", d->codecContext->channels, 0);
-	av_opt_set_int(d->swrContext, "out_sample_rate", d->codecContext->sample_rate, 0);
-	av_opt_set_sample_fmt(d->swrContext, "out_sample_fmt", d->codecContext->sample_fmt, 0);
-
-	if ((res = swr_init(d->swrContext)) < 0) {
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	res = swr_alloc_set_opts2(
+		&d->swrContext,
+		&d->codecContext->ch_layout,
+		d->codecContext->sample_fmt,
+		d->codecContext->sample_rate,
+		&d->codecContext->ch_layout,
+		AV_SAMPLE_FMT_S16,
+		d->codecContext->sample_rate,
+		0,
+		nullptr);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	d->swrContext = swr_alloc_set_opts(
+		d->swrContext,
+		d->codecContext->channel_layout,
+		d->codecContext->sample_fmt,
+		d->codecContext->sample_rate,
+		d->codecContext->channel_layout,
+		AV_SAMPLE_FMT_S16,
+		d->codecContext->sample_rate,
+		0,
+		nullptr);
+	res = 0;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	if (res < 0 || !d->swrContext) {
+		LOG(("Audio Error: Unable to swr_alloc_set_opts2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
+		fail();
+		return;
+	} else if ((res = swr_init(d->swrContext)) < 0) {
 		LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
 		fail();
 		return;
 	}
 	d->maxDstSamples = d->srcSamples;
-	if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
+	if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
 		LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
 		fail();
 		return;
 	}
-	d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
+	d->dstSamplesSize = av_samples_get_buffer_size(0, d->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
 
 	if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) {
 		LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
@@ -425,7 +446,7 @@ void Instance::Inner::stop(Fn<void(Result&&)> callback) {
 		memset(_captured.data() + s, 0, _captured.size() - s);
 	}
-	int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0;
+	int32 framesize = d->srcSamples * d->channels * sizeof(short), encoded = 0;
 	while (_captured.size() >= encoded + framesize) {
 		if (!processFrame(encoded, framesize)) {
 			break;
@@ -596,7 +617,7 @@ void Instance::Inner::process() {
 			d->levelMax = 0;
 		}
 		// Write frames
-		int32 framesize = d->srcSamples * d->codecContext->channels * sizeof(short), encoded = 0;
+		int32 framesize = d->srcSamples * d->channels * sizeof(short), encoded = 0;
 		while (uint32(_captured.size()) >= encoded + framesize + fadeSamples * sizeof(short)) {
 			if (!processFrame(encoded, framesize)) {
 				return;
@@ -665,12 +686,12 @@ bool Instance::Inner::processFrame(int32 offset, int32 framesize) {
 	if (d->dstSamples > d->maxDstSamples) {
 		d->maxDstSamples = d->dstSamples;
 		av_freep(&d->dstSamplesData[0]);
-		if ((res = av_samples_alloc(d->dstSamplesData, 0, d->codecContext->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) {
+		if ((res = av_samples_alloc(d->dstSamplesData, 0, d->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) {
 			LOG(("Audio Error: Unable to av_samples_alloc for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
 			fail();
 			return false;
 		}
-		d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
+		d->dstSamplesSize = av_samples_get_buffer_size(0, d->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
 	}
 
 	if ((res = swr_convert(d->swrContext, d->dstSamplesData, d->dstSamples, (const uint8_t **)srcSamplesData, d->srcSamples)) < 0) {
@@ -684,13 +705,17 @@ bool Instance::Inner::processFrame(int32 offset, int32 framesize) {
 	AVFrame *frame = av_frame_alloc();
 
 	frame->format = d->codecContext->sample_fmt;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	av_channel_layout_copy(&frame->ch_layout, &d->codecContext->ch_layout);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	frame->channels = d->codecContext->channels;
 	frame->channel_layout = d->codecContext->channel_layout;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	frame->sample_rate = d->codecContext->sample_rate;
 	frame->nb_samples = d->dstSamples;
 	frame->pts = av_rescale_q(d->fullSamples, AVRational { 1, d->codecContext->sample_rate }, d->codecContext->time_base);
-	avcodec_fill_audio_frame(frame, d->codecContext->channels, d->codecContext->sample_fmt, d->dstSamplesData[0], d->dstSamplesSize, 0);
+	avcodec_fill_audio_frame(frame, d->channels, d->codecContext->sample_fmt, d->dstSamplesData[0], d->dstSamplesSize, 0);
 
 	if (!writeFrame(frame)) {
 		return false;
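Under the new API an AVFrame carries its channel description in ch_layout, which is copied rather than assigned field by field. A hedged sketch of that step (hypothetical helper; unlike the commit, it also checks the copy's return value, since av_channel_layout_copy() can fail for layouts that own an allocated custom map):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
} // extern "C"

// Same gate as in the header hunk above.
#define DA_FFMPEG_NEW_CHANNEL_LAYOUT (LIBAVUTIL_VERSION_MAJOR >= 59)

// Describes an output frame's channels from the encoder context.
bool DescribeFrameChannels(AVFrame *frame, const AVCodecContext *codecContext) {
    frame->format = codecContext->sample_fmt;
#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
    // Deep-copies the layout; may allocate if a custom channel map is present.
    if (av_channel_layout_copy(&frame->ch_layout, &codecContext->ch_layout) < 0) {
        return false;
    }
#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
    frame->channels = codecContext->channels;
    frame->channel_layout = codecContext->channel_layout;
#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
    frame->sample_rate = codecContext->sample_rate;
    return true;
}
```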


@@ -13,9 +13,10 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 namespace Media {
 
+#if !DA_FFMPEG_NEW_CHANNEL_LAYOUT
 uint64_t AbstractFFMpegLoader::ComputeChannelLayout(
 		uint64_t channel_layout,
 		int channels) {
 	if (channel_layout) {
 		if (av_get_channel_layout_nb_channels(channel_layout) == channels) {
 			return channel_layout;
@@ -23,6 +24,7 @@ uint64_t AbstractFFMpegLoader::ComputeChannelLayout(
 	}
 	return av_get_default_channel_layout(channels);
 }
+#endif // !DA_FFMPEG_NEW_CHANNEL_LAYOUT
 
 int64 AbstractFFMpegLoader::Mul(int64 value, AVRational rational) {
 	return value * rational.num / rational.den;
@@ -205,6 +207,20 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
 		not_null<AVCodecContext*> context,
 		int64 initialCount,
 		int initialFrequency) {
+	_swrSrcSampleFormat = context->sample_fmt;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO;
+	const AVChannelLayout stereo = AV_CHANNEL_LAYOUT_STEREO;
+	const auto useMono = !av_channel_layout_compare(
+		&context->ch_layout,
+		&mono);
+	const auto useStereo = !av_channel_layout_compare(
+		&context->ch_layout,
+		&stereo);
+	const auto copyDstChannelLayout = [&] {
+		av_channel_layout_copy(&_swrDstChannelLayout, &context->ch_layout);
+	};
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	const auto layout = ComputeChannelLayout(
 		context->channel_layout,
 		context->channels);
@@ -215,15 +231,18 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
 		));
 		return false;
 	}
-	_swrSrcSampleFormat = context->sample_fmt;
-	switch (layout) {
-	case AV_CH_LAYOUT_MONO:
+	const auto useMono = (layout == AV_CH_LAYOUT_MONO);
+	const auto useStereo = (layout == AV_CH_LAYOUT_STEREO);
+	const auto copyDstChannelLayout = [&] {
+		_swrDstChannelLayout = layout;
+	};
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	if (useMono) {
 		switch (_swrSrcSampleFormat) {
 		case AV_SAMPLE_FMT_U8:
 		case AV_SAMPLE_FMT_U8P:
 			_swrDstSampleFormat = _swrSrcSampleFormat;
-			_swrDstChannelLayout = layout;
+			copyDstChannelLayout();
 			_outputChannels = 1;
 			_outputSampleSize = 1;
 			_outputFormat = AL_FORMAT_MONO8;
@@ -231,31 +250,29 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
 		case AV_SAMPLE_FMT_S16:
 		case AV_SAMPLE_FMT_S16P:
 			_swrDstSampleFormat = _swrSrcSampleFormat;
-			_swrDstChannelLayout = layout;
+			copyDstChannelLayout();
 			_outputChannels = 1;
 			_outputSampleSize = sizeof(uint16);
 			_outputFormat = AL_FORMAT_MONO16;
 			break;
 		}
-		break;
-	case AV_CH_LAYOUT_STEREO:
+	} else if (useStereo) {
 		switch (_swrSrcSampleFormat) {
 		case AV_SAMPLE_FMT_U8:
 			_swrDstSampleFormat = _swrSrcSampleFormat;
-			_swrDstChannelLayout = layout;
+			copyDstChannelLayout();
 			_outputChannels = 2;
 			_outputSampleSize = 2;
 			_outputFormat = AL_FORMAT_STEREO8;
 			break;
 		case AV_SAMPLE_FMT_S16:
 			_swrDstSampleFormat = _swrSrcSampleFormat;
-			_swrDstChannelLayout = layout;
+			copyDstChannelLayout();
 			_outputChannels = 2;
 			_outputSampleSize = 2 * sizeof(uint16);
 			_outputFormat = AL_FORMAT_STEREO16;
 			break;
 		}
-		break;
 	}
 
 	if (_swrDstRate == initialFrequency) {
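The switch over the old layout bitmask becomes an if/else chain because AVChannelLayout values cannot be compared with `==`; av_channel_layout_compare() returns 0 when the two layouts are equivalent, hence the `!` in the hunk above. A small, hypothetical classifier (not from the commit) illustrating that idiom against FFmpeg 5.1+ headers:

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
} // extern "C"

enum class Channels { Mono, Stereo, Other };

// Classifies a decoder context's layout using the new comparison helper.
Channels ClassifyLayout(const AVCodecContext *context) {
    const AVChannelLayout mono = AV_CHANNEL_LAYOUT_MONO;
    const AVChannelLayout stereo = AV_CHANNEL_LAYOUT_STEREO;
    if (!av_channel_layout_compare(&context->ch_layout, &mono)) {
        return Channels::Mono;      // 0 means "equal"
    } else if (!av_channel_layout_compare(&context->ch_layout, &stereo)) {
        return Channels::Stereo;
    }
    return Channels::Other;
}
```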
@@ -307,27 +324,39 @@ auto AbstractAudioFFMpegLoader::readFromReadyContext(
 }
 
 bool AbstractAudioFFMpegLoader::frameHasDesiredFormat() const {
-	const auto frameChannelLayout = ComputeChannelLayout(
-		_frame->channel_layout,
-		_frame->channels);
+	const auto sameChannelLayout = [&] {
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+		return !av_channel_layout_compare(
+			&_frame->ch_layout,
+			&_swrDstChannelLayout);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+		const auto frameChannelLayout = ComputeChannelLayout(
+			_frame->channel_layout,
+			_frame->channels);
+		return (frameChannelLayout == _swrDstChannelLayout);
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	};
 	return true
 		&& (_frame->format == _swrDstSampleFormat)
-		&& (frameChannelLayout == _swrDstChannelLayout)
-		&& (_frame->sample_rate == _swrDstRate);
+		&& (_frame->sample_rate == _swrDstRate)
+		&& sameChannelLayout();
 }
 
 bool AbstractAudioFFMpegLoader::initResampleForFrame() {
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	const auto bad = !_frame->ch_layout.nb_channels;
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	const auto frameChannelLayout = ComputeChannelLayout(
 		_frame->channel_layout,
 		_frame->channels);
-	if (!frameChannelLayout) {
+	const auto bad = !frameChannelLayout;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	if (bad) {
 		LOG(("Audio Error: "
-			"Unable to compute channel layout for frame in file '%1', "
-			"data size '%2', channel_layout %3, channels %4"
+			"Unknown channel layout for frame in file '%1', "
+			"data size '%2'"
 			).arg(_file.name()
 			).arg(_data.size()
-			).arg(_frame->channel_layout
-			).arg(_frame->channels
 			));
 		return false;
 	} else if (_frame->format == -1) {
@@ -338,24 +367,48 @@ bool AbstractAudioFFMpegLoader::initResampleForFrame() {
 			));
 		return false;
 	} else if (_swrContext) {
+		const auto sameChannelLayout = [&] {
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+			return !av_channel_layout_compare(
+				&_frame->ch_layout,
+				&_swrSrcChannelLayout);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+			return (frameChannelLayout == _swrSrcChannelLayout);
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+		};
 		if (true
 			&& (_frame->format == _swrSrcSampleFormat)
-			&& (frameChannelLayout == _swrSrcChannelLayout)
-			&& (_frame->sample_rate == _swrSrcRate)) {
+			&& (_frame->sample_rate == _swrSrcRate)
+			&& sameChannelLayout()) {
 			return true;
 		}
 		swr_close(_swrContext);
 	}
 
 	_swrSrcSampleFormat = static_cast<AVSampleFormat>(_frame->format);
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	av_channel_layout_copy(&_swrSrcChannelLayout, &_frame->ch_layout);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	_swrSrcChannelLayout = frameChannelLayout;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	_swrSrcRate = _frame->sample_rate;
 	return initResampleUsingFormat();
 }
 
 bool AbstractAudioFFMpegLoader::initResampleUsingFormat() {
 	int res = 0;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	res = swr_alloc_set_opts2(
+		&_swrContext,
+		&_swrDstChannelLayout,
+		_swrDstSampleFormat,
+		_swrDstRate,
+		&_swrSrcChannelLayout,
+		_swrSrcSampleFormat,
+		_swrSrcRate,
+		0,
+		nullptr);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	_swrContext = swr_alloc_set_opts(
 		_swrContext,
 		_swrDstChannelLayout,
@@ -366,11 +419,17 @@ bool AbstractAudioFFMpegLoader::initResampleUsingFormat() {
 		_swrSrcRate,
 		0,
 		nullptr);
-	if (!_swrContext) {
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	if (res < 0 || !_swrContext) {
+		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
 		LOG(("Audio Error: "
-			"Unable to swr_alloc for file '%1', data size '%2'"
+			"Unable to swr_alloc_set_opts2 for file '%1', data size '%2', "
+			"error %3, %4"
 			).arg(_file.name()
-			).arg(_data.size()));
+			).arg(_data.size()
+			).arg(res
+			).arg(av_make_error_string(err, sizeof(err), res)
+			));
 		return false;
 	} else if ((res = swr_init(_swrContext)) < 0) {
 		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };


@@ -45,9 +45,11 @@ public:
 		return _samplesFrequency;
 	}
 
+#if !DA_FFMPEG_NEW_CHANNEL_LAYOUT
 	static uint64_t ComputeChannelLayout(
 		uint64_t channel_layout,
 		int channels);
+#endif // !DA_FFMPEG_NEW_CHANNEL_LAYOUT
 
 	~AbstractFFMpegLoader();
@@ -144,11 +146,17 @@ private:
 	int _swrSrcRate = 0;
 	AVSampleFormat _swrSrcSampleFormat = AV_SAMPLE_FMT_NONE;
-	uint64_t _swrSrcChannelLayout = 0;
 
 	const int _swrDstRate = Media::Player::kDefaultFrequency;
 	AVSampleFormat _swrDstSampleFormat = AV_SAMPLE_FMT_S16;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	AVChannelLayout _swrSrcChannelLayout = AV_CHANNEL_LAYOUT_STEREO;
+	AVChannelLayout _swrDstChannelLayout = AV_CHANNEL_LAYOUT_STEREO;
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	uint64_t _swrSrcChannelLayout = 0;
 	uint64_t _swrDstChannelLayout = AV_CH_LAYOUT_STEREO;
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 
 	uint8_t **_swrDstData = nullptr;
 	int _swrDstDataCapacity = 0;
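The header now stores AVChannelLayout values instead of uint64_t masks. Because a layout can own an allocated custom channel map, it is duplicated with av_channel_layout_copy() and released with av_channel_layout_uninit() rather than treated as a plain integer. A hedged sketch of that ownership pattern (hypothetical class, not from the commit; assumes a compiler that accepts the AV_CHANNEL_LAYOUT_* initializers in C++):

```cpp
extern "C" {
#include <libavutil/channel_layout.h>
} // extern "C"

class ResampleLayouts {
public:
    ResampleLayouts() = default;
    ResampleLayouts(const ResampleLayouts &) = delete;
    ResampleLayouts &operator=(const ResampleLayouts &) = delete;
    ~ResampleLayouts() {
        av_channel_layout_uninit(&_src); // frees a custom map if one was copied
        av_channel_layout_uninit(&_dst);
    }

    // Deep-copies the source frame's layout; returns false on failure.
    bool setSource(const AVChannelLayout &layout) {
        return av_channel_layout_copy(&_src, &layout) >= 0;
    }

private:
    AVChannelLayout _src = AV_CHANNEL_LAYOUT_STEREO;
    AVChannelLayout _dst = AV_CHANNEL_LAYOUT_STEREO;
};
```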


@@ -873,8 +873,8 @@ win:
 stage('ffmpeg', """
 git clone https://github.com/FFmpeg/FFmpeg.git ffmpeg
 cd ffmpeg
-git checkout cc33e73618
win:
+    git checkout cc33e73618
     SET PATH_BACKUP_=%PATH%
     SET PATH=%ROOT_DIR%\\ThirdParty\\msys64\\usr\\bin;%PATH%
@@ -886,6 +886,7 @@ depends:patches/build_ffmpeg_win.sh
     SET PATH=%PATH_BACKUP_%
mac:
+    git checkout 7268323193
     export PKG_CONFIG_PATH=$USED_PREFIX/lib/pkgconfig
 depends:yasm/yasm

@@ -1 +1 @@
-Subproject commit 74ab66cfa9c05745ca513504940e59f6fd68eff3
+Subproject commit c3b5f2e62dbe54bca50c14b977fac626bab98138