Improve ffmpeg audio loader interface.

John Preston 2023-03-06 13:51:37 +04:00
parent af95bd6fb7
commit 0880a83c2c
9 changed files with 123 additions and 171 deletions
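
In short: AudioPlayerLoader::readMore() no longer fills a caller-provided QByteArray and an int64 sample counter. It now returns a variant carrying either a bytes::const_span over the decoded sample bytes or a ReadError, and callers recover the sample count from the byte size via sampleSize(). The declarations below only restate the media_audio_loader.h hunk further down, as a summary:

enum class ReadError {
	Other,
	NotYet,
	Wait,
	EndOfFile,
};
using ReadResult = std::variant<bytes::const_span, ReadError>;

[[nodiscard]] virtual ReadResult readMore() = 0;
virtual int sampleSize() = 0; // bytes per sample, used to derive counts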

View file

@@ -1677,6 +1677,10 @@ public:
//}
}
int sampleSize() override {
Unexpected("We shouldn't try to read sample size here.");
}
int format() override {
return 0;
}
@@ -1701,9 +1705,9 @@ public:
return _coverFormat;
}
ReadResult readMore(QByteArray &result, int64 &samplesAdded) override {
ReadResult readMore() override {
DEBUG_LOG(("Audio Read Error: should not call this"));
return ReadResult::Error;
return ReadError::Other;
}
~FFMpegAttributesReader() {
@@ -1770,24 +1774,23 @@ public:
}
};
while (processed < countbytes) {
buffer.resize(0);
int64 samples = 0;
auto res = readMore(buffer, samples);
if (res == ReadResult::Error || res == ReadResult::EndOfFile) {
const auto result = readMore();
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
if (result == ReadError::Other
|| result == ReadError::EndOfFile) {
break;
}
if (buffer.isEmpty()) {
} else if (sampleBytes.empty()) {
continue;
}
auto sampleBytes = bytes::make_span(buffer);
if (fmt == AL_FORMAT_MONO8 || fmt == AL_FORMAT_STEREO8) {
Media::Audio::IterateSamples<uchar>(sampleBytes, callback);
} else if (fmt == AL_FORMAT_MONO16 || fmt == AL_FORMAT_STEREO16) {
Media::Audio::IterateSamples<int16>(sampleBytes, callback);
}
processed += sampleSize() * samples;
processed += sampleBytes.size();
}
if (sumbytes > 0 && peaks.size() < Media::Player::kWaveformSamplesCount) {
peaks.push_back(peak);
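
The loop above now advances by bytes instead of samples; the accounting is unchanged, because every span returned by readMore() holds whole samples. Spelled out with the names used in this hunk:

// old: processed += sampleSize() * samples;
// new: processed += sampleBytes.size();
// Both advance by the same amount, since each returned span satisfies
// sampleBytes.size() == samples * sampleSize().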

View file

@@ -288,26 +288,22 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
}
auto AbstractAudioFFMpegLoader::replaceFrameAndRead(
FFmpeg::FramePointer frame,
QByteArray &result,
int64 &samplesAdded)
FFmpeg::FramePointer frame)
-> ReadResult {
_frame = std::move(frame);
return readFromReadyFrame(result, samplesAdded);
return readFromReadyFrame();
}
auto AbstractAudioFFMpegLoader::readFromReadyContext(
not_null<AVCodecContext *> context,
QByteArray &result,
int64 &samplesAdded)
not_null<AVCodecContext*> context)
-> ReadResult {
const auto res = avcodec_receive_frame(context, _frame.get());
if (res >= 0) {
return readFromReadyFrame(result, samplesAdded);
return readFromReadyFrame();
}
if (res == AVERROR_EOF) {
return ReadResult::EndOfFile;
return ReadError::EndOfFile;
} else if (res != AVERROR(EAGAIN)) {
char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
LOG(("Audio Error: "
@@ -318,9 +314,9 @@ auto AbstractAudioFFMpegLoader::readFromReadyContext(
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
return ReadResult::Error;
return ReadError::Other;
}
return ReadResult::Wait;
return ReadError::Wait;
}
bool AbstractAudioFFMpegLoader::frameHasDesiredFormat() const {
@@ -494,29 +490,13 @@ bool AbstractAudioFFMpegLoader::ensureResampleSpaceAvailable(int samples) {
return true;
}
void AbstractAudioFFMpegLoader::appendSamples(
QByteArray &result,
int64 &samplesAdded,
uint8_t **data,
int count) const {
result.append(
reinterpret_cast<const char *>(data[0]),
count * _outputSampleSize);
samplesAdded += count;
}
AudioPlayerLoader::ReadResult AbstractAudioFFMpegLoader::readFromReadyFrame(
QByteArray &result,
int64 &samplesAdded) {
auto AbstractAudioFFMpegLoader::readFromReadyFrame() -> ReadResult {
if (frameHasDesiredFormat()) {
appendSamples(
result,
samplesAdded,
_frame->extended_data,
_frame->nb_samples);
return ReadResult::Ok;
return bytes::const_span(
reinterpret_cast<const bytes::type*>(_frame->extended_data[0]),
_frame->nb_samples * _outputSampleSize);
} else if (!initResampleForFrame()) {
return ReadResult::Error;
return ReadError::Other;
}
const auto maxSamples = av_rescale_rnd(
@@ -525,7 +505,7 @@ AudioPlayerLoader::ReadResult AbstractAudioFFMpegLoader::readFromReadyFrame(
_swrSrcRate,
AV_ROUND_UP);
if (!ensureResampleSpaceAvailable(maxSamples)) {
return ReadResult::Error;
return ReadError::Other;
}
const auto samples = swr_convert(
_swrContext,
@@ -543,15 +523,11 @@ AudioPlayerLoader::ReadResult AbstractAudioFFMpegLoader::readFromReadyFrame(
).arg(samples
).arg(av_make_error_string(err, sizeof(err), samples)
));
return ReadResult::Error;
return ReadError::Other;
}
appendSamples(
result,
samplesAdded,
_swrDstData,
samples);
return ReadResult::Ok;
return bytes::const_span(
reinterpret_cast<const bytes::type*>(_swrDstData[0]),
samples * _outputSampleSize);
}
AbstractAudioFFMpegLoader::~AbstractAudioFFMpegLoader() {
@@ -648,14 +624,9 @@ bool FFMpegLoader::seekTo(crl::time positionMs) {
return true;
}
AudioPlayerLoader::ReadResult FFMpegLoader::readMore(
QByteArray &result,
int64 &samplesAdded) {
const auto readResult = readFromReadyContext(
_codecContext,
result,
samplesAdded);
if (readResult != ReadResult::Wait) {
FFMpegLoader::ReadResult FFMpegLoader::readMore() {
const auto readResult = readFromReadyContext(_codecContext);
if (readResult != ReadError::Wait) {
return readResult;
}
@@ -671,10 +642,10 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readMore(
).arg(res
).arg(av_make_error_string(err, sizeof(err), res)
));
return ReadResult::Error;
return ReadError::Other;
}
avcodec_send_packet(_codecContext, nullptr); // drain
return ReadResult::Ok;
return bytes::const_span();
}
if (_packet.stream_index == streamId) {
@@ -696,11 +667,11 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readMore(
//if (res == AVERROR_INVALIDDATA) {
// return ReadResult::NotYet; // try to skip bad packet
//}
return ReadResult::Error;
return ReadError::Other;
}
}
av_packet_unref(&_packet);
return ReadResult::Ok;
return bytes::const_span();
}
FFMpegLoader::~FFMpegLoader() {

View file

@@ -96,6 +96,10 @@ public:
return _swrDstRate;
}
int sampleSize() override {
return _outputSampleSize;
}
int format() override {
return _outputFormat;
}
@@ -107,35 +111,20 @@ protected:
not_null<AVCodecContext *> context,
int64 initialCount,
int initialFrequency);
ReadResult readFromReadyContext(
not_null<AVCodecContext *> context,
QByteArray &result,
int64 &samplesAdded);
[[nodiscard]] ReadResult readFromReadyContext(
not_null<AVCodecContext*> context);
// Streaming player provides the first frame to the ChildFFMpegLoader
// so we replace our allocated frame with the one provided.
ReadResult replaceFrameAndRead(
FFmpeg::FramePointer frame,
QByteArray &result,
int64 &samplesAdded);
int sampleSize() const {
return _outputSampleSize;
}
[[nodiscard]] ReadResult replaceFrameAndRead(FFmpeg::FramePointer frame);
private:
ReadResult readFromReadyFrame(QByteArray &result, int64 &samplesAdded);
[[nodiscard]] ReadResult readFromReadyFrame();
bool frameHasDesiredFormat() const;
bool initResampleForFrame();
bool initResampleUsingFormat();
bool ensureResampleSpaceAvailable(int samples);
void appendSamples(
QByteArray &result,
int64 &samplesAdded,
uint8_t **data,
int count) const;
FFmpeg::FramePointer _frame;
int _outputFormat = AL_FORMAT_STEREO16;
int _outputChannels = 2;
@@ -171,7 +160,7 @@ public:
bool open(crl::time positionMs) override;
ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
ReadResult readMore() override;
~FFMpegLoader();

View file

@@ -31,27 +31,20 @@ bool AudioPlayerLoader::check(
return (this->_file == file) && (this->_data.size() == data.size());
}
void AudioPlayerLoader::saveDecodedSamples(
not_null<QByteArray*> samples,
not_null<int64*> samplesCount) {
Expects(_savedSamplesCount == 0);
void AudioPlayerLoader::saveDecodedSamples(not_null<QByteArray*> samples) {
Expects(_savedSamples.isEmpty());
Expects(!_holdsSavedSamples);
samples->swap(_savedSamples);
std::swap(*samplesCount, _savedSamplesCount);
_holdsSavedSamples = true;
}
void AudioPlayerLoader::takeSavedDecodedSamples(
not_null<QByteArray*> samples,
not_null<int64*> samplesCount) {
Expects(*samplesCount == 0);
not_null<QByteArray*> samples) {
Expects(samples->isEmpty());
Expects(_holdsSavedSamples);
samples->swap(_savedSamples);
std::swap(*samplesCount, _savedSamplesCount);
_holdsSavedSamples = false;
}
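
Note that the saved-samples round trip above no longer carries a separate sample counter; a caller that needs the count recomputes it from the byte size, as the Loaders::loadData hunk further down does. A minimal sketch of that recovery (loader and accumulated are placeholder names, not part of the commit):

auto accumulated = QByteArray();
if (loader->holdsSavedDecodedSamples()) {
	loader->takeSavedDecodedSamples(&accumulated);
}
const auto accumulatedCount = accumulated.size() / loader->sampleSize();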

View file

@@ -26,18 +26,18 @@ public:
virtual bool open(crl::time positionMs) = 0;
virtual int64 samplesCount() = 0;
virtual int samplesFrequency() = 0;
virtual int sampleSize() = 0;
virtual int format() = 0;
enum class ReadResult {
Error,
enum class ReadError {
Other,
NotYet,
Ok,
Wait,
EndOfFile,
};
virtual ReadResult readMore(
QByteArray &samples,
int64 &samplesCount) = 0;
using ReadResult = std::variant<bytes::const_span, ReadError>;
[[nodiscard]] virtual ReadResult readMore() = 0;
virtual void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) {
Unexpected("enqueuePackets() call on not ChildFFMpegLoader.");
}
@@ -48,12 +48,8 @@ public:
return false;
}
void saveDecodedSamples(
not_null<QByteArray*> samples,
not_null<int64*> samplesCount);
void takeSavedDecodedSamples(
not_null<QByteArray*> samples,
not_null<int64*> samplesCount);
void saveDecodedSamples(not_null<QByteArray*> samples);
void takeSavedDecodedSamples(not_null<QByteArray*> samples);
bool holdsSavedDecodedSamples() const;
protected:
@@ -69,7 +65,6 @@ protected:
private:
QByteArray _savedSamples;
int64 _savedSamplesCount = 0;
bool _holdsSavedSamples = false;
};
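
For reference, a minimal sketch of how a caller branches on the new ReadResult, modeled on the Loaders::loadData and Track::fillFromData hunks below. The loader pointer and the accumulated buffer are placeholders; v::is / v::get are the project's variant helpers already used in this commit, and an empty span means a packet was consumed and readMore() should simply be called again:

using Error = AudioPlayerLoader::ReadError;
auto accumulated = QByteArray();
while (true) {
	const auto result = loader->readMore();
	if (v::is<bytes::const_span>(result)) {
		const auto sampleBytes = v::get<bytes::const_span>(result);
		accumulated.append(
			reinterpret_cast<const char*>(sampleBytes.data()),
			sampleBytes.size());
		continue; // empty span: progress was made, just read again
	} else if (result == Error::NotYet) {
		continue; // bad packet was skipped, try the next one
	}
	break; // Wait, EndOfFile or Other: let the caller decide what to do
}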

View file

@@ -166,15 +166,25 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
auto waiting = false;
auto errAtStart = started;
QByteArray samples;
int64 samplesCount = 0;
auto accumulated = QByteArray();
auto accumulatedCount = 0;
if (l->holdsSavedDecodedSamples()) {
l->takeSavedDecodedSamples(&samples, &samplesCount);
l->takeSavedDecodedSamples(&accumulated);
accumulatedCount = accumulated.size() / l->sampleSize();
}
while (samples.size() < kPlaybackBufferSize) {
auto res = l->readMore(samples, samplesCount);
using Result = AudioPlayerLoader::ReadResult;
if (res == Result::Error) {
while (accumulated.size() < kPlaybackBufferSize) {
const auto result = l->readMore();
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
if (!sampleBytes.empty()) {
accumulated.append(
reinterpret_cast<const char*>(sampleBytes.data()),
sampleBytes.size());
accumulatedCount += sampleBytes.size() / l->sampleSize();
}
using Error = AudioPlayerLoader::ReadError;
if (result == Error::Other) {
if (errAtStart) {
{
QMutexLocker lock(internal::audioPlayerMutex());
@@ -187,18 +197,18 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
}
finished = true;
break;
} else if (res == Result::EndOfFile) {
} else if (result == Error::EndOfFile) {
finished = true;
break;
} else if (res == Result::Ok) {
errAtStart = false;
} else if (res == Result::Wait) {
waiting = (samples.size() < kPlaybackBufferSize)
&& (!samplesCount || !l->forceToBuffer());
} else if (result == Error::Wait) {
waiting = (accumulated.size() < kPlaybackBufferSize)
&& (accumulated.isEmpty() || !l->forceToBuffer());
if (waiting) {
l->saveDecodedSamples(&samples, &samplesCount);
l->saveDecodedSamples(&accumulated);
}
break;
} else if (v::is<bytes::const_span>(result)) {
errAtStart = false;
}
QMutexLocker lock(internal::audioPlayerMutex());
@@ -215,7 +225,7 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
return;
}
if (started || samplesCount) {
if (started || !accumulated.isEmpty()) {
Audio::AttachToDevice();
}
if (started) {
@@ -234,7 +244,7 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
track->state.position = position;
track->fadeStartPosition = position;
}
if (samplesCount) {
if (!accumulated.isEmpty()) {
track->ensureStreamCreated(type);
auto bufferIndex = track->getNotQueuedBufferIndex();
@@ -246,18 +256,26 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
}
if (bufferIndex < 0) { // No free buffers, wait.
l->saveDecodedSamples(&samples, &samplesCount);
l->saveDecodedSamples(&accumulated);
return;
} else if (l->forceToBuffer()) {
l->setForceToBuffer(false);
}
track->bufferSamples[bufferIndex] = samples;
track->samplesCount[bufferIndex] = samplesCount;
track->bufferedLength += samplesCount;
alBufferData(track->stream.buffers[bufferIndex], track->format, samples.constData(), samples.size(), track->frequency);
track->bufferSamples[bufferIndex] = accumulated;
track->samplesCount[bufferIndex] = accumulatedCount;
track->bufferedLength += accumulatedCount;
alBufferData(
track->stream.buffers[bufferIndex],
track->format,
accumulated.constData(),
accumulated.size(),
track->frequency);
alSourceQueueBuffers(track->stream.source, 1, track->stream.buffers + bufferIndex);
alSourceQueueBuffers(
track->stream.source,
1,
track->stream.buffers + bufferIndex);
if (!internal::audioCheckError()) {
setStoppedState(track, State::StoppedAtError);

View file

@@ -77,12 +77,13 @@ void Track::fillFromData(bytes::vector &&data) {
}
};
do {
auto buffer = QByteArray();
auto samplesAdded = int64(0);
auto result = loader.readMore(buffer, samplesAdded);
if (samplesAdded > 0) {
auto sampleBytes = bytes::make_span(buffer);
_samplesCount += samplesAdded;
using Error = AudioPlayerLoader::ReadError;
const auto result = loader.readMore();
const auto sampleBytes = v::is<bytes::const_span>(result)
? v::get<bytes::const_span>(result)
: bytes::const_span();
if (!sampleBytes.empty()) {
_samplesCount += sampleBytes.size() / loader.sampleSize();
_samples.insert(_samples.end(), sampleBytes.data(), sampleBytes.data() + sampleBytes.size());
if (peaksCount) {
if (format == AL_FORMAT_MONO8 || format == AL_FORMAT_STEREO8) {
@@ -91,17 +92,12 @@ void Track::fillFromData(bytes::vector &&data) {
Media::Audio::IterateSamples<int16>(sampleBytes, peakCallback);
}
}
}
using Result = AudioPlayerLoader::ReadResult;
switch (result) {
case Result::Error:
case Result::NotYet:
case Result::Wait: {
} else if (result == Error::Other
|| result == Error::NotYet
|| result == Error::Wait) {
_failed = true;
} break;
}
if (result != Result::Ok) {
if (!v::is<bytes::const_span>(result)) {
break;
}
} while (true);

View file

@@ -29,38 +29,27 @@ bool ChildFFMpegLoader::open(crl::time positionMs) {
_parentData->frequency);
}
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromInitialFrame(
QByteArray &result,
int64 &samplesAdded) {
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromInitialFrame() {
if (!_parentData->frame) {
return ReadResult::Wait;
return ReadError::Wait;
}
return replaceFrameAndRead(
base::take(_parentData->frame),
result,
samplesAdded);
return replaceFrameAndRead(base::take(_parentData->frame));
}
AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
QByteArray &result,
int64 &samplesAdded) {
const auto initialFrameResult = readFromInitialFrame(
result,
samplesAdded);
if (initialFrameResult != ReadResult::Wait) {
auto ChildFFMpegLoader::readMore() -> ReadResult {
const auto initialFrameResult = readFromInitialFrame();
if (initialFrameResult != ReadError::Wait) {
return initialFrameResult;
}
const auto readResult = readFromReadyContext(
_parentData->codec.get(),
result,
samplesAdded);
if (readResult != ReadResult::Wait) {
_parentData->codec.get());
if (readResult != ReadError::Wait) {
return readResult;
}
if (_queue.empty()) {
return _eofReached ? ReadResult::EndOfFile : ReadResult::Wait;
return _eofReached ? ReadError::EndOfFile : ReadError::Wait;
}
auto packet = std::move(_queue.front());
@@ -69,7 +58,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
_eofReached = packet.empty();
if (_eofReached) {
avcodec_send_packet(_parentData->codec.get(), nullptr); // drain
return ReadResult::Ok;
return bytes::const_span();
}
auto res = avcodec_send_packet(
@@ -86,11 +75,11 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
// There is a sample voice message where skipping such packet
// results in a crash (read_access to nullptr) in swr_convert().
if (res == AVERROR_INVALIDDATA) {
return ReadResult::NotYet; // try to skip bad packet
return ReadError::NotYet; // try to skip bad packet
}
return ReadResult::Error;
return ReadError::Other;
}
return ReadResult::Ok;
return bytes::const_span();
}
void ChildFFMpegLoader::enqueuePackets(

View file

@@ -35,7 +35,7 @@ public:
return true;
}
ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
ReadResult readMore() override;
void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) override;
void setForceToBuffer(bool force) override;
bool forceToBuffer() const override;
@@ -50,9 +50,7 @@ private:
// Streaming player reads first frame by itself and provides it together
// with the codec context. So we first read data from this frame and
// only after that we try to read next packets.
ReadResult readFromInitialFrame(
QByteArray &result,
int64 &samplesAdded);
ReadResult readFromInitialFrame();
std::unique_ptr<ExternalSoundData> _parentData;
std::deque<FFmpeg::Packet> _queue;