diff --git a/Telegram/SourceFiles/media/audio/media_audio.cpp b/Telegram/SourceFiles/media/audio/media_audio.cpp
index 3fc726416..e82ea6eed 100644
--- a/Telegram/SourceFiles/media/audio/media_audio.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio.cpp
@@ -1677,6 +1677,10 @@ public:
 		//}
 	}
 
+	int sampleSize() override {
+		Unexpected("We shouldn't try to read sample size here.");
+	}
+
 	int format() override {
 		return 0;
 	}
@@ -1701,9 +1705,9 @@ public:
 		return _coverFormat;
 	}
 
-	ReadResult readMore(QByteArray &result, int64 &samplesAdded) override {
+	ReadResult readMore() override {
 		DEBUG_LOG(("Audio Read Error: should not call this"));
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
 
 	~FFMpegAttributesReader() {
@@ -1770,24 +1774,23 @@ public:
 		}
 	};
 	while (processed < countbytes) {
-		buffer.resize(0);
-
-		int64 samples = 0;
-		auto res = readMore(buffer, samples);
-		if (res == ReadResult::Error || res == ReadResult::EndOfFile) {
+		const auto result = readMore();
+		const auto sampleBytes = v::is<bytes::const_span>(result)
+			? v::get<bytes::const_span>(result)
+			: bytes::const_span();
+		if (result == ReadError::Other
+			|| result == ReadError::EndOfFile) {
 			break;
-		}
-		if (buffer.isEmpty()) {
+		} else if (sampleBytes.empty()) {
 			continue;
 		}
 
-		auto sampleBytes = bytes::make_span(buffer);
 		if (fmt == AL_FORMAT_MONO8 || fmt == AL_FORMAT_STEREO8) {
 			Media::Audio::IterateSamples<uchar>(sampleBytes, callback);
 		} else if (fmt == AL_FORMAT_MONO16 || fmt == AL_FORMAT_STEREO16) {
 			Media::Audio::IterateSamples<int16>(sampleBytes, callback);
 		}
-		processed += sampleSize() * samples;
+		processed += sampleBytes.size();
 	}
 	if (sumbytes > 0 && peaks.size() < Media::Player::kWaveformSamplesCount) {
 		peaks.push_back(peak);
diff --git a/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.cpp b/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.cpp
index 03dfc230a..6cc1ec700 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.cpp
@@ -288,26 +288,22 @@ bool AbstractAudioFFMpegLoader::initUsingContext(
 }
 
 auto AbstractAudioFFMpegLoader::replaceFrameAndRead(
-	FFmpeg::FramePointer frame,
-	QByteArray &result,
-	int64 &samplesAdded)
+	FFmpeg::FramePointer frame)
 -> ReadResult {
 	_frame = std::move(frame);
-	return readFromReadyFrame(result, samplesAdded);
+	return readFromReadyFrame();
 }
 
 auto AbstractAudioFFMpegLoader::readFromReadyContext(
-	not_null<AVCodecContext*> context,
-	QByteArray &result,
-	int64 &samplesAdded)
+	not_null<AVCodecContext*> context)
 -> ReadResult {
 	const auto res = avcodec_receive_frame(context, _frame.get());
 	if (res >= 0) {
-		return readFromReadyFrame(result, samplesAdded);
+		return readFromReadyFrame();
 	}
 
 	if (res == AVERROR_EOF) {
-		return ReadResult::EndOfFile;
+		return ReadError::EndOfFile;
 	} else if (res != AVERROR(EAGAIN)) {
 		char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
 		LOG(("Audio Error: "
@@ -318,9 +314,9 @@ auto AbstractAudioFFMpegLoader::readFromReadyContext(
 			).arg(res
 			).arg(av_make_error_string(err, sizeof(err), res)
 			));
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
-	return ReadResult::Wait;
+	return ReadError::Wait;
 }
 
 bool AbstractAudioFFMpegLoader::frameHasDesiredFormat() const {
@@ -494,29 +490,13 @@ bool AbstractAudioFFMpegLoader::ensureResampleSpaceAvailable(int samples) {
 	return true;
 }
 
-void AbstractAudioFFMpegLoader::appendSamples(
-	QByteArray & result,
-	int64 & samplesAdded,
-	uint8_t * *data,
-	int count) const {
-	result.append(
-		reinterpret_cast<const char*>(data[0]),
-		count * _outputSampleSize);
-	samplesAdded += count;
-}
-
-AudioPlayerLoader::ReadResult AbstractAudioFFMpegLoader::readFromReadyFrame(
-	QByteArray & result,
-	int64 & samplesAdded) {
+auto AbstractAudioFFMpegLoader::readFromReadyFrame() -> ReadResult {
 	if (frameHasDesiredFormat()) {
-		appendSamples(
-			result,
-			samplesAdded,
-			_frame->extended_data,
-			_frame->nb_samples);
-		return ReadResult::Ok;
+		return bytes::const_span(
+			reinterpret_cast<const bytes::type*>(_frame->extended_data[0]),
+			_frame->nb_samples * _outputSampleSize);
 	} else if (!initResampleForFrame()) {
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
 
 	const auto maxSamples = av_rescale_rnd(
@@ -525,7 +505,7 @@ AudioPlayerLoader::ReadResult AbstractAudioFFMpegLoader::readFromReadyFrame(
 		_swrSrcRate,
 		AV_ROUND_UP);
 	if (!ensureResampleSpaceAvailable(maxSamples)) {
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
 	const auto samples = swr_convert(
 		_swrContext,
@@ -543,15 +523,11 @@
 			).arg(samples
 			).arg(av_make_error_string(err, sizeof(err), samples)
 			));
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
-
-	appendSamples(
-		result,
-		samplesAdded,
-		_swrDstData,
-		samples);
-	return ReadResult::Ok;
+	return bytes::const_span(
+		reinterpret_cast<const bytes::type*>(_swrDstData[0]),
+		samples * _outputSampleSize);
 }
 
 AbstractAudioFFMpegLoader::~AbstractAudioFFMpegLoader() {
@@ -648,14 +624,9 @@ bool FFMpegLoader::seekTo(crl::time positionMs) {
 	return true;
 }
 
-AudioPlayerLoader::ReadResult FFMpegLoader::readMore(
-		QByteArray &result,
-		int64 &samplesAdded) {
-	const auto readResult = readFromReadyContext(
-		_codecContext,
-		result,
-		samplesAdded);
-	if (readResult != ReadResult::Wait) {
+FFMpegLoader::ReadResult FFMpegLoader::readMore() {
+	const auto readResult = readFromReadyContext(_codecContext);
+	if (readResult != ReadError::Wait) {
 		return readResult;
 	}
 
@@ -671,10 +642,10 @@ AudioPlayerLoader::ReadResult FFMpegLoader::readMore(
 				).arg(res
 				).arg(av_make_error_string(err, sizeof(err), res)
 				));
-			return ReadResult::Error;
+			return ReadError::Other;
 		}
 		avcodec_send_packet(_codecContext, nullptr); // drain
-		return ReadResult::Ok;
+		return bytes::const_span();
 	}
 
 	if (_packet.stream_index == streamId) {
@@ -696,11 +667,11 @@
 			//if (res == AVERROR_INVALIDDATA) {
 			//	return ReadResult::NotYet; // try to skip bad packet
 			//}
-			return ReadResult::Error;
+			return ReadError::Other;
 		}
 	}
 	av_packet_unref(&_packet);
-	return ReadResult::Ok;
+	return bytes::const_span();
 }
 
 FFMpegLoader::~FFMpegLoader() {
diff --git a/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.h b/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.h
index f74e4a5bb..07edebc14 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.h
+++ b/Telegram/SourceFiles/media/audio/media_audio_ffmpeg_loader.h
@@ -96,6 +96,10 @@ public:
 		return _swrDstRate;
 	}
 
+	int sampleSize() override {
+		return _outputSampleSize;
+	}
+
 	int format() override {
 		return _outputFormat;
 	}
@@ -107,35 +111,20 @@ protected:
 		not_null<AVCodecContext*> context,
 		int64 initialCount,
 		int initialFrequency);
-	ReadResult readFromReadyContext(
-		not_null<AVCodecContext*> context,
-		QByteArray &result,
-		int64 &samplesAdded);
+	[[nodiscard]] ReadResult readFromReadyContext(
+		not_null<AVCodecContext*> context);
 
 	// Streaming player provides the first frame to the ChildFFMpegLoader
 	// so we replace our allocated frame with the one provided.
-	ReadResult replaceFrameAndRead(
-		FFmpeg::FramePointer frame,
-		QByteArray &result,
-		int64 &samplesAdded);
-
-	int sampleSize() const {
-		return _outputSampleSize;
-	}
+	[[nodiscard]] ReadResult replaceFrameAndRead(FFmpeg::FramePointer frame);
 
 private:
-	ReadResult readFromReadyFrame(QByteArray &result, int64 &samplesAdded);
+	[[nodiscard]] ReadResult readFromReadyFrame();
 	bool frameHasDesiredFormat() const;
 	bool initResampleForFrame();
 	bool initResampleUsingFormat();
 	bool ensureResampleSpaceAvailable(int samples);
 
-	void appendSamples(
-		QByteArray &result,
-		int64 &samplesAdded,
-		uint8_t **data,
-		int count) const;
-
 	FFmpeg::FramePointer _frame;
 	int _outputFormat = AL_FORMAT_STEREO16;
 	int _outputChannels = 2;
@@ -171,7 +160,7 @@ public:
 
 	bool open(crl::time positionMs) override;
 
-	ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
+	ReadResult readMore() override;
 
 	~FFMpegLoader();
 
diff --git a/Telegram/SourceFiles/media/audio/media_audio_loader.cpp b/Telegram/SourceFiles/media/audio/media_audio_loader.cpp
index 7871f7195..61e3f5e8b 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_loader.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_loader.cpp
@@ -31,27 +31,20 @@ bool AudioPlayerLoader::check(
 	return (this->_file == file) && (this->_data.size() == data.size());
 }
 
-void AudioPlayerLoader::saveDecodedSamples(
-		not_null<QByteArray*> samples,
-		not_null<int64*> samplesCount) {
-	Expects(_savedSamplesCount == 0);
+void AudioPlayerLoader::saveDecodedSamples(not_null<QByteArray*> samples) {
 	Expects(_savedSamples.isEmpty());
 	Expects(!_holdsSavedSamples);
 
 	samples->swap(_savedSamples);
-	std::swap(*samplesCount, _savedSamplesCount);
 	_holdsSavedSamples = true;
 }
 
 void AudioPlayerLoader::takeSavedDecodedSamples(
-		not_null<QByteArray*> samples,
-		not_null<int64*> samplesCount) {
-	Expects(*samplesCount == 0);
+		not_null<QByteArray*> samples) {
 	Expects(samples->isEmpty());
 	Expects(_holdsSavedSamples);
 
 	samples->swap(_savedSamples);
-	std::swap(*samplesCount, _savedSamplesCount);
 	_holdsSavedSamples = false;
 }
 
diff --git a/Telegram/SourceFiles/media/audio/media_audio_loader.h b/Telegram/SourceFiles/media/audio/media_audio_loader.h
index 399d85f71..2003121db 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_loader.h
+++ b/Telegram/SourceFiles/media/audio/media_audio_loader.h
@@ -26,18 +26,18 @@ public:
 	virtual bool open(crl::time positionMs) = 0;
 	virtual int64 samplesCount() = 0;
 	virtual int samplesFrequency() = 0;
+	virtual int sampleSize() = 0;
 	virtual int format() = 0;
 
-	enum class ReadResult {
-		Error,
+	enum class ReadError {
+		Other,
 		NotYet,
-		Ok,
 		Wait,
 		EndOfFile,
 	};
-	virtual ReadResult readMore(
-		QByteArray &samples,
-		int64 &samplesCount) = 0;
+	using ReadResult = std::variant<bytes::const_span, ReadError>;
+	[[nodiscard]] virtual ReadResult readMore() = 0;
+
 	virtual void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) {
 		Unexpected("enqueuePackets() call on not ChildFFMpegLoader.");
 	}
@@ -48,12 +48,8 @@ public:
 		return false;
 	}
 
-	void saveDecodedSamples(
-		not_null<QByteArray*> samples,
-		not_null<int64*> samplesCount);
-	void takeSavedDecodedSamples(
-		not_null<QByteArray*> samples,
-		not_null<int64*> samplesCount);
+	void saveDecodedSamples(not_null<QByteArray*> samples);
+	void takeSavedDecodedSamples(not_null<QByteArray*> samples);
 	bool holdsSavedDecodedSamples() const;
 
 protected:
@@ -69,7 +65,6 @@ protected:
 
 private:
 	QByteArray _savedSamples;
-	int64 _savedSamplesCount = 0;
 	bool _holdsSavedSamples = false;
 
 };
diff --git a/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp b/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
index 115b22e1c..df18e891c 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_loaders.cpp
@@ -166,15 +166,25 @@ void Loaders::loadData(AudioMsgId audio, crl::time positionMs) {
 	auto waiting = false;
 	auto errAtStart = started;
 
-	QByteArray samples;
-	int64 samplesCount = 0;
+	auto accumulated = QByteArray();
+	auto accumulatedCount = 0;
 	if (l->holdsSavedDecodedSamples()) {
-		l->takeSavedDecodedSamples(&samples, &samplesCount);
+		l->takeSavedDecodedSamples(&accumulated);
+		accumulatedCount = accumulated.size() / l->sampleSize();
 	}
-	while (samples.size() < kPlaybackBufferSize) {
-		auto res = l->readMore(samples, samplesCount);
-		using Result = AudioPlayerLoader::ReadResult;
-		if (res == Result::Error) {
+	while (accumulated.size() < kPlaybackBufferSize) {
+		const auto result = l->readMore();
+		const auto sampleBytes = v::is<bytes::const_span>(result)
+			? v::get<bytes::const_span>(result)
+			: bytes::const_span();
+		if (!sampleBytes.empty()) {
+			accumulated.append(
+				reinterpret_cast<const char*>(sampleBytes.data()),
+				sampleBytes.size());
+			accumulatedCount += sampleBytes.size() / l->sampleSize();
+		}
+		using Error = AudioPlayerLoader::ReadError;
+		if (result == Error::Other) {
 			if (errAtStart) {
 				{
 					QMutexLocker lock(internal::audioPlayerMutex());
@@ -187,18 +197,18 @@
 			}
 			finished = true;
 			break;
-		} else if (res == Result::EndOfFile) {
+		} else if (result == Error::EndOfFile) {
 			finished = true;
 			break;
-		} else if (res == Result::Ok) {
-			errAtStart = false;
-		} else if (res == Result::Wait) {
-			waiting = (samples.size() < kPlaybackBufferSize)
-				&& (!samplesCount || !l->forceToBuffer());
+		} else if (result == Error::Wait) {
+			waiting = (accumulated.size() < kPlaybackBufferSize)
+				&& (accumulated.isEmpty() || !l->forceToBuffer());
 			if (waiting) {
-				l->saveDecodedSamples(&samples, &samplesCount);
+				l->saveDecodedSamples(&accumulated);
 			}
 			break;
+		} else if (v::is<bytes::const_span>(result)) {
+			errAtStart = false;
 		}
 
 		QMutexLocker lock(internal::audioPlayerMutex());
@@ -215,7 +225,7 @@
 		return;
 	}
 
-	if (started || samplesCount) {
+	if (started || !accumulated.isEmpty()) {
 		Audio::AttachToDevice();
 	}
 	if (started) {
@@ -234,7 +244,7 @@
 		track->state.position = position;
 		track->fadeStartPosition = position;
 	}
-	if (samplesCount) {
+	if (!accumulated.isEmpty()) {
 		track->ensureStreamCreated(type);
 
 		auto bufferIndex = track->getNotQueuedBufferIndex();
@@ -246,18 +256,26 @@
 		}
 
 		if (bufferIndex < 0) { // No free buffers, wait.
-			l->saveDecodedSamples(&samples, &samplesCount);
+			l->saveDecodedSamples(&accumulated);
 			return;
 		} else if (l->forceToBuffer()) {
 			l->setForceToBuffer(false);
 		}
 
-		track->bufferSamples[bufferIndex] = samples;
-		track->samplesCount[bufferIndex] = samplesCount;
-		track->bufferedLength += samplesCount;
-		alBufferData(track->stream.buffers[bufferIndex], track->format, samples.constData(), samples.size(), track->frequency);
+		track->bufferSamples[bufferIndex] = accumulated;
+		track->samplesCount[bufferIndex] = accumulatedCount;
+		track->bufferedLength += accumulatedCount;
+		alBufferData(
+			track->stream.buffers[bufferIndex],
+			track->format,
+			accumulated.constData(),
+			accumulated.size(),
+			track->frequency);
 
-		alSourceQueueBuffers(track->stream.source, 1, track->stream.buffers + bufferIndex);
+		alSourceQueueBuffers(
+			track->stream.source,
+			1,
+			track->stream.buffers + bufferIndex);
 
 		if (!internal::audioCheckError()) {
 			setStoppedState(track, State::StoppedAtError);
diff --git a/Telegram/SourceFiles/media/audio/media_audio_track.cpp b/Telegram/SourceFiles/media/audio/media_audio_track.cpp
index fb9ccb48a..5ff3d5c81 100644
--- a/Telegram/SourceFiles/media/audio/media_audio_track.cpp
+++ b/Telegram/SourceFiles/media/audio/media_audio_track.cpp
@@ -77,12 +77,13 @@ void Track::fillFromData(bytes::vector &&data) {
 		}
 	};
 	do {
-		auto buffer = QByteArray();
-		auto samplesAdded = int64(0);
-		auto result = loader.readMore(buffer, samplesAdded);
-		if (samplesAdded > 0) {
-			auto sampleBytes = bytes::make_span(buffer);
-			_samplesCount += samplesAdded;
+		using Error = AudioPlayerLoader::ReadError;
+		const auto result = loader.readMore();
+		const auto sampleBytes = v::is<bytes::const_span>(result)
+			? v::get<bytes::const_span>(result)
+			: bytes::const_span();
+		if (!sampleBytes.empty()) {
+			_samplesCount += sampleBytes.size() / loader.sampleSize();
 			_samples.insert(_samples.end(), sampleBytes.data(), sampleBytes.data() + sampleBytes.size());
 			if (peaksCount) {
 				if (format == AL_FORMAT_MONO8 || format == AL_FORMAT_STEREO8) {
@@ -91,17 +92,12 @@
 					Media::Audio::IterateSamples<int16>(sampleBytes, peakCallback);
 				}
 			}
-		}
-
-		using Result = AudioPlayerLoader::ReadResult;
-		switch (result) {
-		case Result::Error:
-		case Result::NotYet:
-		case Result::Wait: {
+		} else if (result == Error::Other
+			|| result == Error::NotYet
+			|| result == Error::Wait) {
 			_failed = true;
-		} break;
 		}
-		if (result != Result::Ok) {
+		if (!v::is<bytes::const_span>(result)) {
 			break;
 		}
 	} while (true);
diff --git a/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.cpp b/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.cpp
index ef1fc74d1..04c338c5d 100644
--- a/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.cpp
+++ b/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.cpp
@@ -29,38 +29,27 @@ bool ChildFFMpegLoader::open(crl::time positionMs) {
 		_parentData->frequency);
 }
 
-AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromInitialFrame(
-		QByteArray &result,
-		int64 &samplesAdded) {
+AudioPlayerLoader::ReadResult ChildFFMpegLoader::readFromInitialFrame() {
 	if (!_parentData->frame) {
-		return ReadResult::Wait;
+		return ReadError::Wait;
 	}
-	return replaceFrameAndRead(
-		base::take(_parentData->frame),
-		result,
-		samplesAdded);
+	return replaceFrameAndRead(base::take(_parentData->frame));
 }
 
-AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
-		QByteArray & result,
-		int64 & samplesAdded) {
-	const auto initialFrameResult = readFromInitialFrame(
-		result,
-		samplesAdded);
-	if (initialFrameResult != ReadResult::Wait) {
+auto ChildFFMpegLoader::readMore() -> ReadResult {
+	const auto initialFrameResult = readFromInitialFrame();
+	if (initialFrameResult != ReadError::Wait) {
 		return initialFrameResult;
 	}
 
 	const auto readResult = readFromReadyContext(
-		_parentData->codec.get(),
-		result,
-		samplesAdded);
-	if (readResult != ReadResult::Wait) {
+		_parentData->codec.get());
+	if (readResult != ReadError::Wait) {
 		return readResult;
 	}
 
 	if (_queue.empty()) {
-		return _eofReached ? ReadResult::EndOfFile : ReadResult::Wait;
+		return _eofReached ? ReadError::EndOfFile : ReadError::Wait;
 	}
 
 	auto packet = std::move(_queue.front());
@@ -69,7 +58,7 @@ AudioPlayerLoader::ReadResult ChildFFMpegLoader::readMore(
 	_eofReached = packet.empty();
 	if (_eofReached) {
 		avcodec_send_packet(_parentData->codec.get(), nullptr); // drain
-		return ReadResult::Ok;
+		return bytes::const_span();
 	}
 
 	auto res = avcodec_send_packet(
@@ -86,11 +75,11 @@
 		// There is a sample voice message where skipping such packet
 		// results in a crash (read_access to nullptr) in swr_convert().
 		if (res == AVERROR_INVALIDDATA) {
-			return ReadResult::NotYet; // try to skip bad packet
+			return ReadError::NotYet; // try to skip bad packet
 		}
-		return ReadResult::Error;
+		return ReadError::Other;
 	}
-	return ReadResult::Ok;
+	return bytes::const_span();
 }
 
 void ChildFFMpegLoader::enqueuePackets(
diff --git a/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.h b/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.h
index a68dd5529..3917c4ab9 100644
--- a/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.h
+++ b/Telegram/SourceFiles/media/audio/media_child_ffmpeg_loader.h
@@ -35,7 +35,7 @@ public:
 		return true;
 	}
 
-	ReadResult readMore(QByteArray &result, int64 &samplesAdded) override;
+	ReadResult readMore() override;
 	void enqueuePackets(std::deque<FFmpeg::Packet> &&packets) override;
 	void setForceToBuffer(bool force) override;
 	bool forceToBuffer() const override;
@@ -50,9 +50,7 @@ private:
 	// Streaming player reads first frame by itself and provides it together
 	// with the codec context. So we first read data from this frame and
 	// only after that we try to read next packets.
-	ReadResult readFromInitialFrame(
-		QByteArray &result,
-		int64 &samplesAdded);
+	ReadResult readFromInitialFrame();
 
 	std::unique_ptr<ExternalSoundData> _parentData;
 	std::deque<FFmpeg::Packet> _queue;
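
Note (not part of the patch): after this change AudioPlayerLoader::readMore() no longer fills a QByteArray and a sample counter through out-parameters; it returns ReadResult = std::variant<bytes::const_span, ReadError>. A span alternative carries the freshly decoded bytes (an empty span just means "nothing produced by this call"), ReadError reports Wait/NotYet/EndOfFile/Other, and the sample count is derived from the byte count via the new sampleSize() override. The sketch below is a minimal illustration of the consumption pattern the call sites above follow (Loaders::loadData(), Track::fillFromData()); DrainLoader and targetBytes are hypothetical names introduced here, while v::is/v::get and bytes::const_span are the tdesktop helpers already used in the diff.

	// Minimal sketch, assuming the AudioPlayerLoader interface from this patch.
	// DrainLoader/targetBytes are illustrative; everything else comes from the diff.
	[[nodiscard]] QByteArray DrainLoader(AudioPlayerLoader &loader, int targetBytes) {
		using Error = AudioPlayerLoader::ReadError;
		auto accumulated = QByteArray();
		while (accumulated.size() < targetBytes) {
			const auto result = loader.readMore();
			if (v::is<bytes::const_span>(result)) {
				// Success: append the decoded bytes; an empty span only means
				// this particular call produced nothing, so keep looping.
				const auto sampleBytes = v::get<bytes::const_span>(result);
				accumulated.append(
					reinterpret_cast<const char*>(sampleBytes.data()),
					sampleBytes.size());
			} else if (result == Error::NotYet) {
				continue; // a bad packet was skipped, try the next one
			} else {
				break; // Error::Wait, Error::EndOfFile or Error::Other
			}
		}
		// The sample count is now derived instead of returned by the loader:
		// accumulated.size() / loader.sampleSize().
		return accumulated;
	}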