Mirror of https://github.com/AyuGram/AyuGramDesktop.git (synced 2025-06-05 06:33:57 +02:00)

Fix voice messages sending.

parent 11e03a181d, commit 4e8a1f8d29
6 changed files with 186 additions and 169 deletions

@@ -232,10 +232,6 @@ HistoryWidget::HistoryWidget(

 	initTabbedSelector();

-	connect(Media::Capture::instance(), SIGNAL(error()), this, SLOT(onRecordError()));
-	connect(Media::Capture::instance(), SIGNAL(updated(quint16,qint32)), this, SLOT(onRecordUpdate(quint16,qint32)));
-	connect(Media::Capture::instance(), SIGNAL(done(QByteArray,VoiceWaveform,qint32)), this, SLOT(onRecordDone(QByteArray,VoiceWaveform,qint32)));
-
 	_attachToggle->addClickHandler(App::LambdaDelayed(
 		st::historyAttach.ripple.hideDuration,
 		this,

@@ -1367,15 +1363,13 @@ void HistoryWidget::setInnerFocus() {
 	}
 }

-void HistoryWidget::onRecordError() {
-	stopRecording(false);
-}
-
-void HistoryWidget::onRecordDone(
+void HistoryWidget::recordDone(
 		QByteArray result,
 		VoiceWaveform waveform,
-		qint32 samples) {
-	if (!canWriteMessage() || result.isEmpty()) return;
+		int samples) {
+	if (!canWriteMessage() || result.isEmpty()) {
+		return;
+	}

 	Window::ActivateWindow(controller());
 	const auto duration = samples / Media::Player::kDefaultFrequency;

@@ -1384,7 +1378,7 @@ void HistoryWidget::onRecordDone(
 	session().api().sendVoiceMessage(result, waveform, duration, action);
 }

-void HistoryWidget::onRecordUpdate(quint16 level, qint32 samples) {
+void HistoryWidget::recordUpdate(ushort level, int samples) {
 	if (!_recording) {
 		return;
 	}

@@ -3361,6 +3355,8 @@ void HistoryWidget::leaveToChildEvent(QEvent *e, QWidget *child) { // e -- from
 }

 void HistoryWidget::recordStartCallback() {
+	using namespace Media::Capture;
+
 	const auto error = _peer
 		? Data::RestrictionError(_peer, ChatRestriction::f_send_media)
 		: std::nullopt;

@@ -3369,11 +3365,17 @@ void HistoryWidget::recordStartCallback() {
 		return;
 	} else if (showSlowmodeError()) {
 		return;
-	} else if (!Media::Capture::instance()->available()) {
+	} else if (!instance()->available()) {
 		return;
 	}

-	emit Media::Capture::instance()->start();
+	instance()->start();
+	instance()->updated(
+	) | rpl::start_with_next_error([=](const Update &update) {
+		recordUpdate(update.level, update.samples);
+	}, [=] {
+		stopRecording(false);
+	}, _recordingLifetime);

 	_recording = _inField = true;
 	updateControlsVisibility();

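Note: this hunk replaces the old Qt SIGNAL(updated(...))/SIGNAL(error()) connections with a subscription to instance()->updated(), an rpl producer that carries both level updates and an error notification, scoped to _recordingLifetime. The sketch below is a minimal, self-contained approximation of that pattern in plain C++; the names (LevelSource, LevelUpdate, subscribe) are illustrative only and are not part of tdesktop or the rpl library.

    #include <functional>
    #include <iostream>
    #include <memory>
    #include <vector>

    // Illustrative stand-in for an event source with a value and an error channel.
    struct LevelUpdate { int level = 0; int samples = 0; };

    class LevelSource {
    public:
        struct Handlers {
            std::function<void(LevelUpdate)> onUpdate;
            std::function<void()> onError;
        };
        // Destroying the returned "lifetime" detaches both handlers at once,
        // similar to what _recordingLifetime.destroy() does for the rpl subscription.
        using Lifetime = std::shared_ptr<Handlers>;

        Lifetime subscribe(std::function<void(LevelUpdate)> onUpdate,
                           std::function<void()> onError) {
            auto handlers = std::make_shared<Handlers>();
            handlers->onUpdate = std::move(onUpdate);
            handlers->onError = std::move(onError);
            _subscribers.push_back(handlers);
            return handlers;
        }

        void fireUpdate(LevelUpdate update) {
            for (const auto &weak : _subscribers) {
                if (const auto strong = weak.lock()) strong->onUpdate(update);
            }
        }

        void fireError() {
            for (const auto &weak : _subscribers) {
                if (const auto strong = weak.lock()) strong->onError();
            }
        }

    private:
        std::vector<std::weak_ptr<Handlers>> _subscribers;
    };

    int main() {
        LevelSource source;
        // Rough analogue of: instance()->updated() | rpl::start_with_next_error(...).
        auto lifetime = source.subscribe(
            [](LevelUpdate u) { std::cout << "level " << u.level << "\n"; },
            [] { std::cout << "capture failed, stop recording\n"; });

        source.fireUpdate({ 40, 8000 });
        lifetime.reset();                  // unsubscribe, like _recordingLifetime.destroy()
        source.fireUpdate({ 80, 16000 });  // no output: the subscription is gone
    }
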
@@ -3403,11 +3405,20 @@ void HistoryWidget::mouseReleaseEvent(QMouseEvent *e) {
 }

 void HistoryWidget::stopRecording(bool send) {
-	emit Media::Capture::instance()->stop(send);
+	if (send) {
+		const auto weak = Ui::MakeWeak(this);
+		Media::Capture::instance()->stop(crl::guard(this, [=](
+				const Media::Capture::Result &result) {
+			recordDone(result.bytes, result.waveform, result.samples);
+		}));
+	} else {
+		Media::Capture::instance()->stop();
+	}

 	_recordingLevel = anim::value();
 	_recordingAnimation.stop();

+	_recordingLifetime.destroy();
 	_recording = false;
 	_recordingSamples = 0;
 	if (_history) {

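Note: crl::guard(this, ...) is tdesktop's way of making sure the stop callback is simply dropped if the widget is destroyed before the capture thread delivers the encoded result. A rough plain-Qt analogue of that guard, shown only to illustrate the idea (this is not code from the commit):

    #include <QPointer>
    #include <QWidget>
    #include <functional>
    #include <utility>

    // Run `callback` only while `context` is still alive when the result arrives.
    // QPointer resets itself to nullptr when the QObject is destroyed.
    template <typename Fn>
    auto guarded(QWidget *context, Fn callback) {
        return [weak = QPointer<QWidget>(context),
                callback = std::move(callback)](auto &&...args) {
            if (weak) {
                callback(std::forward<decltype(args)>(args)...);
            }
        };
    }

    // Usage shape (hypothetical): capture->stop(guarded(this, [=](Result &&r) { /* ... */ }));
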
@@ -3662,7 +3673,10 @@ bool HistoryWidget::isMuteUnmute() const {
 }

 bool HistoryWidget::showRecordButton() const {
-	return Media::Capture::instance()->available() && !HasSendText(_field) && !readyToForward() && !_editMsgId;
+	return Media::Capture::instance()->available()
+		&& !HasSendText(_field)
+		&& !readyToForward()
+		&& !_editMsgId;
 }

 bool HistoryWidget::showInlineBotCancel() const {

@@ -312,10 +312,6 @@ public slots:
 	void onDraftSave(bool delayed = false);
 	void onCloudDraftSave();

-	void onRecordError();
-	void onRecordDone(QByteArray result, VoiceWaveform waveform, qint32 samples);
-	void onRecordUpdate(quint16 level, qint32 samples);
-
 	void onUpdateHistoryItems();

 	// checks if we are too close to the top or to the bottom

@@ -412,6 +408,8 @@ private:

 	void animationCallback();
 	void updateOverStates(QPoint pos);
+	void recordDone(QByteArray result, VoiceWaveform waveform, int samples);
+	void recordUpdate(ushort level, int samples);
 	void recordStartCallback();
 	void recordStopCallback(bool active);
 	void recordUpdateCallback(QPoint globalPos);

@@ -707,6 +705,7 @@ private:
 	bool _inClickable = false;
 	int _recordingSamples = 0;
 	int _recordCancelWidth;
+	rpl::lifetime _recordingLifetime;

 	// This can animate for a very long time (like in music playing),
 	// so it should be a Basic, not a Simple animation.

@@ -786,25 +786,6 @@ void ComposeControls::init() {
 	initSendButton();
 	initWriteRestriction();

-	QObject::connect(
-		::Media::Capture::instance(),
-		&::Media::Capture::Instance::error,
-		_wrap.get(),
-		[=] { recordError(); });
-	QObject::connect(
-		::Media::Capture::instance(),
-		&::Media::Capture::Instance::updated,
-		_wrap.get(),
-		[=](quint16 level, int samples) { recordUpdated(level, samples); });
-	qRegisterMetaType<VoiceWaveform>();
-	QObject::connect(
-		::Media::Capture::instance(),
-		&::Media::Capture::Instance::done,
-		_wrap.get(),
-		[=](QByteArray result, VoiceWaveform waveform, int samples) {
-			recordDone(result, waveform, samples);
-		});
-
 	_wrap->sizeValue(
 	) | rpl::start_with_next([=](QSize size) {
 		updateControlsGeometry(size);

@@ -855,10 +836,6 @@ void ComposeControls::init() {
 	}
 }

-void ComposeControls::recordError() {
-	stopRecording(false);
-}
-
 void ComposeControls::recordDone(
 		QByteArray result,
 		VoiceWaveform waveform,

@@ -889,20 +866,26 @@ void ComposeControls::recordUpdated(quint16 level, int samples) {
 }

 void ComposeControls::recordStartCallback() {
-	//const auto error = _peer // #TODO restrictions
-	//	? Data::RestrictionError(_peer, ChatRestriction::f_send_media)
-	//	: std::nullopt;
-	const auto error = std::optional<QString>();
+	using namespace ::Media::Capture;
+	const auto error = _history
+		? Data::RestrictionError(_history->peer, ChatRestriction::f_send_media)
+		: std::nullopt;
 	if (error) {
 		Ui::show(Box<InformBox>(*error));
 		return;
 	} else if (_showSlowmodeError && _showSlowmodeError()) {
 		return;
-	} else if (!::Media::Capture::instance()->available()) {
+	} else if (!instance()->available()) {
 		return;
 	}

-	emit ::Media::Capture::instance()->start();
+	instance()->start();
+	instance()->updated(
+	) | rpl::start_with_next_error([=](const Update &update) {
+		recordUpdated(update.level, update.samples);
+	}, [=] {
+		stopRecording(false);
+	}, _recordingLifetime);

 	_recording = _inField = true;
 	updateControlsVisibility();

@@ -922,11 +905,19 @@ void ComposeControls::recordUpdateCallback(QPoint globalPos) {
 }

 void ComposeControls::stopRecording(bool send) {
-	emit ::Media::Capture::instance()->stop(send);
+	if (send) {
+		::Media::Capture::instance()->stop(crl::guard(_wrap.get(), [=](
+				const ::Media::Capture::Result &result) {
+			recordDone(result.bytes, result.waveform, result.samples);
+		}));
+	} else {
+		::Media::Capture::instance()->stop();
+	}

 	_recordingLevel = anim::value();
 	_recordingAnimation.stop();

+	_recordingLifetime.destroy();
 	_recording = false;
 	_recordingSamples = 0;
 	_sendActionUpdates.fire({ Api::SendProgressType::RecordVoice, -1 });

@@ -230,6 +230,7 @@ private:
 	//bool _inClickable = false;
 	int _recordingSamples = 0;
 	int _recordCancelWidth;
+	rpl::lifetime _recordingLifetime;

 	rpl::lifetime _uploaderSubscriptions;

@@ -8,6 +8,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/audio/media_audio_capture.h"

 #include "media/audio/media_audio_ffmpeg_loader.h"
+#include "base/timer.h"

 #include <al.h>
 #include <alc.h>

@@ -37,6 +38,36 @@ bool ErrorHappened(ALCdevice *device) {

 } // namespace

+class Instance::Inner final : public QObject {
+public:
+	Inner(QThread *thread);
+	~Inner();
+
+	void start(Fn<void(Update)> updated, Fn<void()> error);
+	void stop(Fn<void(Result&&)> callback = nullptr);
+
+	void timeout();
+
+private:
+	void processFrame(int32 offset, int32 framesize);
+	void fail();
+
+	void writeFrame(AVFrame *frame);
+
+	// Writes the packets till EAGAIN is got from av_receive_packet()
+	// Returns number of packets written or -1 on error
+	int writePackets();
+
+	Fn<void(Update)> _updated;
+	Fn<void()> _error;
+
+	struct Private;
+	std::unique_ptr<Private> d;
+	base::Timer _timer;
+	QByteArray _captured;
+
+};
+
 void Start() {
 	Assert(CaptureInstance == nullptr);
 	CaptureInstance = new Instance();

@@ -47,18 +78,32 @@ void Finish() {
 	delete base::take(CaptureInstance);
 }

-Instance::Instance() : _inner(new Inner(&_thread)) {
+Instance::Instance() : _inner(std::make_unique<Inner>(&_thread)) {
 	CaptureInstance = this;
-	connect(this, SIGNAL(start()), _inner, SLOT(onStart()));
-	connect(this, SIGNAL(stop(bool)), _inner, SLOT(onStop(bool)));
-	connect(_inner, SIGNAL(done(QByteArray, VoiceWaveform, qint32)), this, SIGNAL(done(QByteArray, VoiceWaveform, qint32)));
-	connect(_inner, SIGNAL(updated(quint16, qint32)), this, SIGNAL(updated(quint16, qint32)));
-	connect(_inner, SIGNAL(error()), this, SIGNAL(error()));
-	connect(&_thread, SIGNAL(started()), _inner, SLOT(onInit()));
-	connect(&_thread, SIGNAL(finished()), _inner, SLOT(deleteLater()));
 	_thread.start();
 }

+void Instance::start() {
+	_updates.fire_done();
+	InvokeQueued(_inner.get(), [=] {
+		_inner->start([=](Update update) {
+			crl::on_main(this, [=] {
+				_updates.fire_copy(update);
+			});
+		}, [=] {
+			crl::on_main(this, [=] {
+				_updates.fire_error({});
+			});
+		});
+	});
+}
+
+void Instance::stop(Fn<void(Result&&)> callback) {
+	InvokeQueued(_inner.get(), [=] {
+		_inner->stop(callback);
+	});
+}
+
 void Instance::check() {
 	_available = false;
 	if (auto device = alcGetString(0, ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER)) {

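Note: InvokeQueued(_inner.get(), ...) queues the lambda on the thread that _inner lives in, and crl::on_main(this, ...) posts updates back to the main thread; both are tdesktop helpers. With stock Qt the same round trip can be sketched roughly as follows (an illustration under those assumptions, not the project's code):

    #include <QtCore/QMetaObject>
    #include <QtCore/QObject>
    #include <functional>

    // Run `work` on the thread `worker` lives in, then deliver the result on the
    // thread `context` lives in (for example, the GUI thread).
    void runOnWorkerThenBack(
            QObject *worker,
            QObject *context,
            std::function<int()> work,
            std::function<void(int)> done) {
        QMetaObject::invokeMethod(worker, [=] {
            const int result = work();                    // capture-thread side
            QMetaObject::invokeMethod(context, [=] {
                done(result);                             // back on the caller's thread
            }, Qt::QueuedConnection);
        }, Qt::QueuedConnection);
    }
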
@@ -71,7 +116,8 @@ void Instance::check() {
 }

 Instance::~Instance() {
-	_inner = nullptr;
+	InvokeQueued(_inner.get(), [copy = base::take(_inner)] {
+	});
 	_thread.quit();
 	_thread.wait();
 }

@@ -155,34 +201,39 @@ struct Instance::Inner::Private {
 	}
 };

-Instance::Inner::Inner(QThread *thread) : d(new Private()) {
+Instance::Inner::Inner(QThread *thread)
+: d(std::make_unique<Private>())
+, _timer(thread, [=] { timeout(); }) {
 	moveToThread(thread);
-	_timer.moveToThread(thread);
-	connect(&_timer, SIGNAL(timeout()), this, SLOT(onTimeout()));
 }

 Instance::Inner::~Inner() {
-	onStop(false);
-	delete d;
+	stop();
 }

-void Instance::Inner::onInit() {
+void Instance::Inner::fail() {
+	Expects(_error != nullptr);
+
+	stop();
+	_error();
 }

-void Instance::Inner::onStart() {
+void Instance::Inner::start(Fn<void(Update)> updated, Fn<void()> error) {
+	_updated = std::move(updated);
+	_error = std::move(error);

 	// Start OpenAL Capture
 	d->device = alcCaptureOpenDevice(nullptr, kCaptureFrequency, AL_FORMAT_MONO16, kCaptureFrequency / 5);
 	if (!d->device) {
 		LOG(("Audio Error: capture device not present!"));
-		emit error();
+		fail();
 		return;
 	}
 	alcCaptureStart(d->device);
 	if (ErrorHappened(d->device)) {
 		alcCaptureCloseDevice(d->device);
 		d->device = nullptr;
-		emit error();
+		fail();
 		return;
 	}

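Note: base::Timer is tdesktop's timer wrapper; its constructor takes the target thread and the callback, and callEach(50) / cancel() replace the QTimer start/stop plus the timeout() connection that the old constructor wired up. A minimal plain-QTimer equivalent, for illustration only:

    #include <QtCore/QCoreApplication>
    #include <QtCore/QTimer>

    int main(int argc, char **argv) {
        QCoreApplication app(argc, argv);

        QTimer timer;
        QObject::connect(&timer, &QTimer::timeout, [] {
            // ~ Instance::Inner::timeout(): drain captured samples here.
        });
        timer.start(50);   // ~ _timer.callEach(50)
        // timer.stop();   // ~ _timer.cancel()

        return app.exec();
    }
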
@@ -190,7 +241,7 @@ void Instance::Inner::onStart() {

 	d->ioBuffer = (uchar*)av_malloc(AVBlockSize);

-	d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast<void*>(d), &Private::_read_data, &Private::_write_data, &Private::_seek_data);
+	d->ioContext = avio_alloc_context(d->ioBuffer, AVBlockSize, 1, static_cast<void*>(d.get()), &Private::_read_data, &Private::_write_data, &Private::_seek_data);
 	int res = 0;
 	char err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
 	AVOutputFormat *fmt = 0;

@@ -201,15 +252,13 @@ void Instance::Inner::onStart() {
 	}
 	if (!fmt) {
 		LOG(("Audio Error: Unable to find opus AVOutputFormat for capture"));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

 	if ((res = avformat_alloc_output_context2(&d->fmtContext, fmt, 0, 0)) < 0) {
 		LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	d->fmtContext->pb = d->ioContext;

@@ -220,23 +269,20 @@ void Instance::Inner::onStart() {
 	d->codec = avcodec_find_encoder(fmt->audio_codec);
 	if (!d->codec) {
 		LOG(("Audio Error: Unable to avcodec_find_encoder for capture"));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	d->stream = avformat_new_stream(d->fmtContext, d->codec);
 	if (!d->stream) {
 		LOG(("Audio Error: Unable to avformat_new_stream for capture"));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	d->stream->id = d->fmtContext->nb_streams - 1;
 	d->codecContext = avcodec_alloc_context3(d->codec);
 	if (!d->codecContext) {
 		LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture"));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

@@ -255,8 +301,7 @@ void Instance::Inner::onStart() {
 	// Open audio stream
 	if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) {
 		LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

@@ -287,48 +332,46 @@ void Instance::Inner::onStart() {

 	if ((res = swr_init(d->swrContext)) < 0) {
 		LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

 	d->maxDstSamples = d->srcSamples;
 	if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
 		LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);

 	if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) {
 		LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

 	// Write file header
 	if ((res = avformat_write_header(d->fmtContext, 0)) < 0) {
 		LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

-	_timer.start(50);
+	_timer.callEach(50);
 	_captured.clear();
 	_captured.reserve(kCaptureBufferSlice);
 	DEBUG_LOG(("Audio Capture: started!"));
 }

-void Instance::Inner::onStop(bool needResult) {
-	if (!_timer.isActive()) return; // in onStop() already
-	_timer.stop();
+void Instance::Inner::stop(Fn<void(Result&&)> callback) {
+	if (!_timer.isActive()) {
+		return; // in stop() already
+	}
+	_timer.cancel();

 	if (d->device) {
 		alcCaptureStop(d->device);
-		onTimeout(); // get last data
+		timeout(); // get last data
 	}

 	// Write what is left

@@ -370,7 +413,11 @@ void Instance::Inner::onStop(bool needResult) {
 			}
 		}
 	}
-	DEBUG_LOG(("Audio Capture: stopping (need result: %1), size: %2, samples: %3").arg(Logs::b(needResult)).arg(d->data.size()).arg(d->fullSamples));
+	DEBUG_LOG(("Audio Capture: "
+		"stopping (need result: %1), size: %2, samples: %3"
+		).arg(Logs::b(callback != nullptr)
+		).arg(d->data.size()
+		).arg(d->fullSamples));
 	_captured = QByteArray();

 	// Finish stream

@@ -465,19 +512,21 @@ void Instance::Inner::onStop(bool needResult) {
 		d->waveformPeak = 0;
 		d->waveform.clear();
 	}
-	if (needResult) emit done(result, waveform, samples);
+	if (callback) {
+		callback({ result, waveform, samples });
+	}
 }

-void Instance::Inner::onTimeout() {
+void Instance::Inner::timeout() {
 	if (!d->device) {
-		_timer.stop();
+		_timer.cancel();
 		return;
 	}
 	ALint samples;
 	alcGetIntegerv(d->device, ALC_CAPTURE_SAMPLES, sizeof(samples), &samples);
 	if (ErrorHappened(d->device)) {
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	if (samples > 0) {

@@ -490,8 +539,7 @@ void Instance::Inner::onTimeout() {
 		_captured.resize(news);
 		alcCaptureSamples(d->device, (ALCvoid *)(_captured.data() + s), samples);
 		if (ErrorHappened(d->device)) {
-			onStop(false);
-			emit error();
+			fail();
 			return;
 		}

@@ -512,7 +560,7 @@ void Instance::Inner::onTimeout() {
 		}
 		qint32 samplesFull = d->fullSamples + _captured.size() / sizeof(short), samplesSinceUpdate = samplesFull - d->lastUpdate;
 		if (samplesSinceUpdate > kCaptureUpdateDelta * kCaptureFrequency / 1000) {
-			emit updated(d->levelMax, samplesFull);
+			_updated(Update{ .samples = samplesFull, .level = d->levelMax });
 			d->lastUpdate = samplesFull;
 			d->levelMax = 0;
 		}

@@ -539,8 +587,7 @@ void Instance::Inner::processFrame(int32 offset, int32 framesize) {

 	if (framesize % sizeof(short)) { // in the middle of a sample
 		LOG(("Audio Error: Bad framesize in writeFrame() for capture, framesize %1, %2").arg(framesize));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}
 	auto samplesCnt = static_cast<int>(framesize / sizeof(short));

@@ -587,8 +634,7 @@ void Instance::Inner::processFrame(int32 offset, int32 framesize) {
 		av_freep(&d->dstSamplesData[0]);
 		if ((res = av_samples_alloc(d->dstSamplesData, 0, d->codecContext->channels, d->dstSamples, d->codecContext->sample_fmt, 1)) < 0) {
 			LOG(("Audio Error: Unable to av_samples_alloc for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-			onStop(false);
-			emit error();
+			fail();
 			return;
 		}
 		d->dstSamplesSize = av_samples_get_buffer_size(0, d->codecContext->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);

@@ -596,8 +642,7 @@ void Instance::Inner::processFrame(int32 offset, int32 framesize) {

 	if ((res = swr_convert(d->swrContext, d->dstSamplesData, d->dstSamples, (const uint8_t **)srcSamplesData, d->srcSamples)) < 0) {
 		LOG(("Audio Error: Unable to swr_convert for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

@@ -627,30 +672,26 @@ void Instance::Inner::writeFrame(AVFrame *frame) {
 		if (packetsWritten < 0) {
 			if (frame && packetsWritten == AVERROR_EOF) {
 				LOG(("Audio Error: EOF in packets received when EAGAIN was got in avcodec_send_frame()"));
-				onStop(false);
-				emit error();
+				fail();
 			}
 			return;
 		} else if (!packetsWritten) {
 			LOG(("Audio Error: No packets received when EAGAIN was got in avcodec_send_frame()"));
-			onStop(false);
-			emit error();
+			fail();
 			return;
 		}
 		res = avcodec_send_frame(d->codecContext, frame);
 	}
 	if (res < 0) {
 		LOG(("Audio Error: Unable to avcodec_send_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-		onStop(false);
-		emit error();
+		fail();
 		return;
 	}

 	if (!frame) { // drain
 		if ((res = writePackets()) != AVERROR_EOF) {
 			LOG(("Audio Error: not EOF in packets received when draining the codec, result %1").arg(res));
-			onStop(false);
-			emit error();
+			fail();
 		}
 	}
 }

@@ -672,8 +713,7 @@ int Instance::Inner::writePackets() {
 				return res;
 			}
 			LOG(("Audio Error: Unable to avcodec_receive_packet for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-			onStop(false);
-			emit error();
+			fail();
 			return res;
 		}

@@ -681,8 +721,7 @@ int Instance::Inner::writePackets() {
 		pkt.stream_index = d->stream->index;
 		if ((res = av_interleaved_write_frame(d->fmtContext, &pkt)) < 0) {
 			LOG(("Audio Error: Unable to av_interleaved_write_frame for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-			onStop(false);
-			emit error();
+			fail();
 			return -1;
 		}

@@ -14,76 +14,49 @@ struct AVFrame;
 namespace Media {
 namespace Capture {

+struct Update {
+	int samples = 0;
+	ushort level = 0;
+};
+
+struct Result {
+	QByteArray bytes;
+	VoiceWaveform waveform;
+	int samples = 0;
+};
+
 void Start();
 void Finish();

-class Instance : public QObject {
-	Q_OBJECT
-
+class Instance final : public QObject {
 public:
 	Instance();
+	~Instance();

 	void check();
-	bool available() const {
+	[[nodiscard]] bool available() const {
 		return _available;
 	}

-	~Instance();
+	[[nodiscard]] rpl::producer<Update, rpl::empty_error> updated() const {
+		return _updates.events();
+	}

-signals:
 	void start();
-	void stop(bool needResult);
+	void stop(Fn<void(Result&&)> callback = nullptr);

-	void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
-	void updated(quint16 level, qint32 samples);
-	void error();
-
 private:
 	class Inner;
 	friend class Inner;

 	bool _available = false;
+	rpl::event_stream<Update, rpl::empty_error> _updates;
 	QThread _thread;
-	Inner *_inner;
+	std::unique_ptr<Inner> _inner;

 };

-Instance *instance();
+[[nodiscard]] Instance *instance();

-class Instance::Inner : public QObject {
-	Q_OBJECT
-
-public:
-	Inner(QThread *thread);
-	~Inner();
-
-signals:
-	void error();
-	void updated(quint16 level, qint32 samples);
-	void done(QByteArray data, VoiceWaveform waveform, qint32 samples);
-
-public slots:
-	void onInit();
-	void onStart();
-	void onStop(bool needResult);
-
-	void onTimeout();
-
-private:
-	void processFrame(int32 offset, int32 framesize);
-
-	void writeFrame(AVFrame *frame);
-
-	// Writes the packets till EAGAIN is got from av_receive_packet()
-	// Returns number of packets written or -1 on error
-	int writePackets();
-
-	struct Private;
-	Private *d;
-	QTimer _timer;
-	QByteArray _captured;
-
-};
-
 } // namespace Capture
 } // namespace Media

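Note: pulling the header change together with the HistoryWidget and ComposeControls hunks above, a caller drives the reworked Media::Capture::Instance roughly like this. The snippet is assembled from this diff, so it assumes tdesktop's rpl and crl helpers rather than being standalone; updateRecordingLevel() and sendRecordedVoice() are hypothetical stand-ins for the widget's own handlers:

    using namespace Media::Capture;

    // Subscribe to level updates; the error handler doubles as "capture failed".
    instance()->updated(
    ) | rpl::start_with_next_error([=](const Update &update) {
        updateRecordingLevel(update.level, update.samples); // hypothetical UI hook
    }, [=] {
        stopRecording(false);
    }, _recordingLifetime);

    instance()->start();

    // Later, when recording ends: collect the encoded result. crl::guard() drops
    // the callback if the widget is destroyed before the capture thread finishes.
    instance()->stop(crl::guard(this, [=](const Result &result) {
        sendRecordedVoice(result.bytes, result.waveform, result.samples); // hypothetical
    }));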