Mirror of https://github.com/AyuGram/AyuGramDesktop.git (synced 2025-06-05 06:33:57 +02:00)

commit 552343fa37 (parent 4dc7fd8cd1)

    PoC video messages sending.

22 changed files with 1278 additions and 209 deletions
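Note (not part of the commit): the change threads a single `bool video` flag from the record bar's `VoiceToSend` struct through `ApiWrap::sendVoiceMessage()` down to `FileLoadTask`, where it switches the upload type from `SendMediaType::Audio` to `SendMediaType::Round`. A minimal standalone model of that plumbing, with simplified stand-in types rather than the actual Telegram Desktop classes:

    // Standalone model of the new flag threading; the real types
    // (ApiWrap, VoiceToSend, FileLoadTask) live in Telegram Desktop.
    #include <cstdio>

    enum class SendMediaType { Audio, Round };

    struct VoiceToSend {
        // ...bytes, waveform, duration elided...
        bool video = false; // new in this commit: round video message flag
    };

    // Mirrors the FileLoadTask change: the same voice payload becomes a
    // round video ("Round") when the flag is set, otherwise a voice note.
    SendMediaType chooseType(const VoiceToSend &data) {
        return data.video ? SendMediaType::Round : SendMediaType::Audio;
    }

    int main() {
        VoiceToSend voice{};              // ordinary voice message
        VoiceToSend round{.video = true}; // PoC round video message
        std::printf("%d %d\n",
            chooseType(voice) == SendMediaType::Audio,
            chooseType(round) == SendMediaType::Round); // prints "1 1"
        return 0;
    }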
@@ -3502,6 +3502,7 @@ void ApiWrap::sendVoiceMessage(
 QByteArray result,
 VoiceWaveform waveform,
 crl::time duration,
+bool video,
 const SendAction &action) {
 const auto caption = TextWithTags();
 const auto to = FileLoadTaskOptions(action);
@@ -3510,6 +3511,7 @@ void ApiWrap::sendVoiceMessage(
 result,
 duration,
 waveform,
+video,
 to,
 caption));
 }
@@ -317,6 +317,7 @@ public:
 QByteArray result,
 VoiceWaveform waveform,
 crl::time duration,
+bool video,
 const SendAction &action);
 void sendFiles(
 Ui::PreparedList &&list,
@@ -73,8 +73,8 @@ private:
 
 SourceButton _widget;
 FlatLabel _label;
-RoundRect _selectedRect;
-RoundRect _activeRect;
+Ui::RoundRect _selectedRect;
+Ui::RoundRect _activeRect;
 tgcalls::DesktopCaptureSource _source;
 std::unique_ptr<Preview> _preview;
 rpl::event_stream<> _activations;
@@ -284,10 +284,12 @@ FormatPointer MakeFormatPointer(
 return {};
 }
 result->pb = io.get();
+result->flags |= AVFMT_FLAG_CUSTOM_IO;
 
 auto options = (AVDictionary*)nullptr;
 const auto guard = gsl::finally([&] { av_dict_free(&options); });
 av_dict_set(&options, "usetoc", "1", 0);
+
 const auto error = AvErrorWrap(avformat_open_input(
 &result,
 nullptr,
@@ -307,6 +309,54 @@ FormatPointer MakeFormatPointer(
 return FormatPointer(result);
 }
 
+FormatPointer MakeWriteFormatPointer(
+void *opaque,
+int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
+#if DA_FFMPEG_CONST_WRITE_CALLBACK
+int(*write)(void *opaque, const uint8_t *buffer, int bufferSize),
+#else
+int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
+#endif
+int64_t(*seek)(void *opaque, int64_t offset, int whence),
+const QByteArray &format) {
+const AVOutputFormat *found = nullptr;
+void *i = nullptr;
+while ((found = av_muxer_iterate(&i))) {
+if (found->name == format) {
+break;
+}
+}
+if (!found) {
+LogError(
+"av_muxer_iterate",
+u"Format %1 not found"_q.arg(QString::fromUtf8(format)));
+return {};
+}
+
+auto io = MakeIOPointer(opaque, read, write, seek);
+if (!io) {
+return {};
+}
+io->seekable = (seek != nullptr);
+
+auto result = (AVFormatContext*)nullptr;
+auto error = AvErrorWrap(avformat_alloc_output_context2(
+&result,
+(AVOutputFormat*)found,
+nullptr,
+nullptr));
+if (!result || error) {
+LogError("avformat_alloc_output_context2", error);
+return {};
+}
+result->pb = io.get();
+result->flags |= AVFMT_FLAG_CUSTOM_IO;
+
+// Now FormatPointer will own and free the IO context.
+io.release();
+return FormatPointer(result);
+}
+
 void FormatDeleter::operator()(AVFormatContext *value) {
 if (value) {
 const auto deleter = IOPointer(value->pb);
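Note (not part of the commit): the new `MakeWriteFormatPointer()` above locates a muxer by name with FFmpeg's `av_muxer_iterate()` before allocating the output context. A minimal standalone sketch of that lookup; the "mp4" name and the `main()` wrapper are illustrative only:

    // Find an output format (muxer) by name with av_muxer_iterate().
    // Build against FFmpeg's libavformat.
    extern "C" {
    #include <libavformat/avformat.h>
    }
    #include <cstdio>
    #include <cstring>

    int main() {
        const char *wanted = "mp4"; // example name only
        void *opaque = nullptr;
        const AVOutputFormat *found = nullptr;
        while ((found = av_muxer_iterate(&opaque))) {
            if (std::strcmp(found->name, wanted) == 0) {
                break; // stop at the first muxer whose short name matches
            }
        }
        if (found) {
            std::printf("found muxer: %s\n", found->long_name);
        } else {
            std::printf("muxer not found\n");
        }
        return found ? 0 : 1;
    }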
@@ -448,21 +498,134 @@ SwscalePointer MakeSwscalePointer(
 existing);
 }
 
+void SwresampleDeleter::operator()(SwrContext *value) {
+if (value) {
+swr_free(&value);
+}
+}
+
+SwresamplePointer MakeSwresamplePointer(
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVChannelLayout *srcLayout,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+uint64_t srcLayout,
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVSampleFormat srcFormat,
+int srcRate,
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVChannelLayout *dstLayout,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+uint64_t dstLayout,
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVSampleFormat dstFormat,
+int dstRate,
+SwresamplePointer *existing) {
+// We have to use custom caching for SwsContext, because
+// sws_getCachedContext checks passed flags with existing context flags,
+// and re-creates context if they're different, but in the process of
+// context creation the passed flags are modified before being written
+// to the resulting context, so the caching doesn't work.
+if (existing && (*existing) != nullptr) {
+const auto &deleter = existing->get_deleter();
+if (true
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+&& srcLayout->nb_channels == deleter.srcChannels
+&& dstLayout->nb_channels == deleter.dstChannels
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+&& (av_get_channel_layout_nb_channels(srcLayout)
+== deleter.srcChannels)
+&& (av_get_channel_layout_nb_channels(dstLayout)
+== deleter.dstChannels)
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+&& srcFormat == deleter.srcFormat
+&& dstFormat == deleter.dstFormat
+&& srcRate == deleter.srcRate
+&& dstRate == deleter.dstRate) {
+return std::move(*existing);
+}
+}
+
+// Initialize audio resampler
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+auto result = (SwrContext*)nullptr;
+auto error = AvErrorWrap(swr_alloc_set_opts2(
+&result,
+dstLayout,
+dstFormat,
+dstRate,
+srcLayout,
+srcFormat,
+srcRate,
+0,
+nullptr));
+if (error || !result) {
+LogError(u"swr_alloc_set_opts2"_q, error);
+return SwresamplePointer();
+}
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+auto result = swr_alloc_set_opts(
+existing ? existing.get() : nullptr,
+dstLayout,
+dstFormat,
+dstRate,
+srcLayout,
+srcFormat,
+srcRate,
+0,
+nullptr);
+if (!result) {
+LogError(u"swr_alloc_set_opts"_q);
+}
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+
+error = AvErrorWrap(swr_init(result));
+if (error) {
+LogError(u"swr_init"_q, error);
+swr_free(&result);
+return SwresamplePointer();
+}
+
+return SwresamplePointer(
+result,
+{
+srcFormat,
+srcRate,
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+srcLayout->nb_channels,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+av_get_channel_layout_nb_channels(srcLayout),
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+dstFormat,
+dstRate,
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+dstLayout->nb_channels,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+av_get_channel_layout_nb_channels(dstLayout),
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+});
+}
+
 void SwscaleDeleter::operator()(SwsContext *value) {
 if (value) {
 sws_freeContext(value);
 }
 }
 
-void LogError(const QString &method) {
-LOG(("Streaming Error: Error in %1.").arg(method));
+void LogError(const QString &method, const QString &details) {
+LOG(("Streaming Error: Error in %1%2."
+).arg(method
+).arg(details.isEmpty() ? QString() : " - " + details));
 }
 
-void LogError(const QString &method, AvErrorWrap error) {
-LOG(("Streaming Error: Error in %1 (code: %2, text: %3)."
+void LogError(
+const QString &method,
+AvErrorWrap error,
+const QString &details) {
+LOG(("Streaming Error: Error in %1 (code: %2, text: %3)%4."
 ).arg(method
 ).arg(error.code()
-).arg(error.text()));
+).arg(error.text()
+).arg(details.isEmpty() ? QString() : " - " + details));
 }
 
 crl::time PtsToTime(int64_t pts, AVRational timeBase) {
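Note (not part of the commit): the `MakeSwresamplePointer()` helper added above is essentially a cached wrapper around FFmpeg's `swr_alloc_set_opts2()` / `swr_init()`. A minimal standalone sketch of that resampler setup without the caching logic; the concrete channel counts, formats and rates are illustrative only:

    // Allocate and initialize a libswresample context (FFmpeg >= 5.1
    // channel-layout API): stereo float in -> mono s16 out at 48 kHz.
    extern "C" {
    #include <libswresample/swresample.h>
    #include <libavutil/channel_layout.h>
    }
    #include <cstdio>

    int main() {
        AVChannelLayout src, dst;
        av_channel_layout_default(&src, 2); // stereo input
        av_channel_layout_default(&dst, 1); // mono output
        SwrContext *swr = nullptr;
        int ret = swr_alloc_set_opts2(
            &swr,
            &dst, AV_SAMPLE_FMT_S16, 48000, // output side
            &src, AV_SAMPLE_FMT_FLT, 48000, // input side
            0, nullptr);
        if (ret < 0 || !swr || (ret = swr_init(swr)) < 0) {
            std::fprintf(stderr, "resampler setup failed: %d\n", ret);
            swr_free(&swr);
            return 1;
        }
        // ...convert buffers with swr_convert(), then release.
        swr_free(&swr);
        return 0;
    }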
@@ -19,6 +19,8 @@ extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
+#include <libswresample/swresample.h>
+#include <libavutil/opt.h>
 #include <libavutil/version.h>
 } // extern "C"
 
@@ -138,6 +140,16 @@ using FormatPointer = std::unique_ptr<AVFormatContext, FormatDeleter>;
 int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
 #endif
 int64_t(*seek)(void *opaque, int64_t offset, int whence));
+[[nodiscard]] FormatPointer MakeWriteFormatPointer(
+void *opaque,
+int(*read)(void *opaque, uint8_t *buffer, int bufferSize),
+#if DA_FFMPEG_CONST_WRITE_CALLBACK
+int(*write)(void *opaque, const uint8_t *buffer, int bufferSize),
+#else
+int(*write)(void *opaque, uint8_t *buffer, int bufferSize),
+#endif
+int64_t(*seek)(void *opaque, int64_t offset, int whence),
+const QByteArray &format);
 
 struct CodecDeleter {
 void operator()(AVCodecContext *value);
@@ -179,8 +191,39 @@ using SwscalePointer = std::unique_ptr<SwsContext, SwscaleDeleter>;
 QSize resize,
 SwscalePointer *existing = nullptr);
 
-void LogError(const QString &method);
-void LogError(const QString &method, FFmpeg::AvErrorWrap error);
+struct SwresampleDeleter {
+AVSampleFormat srcFormat = AV_SAMPLE_FMT_NONE;
+int srcRate = 0;
+int srcChannels = 0;
+AVSampleFormat dstFormat = AV_SAMPLE_FMT_NONE;
+int dstRate = 0;
+int dstChannels = 0;
+
+void operator()(SwrContext *value);
+};
+using SwresamplePointer = std::unique_ptr<SwrContext, SwresampleDeleter>;
+[[nodiscard]] SwresamplePointer MakeSwresamplePointer(
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVChannelLayout *srcLayout,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+uint64_t srcLayout,
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVSampleFormat srcFormat,
+int srcRate,
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVChannelLayout *dstLayout,
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+uint64_t dstLayout,
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+AVSampleFormat dstFormat,
+int dstRate,
+SwresamplePointer *existing = nullptr);
+
+void LogError(const QString &method, const QString &details = {});
+void LogError(
+const QString &method,
+FFmpeg::AvErrorWrap error,
+const QString &details = {});
 
 [[nodiscard]] const AVCodec *FindDecoder(not_null<AVCodecContext*> context);
 [[nodiscard]] crl::time PtsToTime(int64_t pts, AVRational timeBase);
@@ -1042,6 +1042,7 @@ void HistoryWidget::initVoiceRecordBar() {
 data.bytes,
 data.waveform,
 data.duration,
+data.video,
 action);
 _voiceRecordBar->clearListenState();
 }, lifetime());
@@ -28,6 +28,7 @@ struct VoiceToSend {
 VoiceWaveform waveform;
 crl::time duration = 0;
 Api::SendOptions options;
+bool video = false;
 };
 struct SendActionUpdate {
 Api::SendProgressType type = Api::SendProgressType();
@@ -12,6 +12,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "base/random.h"
 #include "base/unixtime.h"
 #include "ui/boxes/confirm_box.h"
+#include "calls/calls_instance.h"
 #include "chat_helpers/compose/compose_show.h"
 #include "core/application.h"
 #include "data/data_document.h"
@@ -27,6 +28,7 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "media/audio/media_audio_capture.h"
 #include "media/player/media_player_button.h"
 #include "media/player/media_player_instance.h"
+#include "ui/controls/round_video_recorder.h"
 #include "ui/controls/send_button.h"
 #include "ui/effects/animation_value.h"
 #include "ui/effects/animation_value_f.h"
@@ -37,11 +39,14 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 #include "ui/widgets/tooltip.h"
 #include "ui/rect.h"
 #include "ui/ui_utility.h"
+#include "webrtc/webrtc_video_track.h"
 #include "styles/style_chat.h"
 #include "styles/style_chat_helpers.h"
 #include "styles/style_layers.h"
 #include "styles/style_media_player.h"
 
+#include <tgcalls/VideoCaptureInterface.h>
+
 namespace HistoryView::Controls {
 namespace {
 
@@ -1579,6 +1584,11 @@ void VoiceRecordBar::activeAnimate(bool active) {
 }
 
 void VoiceRecordBar::visibilityAnimate(bool show, Fn<void()> &&callback) {
+//if (_videoRecorder) {
+// _videoHiding.push_back(base::take(_videoRecorder));
+// _videoHiding.back()->hide();
+//}
+AssertIsDebug();
 const auto to = show ? 1. : 0.;
 const auto from = show ? 0. : 1.;
 auto animationCallback = [=, callback = std::move(callback)](auto value) {
@@ -1646,12 +1656,17 @@ void VoiceRecordBar::startRecording() {
 if (isRecording()) {
 return;
 }
+_recordingVideo = true; AssertIsDebug();
 auto appearanceCallback = [=] {
 if (_showAnimation.animating()) {
 return;
 }
 
 using namespace ::Media::Capture;
+if (_recordingVideo && !createVideoRecorder()) {
+stop(false);
+return;
+}
 if (!instance()->available()) {
 stop(false);
 return;
@@ -1664,8 +1679,13 @@ void VoiceRecordBar::startRecording() {
 if (_paused.current()) {
 _paused = false;
 instance()->pause(false, nullptr);
+if (_videoRecorder) {
+_videoRecorder->setPaused(false);
+}
 } else {
-instance()->start();
+instance()->start(_videoRecorder
+? _videoRecorder->audioChunkProcessor()
+: nullptr);
 }
 instance()->updated(
 ) | rpl::start_with_next_error([=](const Update &update) {
@@ -1769,10 +1789,17 @@ void VoiceRecordBar::hideFast() {
 void VoiceRecordBar::stopRecording(StopType type, bool ttlBeforeHide) {
 using namespace ::Media::Capture;
 if (type == StopType::Cancel) {
+if (_videoRecorder) {
+_videoRecorder->setPaused(true);
+_videoRecorder->hide();
+}
 instance()->stop(crl::guard(this, [=](Result &&data) {
 _cancelRequests.fire({});
 }));
 } else if (type == StopType::Listen) {
+if (_videoRecorder) {
+_videoRecorder->setPaused(true);
+}
 instance()->pause(true, crl::guard(this, [=](Result &&data) {
 if (data.bytes.isEmpty()) {
 // Close everything.
@@ -1795,6 +1822,29 @@ void VoiceRecordBar::stopRecording(StopType type, bool ttlBeforeHide) {
 // _lockShowing = false;
 }));
 } else if (type == StopType::Send) {
+if (_videoRecorder) {
+const auto weak = Ui::MakeWeak(this);
+_videoRecorder->hide([=](Ui::RoundVideoResult data) {
+crl::on_main([=, data = std::move(data)]() mutable {
+if (weak) {
+window()->raise();
+window()->activateWindow();
+const auto options = Api::SendOptions{
+.ttlSeconds = (ttlBeforeHide
+? std::numeric_limits<int>::max()
+: 0),
+};
+_sendVoiceRequests.fire({
+data.content,
+VoiceWaveform{},
+data.duration,
+options,
+true,
+});
+}
+});
+});
+}
 instance()->stop(crl::guard(this, [=](Result &&data) {
 if (data.bytes.isEmpty()) {
 // Close everything.
@@ -2094,4 +2144,40 @@ void VoiceRecordBar::showDiscardBox(
 _warningShown = true;
 }
 
+bool VoiceRecordBar::createVideoRecorder() {
+if (_videoRecorder) {
+return true;
+}
+const auto hidden = [=](not_null<Ui::RoundVideoRecorder*> which) {
+if (_videoRecorder.get() == which) {
+_videoRecorder = nullptr;
+}
+_videoHiding.erase(
+ranges::remove(
+_videoHiding,
+which.get(),
+&std::unique_ptr<Ui::RoundVideoRecorder>::get),
+end(_videoHiding));
+};
+auto capturer = Core::App().calls().getVideoCapture();
+auto track = std::make_shared<Webrtc::VideoTrack>(
+Webrtc::VideoState::Active);
+capturer->setOutput(track->sink());
+capturer->setPreferredAspectRatio(1.);
+_videoCapturerLifetime = track->stateValue(
+) | rpl::start_with_next([=](Webrtc::VideoState state) {
+capturer->setState((state == Webrtc::VideoState::Active)
+? tgcalls::VideoState::Active
+: tgcalls::VideoState::Inactive);
+});
+_videoRecorder = std::make_unique<Ui::RoundVideoRecorder>(
+Ui::RoundVideoRecorderDescriptor{
+.container = _outerContainer,
+.hidden = hidden,
+.capturer = std::move(capturer),
+.track = std::move(track),
+});
+return true;
+}
+
 } // namespace HistoryView::Controls
@@ -24,6 +24,7 @@ struct RecordBar;
 namespace Ui {
 class AbstractButton;
 class SendButton;
+class RoundVideoRecorder;
 } // namespace Ui
 
 namespace Window {
@@ -124,13 +125,10 @@ private:
 
 void recordUpdated(quint16 level, int samples);
 
-[[nodiscard]] bool recordingAnimationCallback(crl::time now);
-
 void stop(bool send);
 void stopRecording(StopType type, bool ttlBeforeHide = false);
 void visibilityAnimate(bool show, Fn<void()> &&callback);
 
-[[nodiscard]] bool showRecordButton() const;
 void drawDuration(QPainter &p);
 void drawRedCircle(QPainter &p);
 void drawMessage(QPainter &p, float64 recordActive);
@@ -153,6 +151,8 @@ private:
 [[nodiscard]] bool peekTTLState() const;
 [[nodiscard]] bool takeTTLState() const;
 
+[[nodiscard]] bool createVideoRecorder();
+
 const style::RecordBar &_st;
 const not_null<Ui::RpWidget*> _outerContainer;
 const std::shared_ptr<ChatHelpers::Show> _show;
@@ -195,6 +195,11 @@ private:
 bool _recordingTipRequired = false;
 bool _lockFromBottom = false;
 
+std::unique_ptr<Ui::RoundVideoRecorder> _videoRecorder;
+std::vector<std::unique_ptr<Ui::RoundVideoRecorder>> _videoHiding;
+rpl::lifetime _videoCapturerLifetime;
+bool _recordingVideo = false;
+
 const style::font &_cancelFont;
 
 rpl::lifetime _recordingLifetime;
@@ -1224,6 +1224,7 @@ void RepliesWidget::sendVoice(ComposeControls::VoiceToSend &&data) {
 data.bytes,
 data.waveform,
 data.duration,
+data.video,
 std::move(action));
 
 _composeControls->cancelReplyMessage();
@@ -57,29 +57,29 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 namespace HistoryView {
 
 ScheduledMemento::ScheduledMemento(not_null<History*> history)
 : _history(history)
 , _forumTopic(nullptr) {
 const auto list = _history->session().scheduledMessages().list(_history);
 if (!list.ids.empty()) {
-_list.setScrollTopState({ .item = { .fullId = list.ids.front() } });
+_list.setScrollTopState({ .item = {.fullId = list.ids.front() } });
 }
 }
 
 ScheduledMemento::ScheduledMemento(not_null<Data::ForumTopic*> forumTopic)
 : _history(forumTopic->owningHistory())
 , _forumTopic(forumTopic) {
 const auto list = _history->session().scheduledMessages().list(
 _forumTopic);
 if (!list.ids.empty()) {
-_list.setScrollTopState({ .item = { .fullId = list.ids.front() } });
+_list.setScrollTopState({ .item = {.fullId = list.ids.front() } });
 }
 }
 
 object_ptr<Window::SectionWidget> ScheduledMemento::createWidget(
 QWidget *parent,
 not_null<Window::SessionController*> controller,
 Window::Column column,
 const QRect &geometry) {
 if (column == Window::Column::Third) {
 return nullptr;
 }
@@ -97,30 +97,30 @@ ScheduledWidget::ScheduledWidget(
 not_null<Window::SessionController*> controller,
 not_null<History*> history,
 const Data::ForumTopic *forumTopic)
 : Window::SectionWidget(parent, controller, history->peer)
 , WindowListDelegate(controller)
 , _show(controller->uiShow())
 , _history(history)
 , _forumTopic(forumTopic)
 , _scroll(
 this,
 controller->chatStyle()->value(lifetime(), st::historyScroll),
 false)
 , _topBar(this, controller)
 , _topBarShadow(this)
 , _composeControls(std::make_unique<ComposeControls>(
 this,
 ComposeControlsDescriptor{
 .show = controller->uiShow(),
 .unavailableEmojiPasted = [=](not_null<DocumentData*> emoji) {
 listShowPremiumToast(emoji);
 },
 .mode = ComposeControls::Mode::Scheduled,
 .sendMenuDetails = [] { return SendMenu::Details(); },
 .regularWindow = controller,
 .stickerOrEmojiChosen = controller->stickerOrEmojiChosen(),
 }))
 , _cornerButtons(
 _scroll.data(),
 controller->chatStyle(),
 static_cast<HistoryView::CornerButtonsDelegate*>(this)) {
@@ -209,83 +209,83 @@ ScheduledWidget::~ScheduledWidget() = default;
 void ScheduledWidget::setupComposeControls() {
 auto writeRestriction = _forumTopic
 ? [&] {
 auto topicWriteRestrictions = rpl::single(
 ) | rpl::then(session().changes().topicUpdates(
 Data::TopicUpdate::Flag::Closed
 ) | rpl::filter([=](const Data::TopicUpdate &update) {
 return (update.topic->history() == _history)
 && (update.topic->rootId() == _forumTopic->rootId());
 }) | rpl::to_empty) | rpl::map([=] {
 return (!_forumTopic
 || _forumTopic->canToggleClosed()
 || !_forumTopic->closed())
 ? std::optional<QString>()
 : tr::lng_forum_topic_closed(tr::now);
 });
 return rpl::combine(
 session().changes().peerFlagsValue(
 _history->peer,
 Data::PeerUpdate::Flag::Rights),
 Data::CanSendAnythingValue(_history->peer),
 std::move(topicWriteRestrictions)
 ) | rpl::map([=](
 auto,
 auto,
 std::optional<QString> topicRestriction) {
 const auto allWithoutPolls = Data::AllSendRestrictions()
 & ~ChatRestriction::SendPolls;
 const auto canSendAnything = Data::CanSendAnyOf(
 _forumTopic,
 allWithoutPolls);
 const auto restriction = Data::RestrictionError(
 _history->peer,
 ChatRestriction::SendOther);
 auto text = !canSendAnything
 ? (restriction
 ? restriction
-: topicRestriction
-? std::move(topicRestriction)
-: tr::lng_group_not_accessible(tr::now))
 : topicRestriction
 ? std::move(topicRestriction)
-: std::optional<QString>();
-return text ? Controls::WriteRestriction{
-.text = std::move(*text),
-.type = Controls::WriteRestrictionType::Rights,
-} : Controls::WriteRestriction();
-}) | rpl::type_erased();
-}()
+: tr::lng_group_not_accessible(tr::now))
+: topicRestriction
+? std::move(topicRestriction)
+: std::optional<QString>();
+return text ? Controls::WriteRestriction{
+.text = std::move(*text),
+.type = Controls::WriteRestrictionType::Rights,
+} : Controls::WriteRestriction();
+}) | rpl::type_erased();
+}()
 : [&] {
 return rpl::combine(
 session().changes().peerFlagsValue(
 _history->peer,
 Data::PeerUpdate::Flag::Rights),
 Data::CanSendAnythingValue(_history->peer)
 ) | rpl::map([=] {
 const auto allWithoutPolls = Data::AllSendRestrictions()
 & ~ChatRestriction::SendPolls;
 const auto canSendAnything = Data::CanSendAnyOf(
 _history->peer,
 allWithoutPolls,
 false);
 const auto restriction = Data::RestrictionError(
 _history->peer,
 ChatRestriction::SendOther);
 auto text = !canSendAnything
 ? (restriction
 ? restriction
 : tr::lng_group_not_accessible(tr::now))
 : std::optional<QString>();
 return text ? Controls::WriteRestriction{
 .text = std::move(*text),
 .type = Controls::WriteRestrictionType::Rights,
 } : Controls::WriteRestriction();
 }) | rpl::type_erased();
 }();
 _composeControls->setHistory({
 .history = _history.get(),
 .writeRestriction = std::move(writeRestriction),
 });
 
 _composeControls->height(
 ) | rpl::start_with_next([=] {
@@ -308,7 +308,7 @@ void ScheduledWidget::setupComposeControls() {
 
 _composeControls->sendVoiceRequests(
 ) | rpl::start_with_next([=](ComposeControls::VoiceToSend &&data) {
-sendVoice(data.bytes, data.waveform, data.duration);
+sendVoice(std::move(data));
 }, lifetime());
 
 _composeControls->sendCommandRequests(
@@ -393,8 +393,8 @@ void ScheduledWidget::setupComposeControls() {
 }, lifetime());
 
 _composeControls->setMimeDataHook([=](
 not_null<const QMimeData*> data,
 Ui::InputField::MimeAction action) {
 if (action == Ui::InputField::MimeAction::Check) {
 return Core::CanSendFiles(data);
 } else if (action == Ui::InputField::MimeAction::Insert) {
@@ -426,7 +426,7 @@ void ScheduledWidget::chooseAttach() {
 
 const auto filter = FileDialog::AllOrImagesFilter();
 FileDialog::GetOpenPaths(this, tr::lng_choose_files(tr::now), filter, crl::guard(this, [=](
 FileDialog::OpenResult &&result) {
 if (result.paths.isEmpty() && result.remoteContent.isEmpty()) {
 return;
 }
@@ -434,7 +434,7 @@ void ScheduledWidget::chooseAttach() {
 if (!result.remoteContent.isEmpty()) {
 auto read = Images::Read({
 .content = result.remoteContent,
 });
 if (!read.image.isNull() && !read.animated) {
 confirmSendingFiles(
 std::move(read.image),
@@ -454,9 +454,9 @@ void ScheduledWidget::chooseAttach() {
 }
 
 bool ScheduledWidget::confirmSendingFiles(
 not_null<const QMimeData*> data,
 std::optional<bool> overrideSendImagesAsPhotos,
 const QString &insertTextOnCancel) {
 const auto hasImage = data->hasImage();
 const auto premium = controller()->session().user()->isPremium();
 
@@ -488,8 +488,8 @@ bool ScheduledWidget::confirmSendingFiles(
 }
 
 bool ScheduledWidget::confirmSendingFiles(
 Ui::PreparedList &&list,
 const QString &insertTextOnCancel) {
 if (_composeControls->confirmMediaEdit(list)) {
 return true;
 } else if (showSendingFilesError(list)) {
@@ -507,11 +507,11 @@ bool ScheduledWidget::confirmSendingFiles(
 SendMenu::Details());
 
 box->setConfirmedCallback(crl::guard(this, [=](
 Ui::PreparedList &&list,
 Ui::SendFilesWay way,
 TextWithTags &&caption,
 Api::SendOptions options,
 bool ctrlShiftEnter) {
 sendingFilesConfirmed(
 std::move(list),
 way,
@@ -529,11 +529,11 @@ bool ScheduledWidget::confirmSendingFiles(
 }
 
 void ScheduledWidget::sendingFilesConfirmed(
 Ui::PreparedList &&list,
 Ui::SendFilesWay way,
 TextWithTags &&caption,
 Api::SendOptions options,
 bool ctrlShiftEnter) {
 Expects(list.filesToProcess.empty());
 
 if (showSendingFilesError(list, way.sendImagesAsPhotos())) {
@@ -565,10 +565,10 @@ void ScheduledWidget::sendingFilesConfirmed(
 }
 
 bool ScheduledWidget::confirmSendingFiles(
 QImage &&image,
 QByteArray &&content,
 std::optional<bool> overrideSendImagesAsPhotos,
 const QString &insertTextOnCancel) {
 if (image.isNull()) {
 return false;
 }
@@ -604,8 +604,8 @@ void ScheduledWidget::checkReplyReturns() {
 }
 
 void ScheduledWidget::uploadFile(
 const QByteArray &fileContent,
 SendMediaType type) {
 const auto callback = [=](Api::SendOptions options) {
 session().api().sendFile(
 fileContent,
@@ -617,13 +617,13 @@ void ScheduledWidget::uploadFile(
 }
 
 bool ScheduledWidget::showSendingFilesError(
 const Ui::PreparedList &list) const {
 return showSendingFilesError(list, std::nullopt);
 }
 
 bool ScheduledWidget::showSendingFilesError(
 const Ui::PreparedList &list,
 std::optional<bool> compress) const {
 const auto text = [&] {
 using Error = Ui::PreparedList::Error;
 const auto peer = _history->peer;
@@ -656,7 +656,7 @@ bool ScheduledWidget::showSendingFilesError(
 }
 
 Api::SendAction ScheduledWidget::prepareSendAction(
 Api::SendOptions options) const {
 auto result = Api::SendAction(_history, options);
 result.options.sendAs = _composeControls->sendAsPeer();
 if (_forumTopic) {
@@ -716,26 +716,22 @@ void ScheduledWidget::send(Api::SendOptions options) {
 _composeControls->focus();
 }
 
-void ScheduledWidget::sendVoice(
-QByteArray bytes,
-VoiceWaveform waveform,
-crl::time duration) {
+void ScheduledWidget::sendVoice(const Controls::VoiceToSend &data) {
 const auto callback = [=](Api::SendOptions options) {
-sendVoice(bytes, waveform, duration, options);
+sendVoice(base::duplicate(data), options);
 };
 controller()->show(
 PrepareScheduleBox(this, _show, sendMenuDetails(), callback));
 }
 
 void ScheduledWidget::sendVoice(
-QByteArray bytes,
-VoiceWaveform waveform,
-crl::time duration,
+const Controls::VoiceToSend &data,
 Api::SendOptions options) {
 session().api().sendVoiceMessage(
-bytes,
-waveform,
-duration,
+data.bytes,
+data.waveform,
+data.duration,
+data.video,
 prepareSendAction(options));
 _composeControls->clearListenState();
 }
@@ -47,6 +47,10 @@ namespace InlineBots {
 class Result;
 } // namespace InlineBots
 
+namespace HistoryView::Controls {
+struct VoiceToSend;
+} // namespace HistoryView::Controls
+
 namespace HistoryView {
 
 class Element;
@@ -207,14 +211,9 @@ private:
 Api::SendOptions options) const;
 void send();
 void send(Api::SendOptions options);
+void sendVoice(const Controls::VoiceToSend &data);
 void sendVoice(
-QByteArray bytes,
-VoiceWaveform waveform,
-crl::time duration);
-void sendVoice(
-QByteArray bytes,
-VoiceWaveform waveform,
-crl::time duration,
+const Controls::VoiceToSend &data,
 Api::SendOptions options);
 void edit(
 not_null<HistoryItem*> item,
@@ -88,13 +88,15 @@ public:
 void start(
 Webrtc::DeviceResolvedId id,
 Fn<void(Update)> updated,
-Fn<void()> error);
+Fn<void()> error,
+Fn<void(Chunk)> externalProcessing);
 void stop(Fn<void(Result&&)> callback = nullptr);
 void pause(bool value, Fn<void(Result&&)> callback);
 
 private:
 void process();
 
+bool initializeFFmpeg();
 [[nodiscard]] bool processFrame(int32 offset, int32 framesize);
 void fail();
 
@@ -104,6 +106,7 @@ private:
 // Returns number of packets written or -1 on error
 [[nodiscard]] int writePackets();
 
+Fn<void(Chunk)> _externalProcessing;
 Fn<void(Update)> _updated;
 Fn<void()> _error;
 
@@ -131,7 +134,7 @@ Instance::Instance() : _inner(std::make_unique<Inner>(&_thread)) {
 _thread.start();
 }
 
-void Instance::start() {
+void Instance::start(Fn<void(Chunk)> externalProcessing) {
 _updates.fire_done();
 const auto id = Audio::Current().captureDeviceId();
 InvokeQueued(_inner.get(), [=] {
@@ -143,7 +146,7 @@ void Instance::start() {
 crl::on_main(this, [=] {
 _updates.fire_error({});
 });
-});
+}, externalProcessing);
 crl::on_main(this, [=] {
 _started = true;
 });
@@ -304,7 +307,9 @@ void Instance::Inner::fail() {
 void Instance::Inner::start(
 Webrtc::DeviceResolvedId id,
 Fn<void(Update)> updated,
-Fn<void()> error) {
+Fn<void()> error,
+Fn<void(Chunk)> externalProcessing) {
+_externalProcessing = std::move(externalProcessing);
 _updated = std::move(updated);
 _error = std::move(error);
 if (_paused) {
@@ -329,8 +334,19 @@ void Instance::Inner::start(
 d->device = nullptr;
 fail();
 return;
+} else if (!_externalProcessing) {
+if (!initializeFFmpeg()) {
+fail();
+return;
+}
 }
+_timer.callEach(50);
+_captured.clear();
+_captured.reserve(kCaptureBufferSlice);
+DEBUG_LOG(("Audio Capture: started!"));
+}
+
+bool Instance::Inner::initializeFFmpeg() {
 // Create encoding context
 
 d->ioBuffer = (uchar*)av_malloc(FFmpeg::kAVBlockSize);
@@ -347,14 +363,12 @@ void Instance::Inner::start(
 }
 if (!fmt) {
 LOG(("Audio Error: Unable to find opus AVOutputFormat for capture"));
-fail();
-return;
+return false;
 }
 
 if ((res = avformat_alloc_output_context2(&d->fmtContext, (AVOutputFormat*)fmt, 0, 0)) < 0) {
 LOG(("Audio Error: Unable to avformat_alloc_output_context2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
 d->fmtContext->pb = d->ioContext;
 d->fmtContext->flags |= AVFMT_FLAG_CUSTOM_IO;
@@ -364,21 +378,18 @@ void Instance::Inner::start(
 d->codec = avcodec_find_encoder(fmt->audio_codec);
 if (!d->codec) {
 LOG(("Audio Error: Unable to avcodec_find_encoder for capture"));
-fail();
-return;
+return false;
 }
 d->stream = avformat_new_stream(d->fmtContext, d->codec);
 if (!d->stream) {
 LOG(("Audio Error: Unable to avformat_new_stream for capture"));
-fail();
-return;
+return false;
 }
 d->stream->id = d->fmtContext->nb_streams - 1;
 d->codecContext = avcodec_alloc_context3(d->codec);
 if (!d->codecContext) {
 LOG(("Audio Error: Unable to avcodec_alloc_context3 for capture"));
-fail();
-return;
+return false;
 }
 
 av_opt_set_int(d->codecContext, "refcounted_frames", 1, 0);
@@ -401,8 +412,7 @@ void Instance::Inner::start(
 // Open audio stream
 if ((res = avcodec_open2(d->codecContext, d->codec, nullptr)) < 0) {
 LOG(("Audio Error: Unable to avcodec_open2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
 
 // Alloc source samples
@@ -443,39 +453,27 @@ void Instance::Inner::start(
 #endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
 if (res < 0 || !d->swrContext) {
 LOG(("Audio Error: Unable to swr_alloc_set_opts2 for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 } else if ((res = swr_init(d->swrContext)) < 0) {
 LOG(("Audio Error: Unable to swr_init for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
 
 d->maxDstSamples = d->srcSamples;
 if ((res = av_samples_alloc_array_and_samples(&d->dstSamplesData, 0, d->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0)) < 0) {
 LOG(("Audio Error: Unable to av_samples_alloc_array_and_samples for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
 d->dstSamplesSize = av_samples_get_buffer_size(0, d->channels, d->maxDstSamples, d->codecContext->sample_fmt, 0);
 
 if ((res = avcodec_parameters_from_context(d->stream->codecpar, d->codecContext)) < 0) {
 LOG(("Audio Error: Unable to avcodec_parameters_from_context for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
 
 // Write file header
 if ((res = avformat_write_header(d->fmtContext, 0)) < 0) {
 LOG(("Audio Error: Unable to avformat_write_header for capture, error %1, %2").arg(res).arg(av_make_error_string(err, sizeof(err), res)));
-fail();
-return;
+return false;
 }
-
-_timer.callEach(50);
-_captured.clear();
-_captured.reserve(kCaptureBufferSlice);
-DEBUG_LOG(("Audio Capture: started!"));
+return true;
 }
 
 void Instance::Inner::pause(bool value, Fn<void(Result&&)> callback) {
@@ -559,7 +557,7 @@ void Instance::Inner::stop(Fn<void(Result&&)> callback) {
 _captured = QByteArray();
 
 // Finish stream
-if (needResult && hadDevice) {
+if (needResult && hadDevice && d->fmtContext) {
 av_write_trailer(d->fmtContext);
 }
 
@@ -658,6 +656,13 @@ void Instance::Inner::process() {
 if (ErrorHappened(d->device)) {
 fail();
 return;
+} else if (_externalProcessing) {
+_externalProcessing({
+.finished = crl::now(),
+.samples = base::take(_captured),
+.frequency = kCaptureFrequency,
+});
+return;
 }
 
 // Count new recording level and update view
@@ -7,10 +7,9 @@ https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
 */
 #pragma once
 
+#include <QtCore/QThread>
 #include <QtCore/QTimer>
 
-struct AVFrame;
-
 namespace Media {
 namespace Capture {
 
@@ -19,6 +18,12 @@ struct Update {
 	ushort level = 0;
 };
 
+struct Chunk {
+	crl::time finished = 0;
+	QByteArray samples;
+	int frequency = 0;
+};
+
 struct Result;
 
 void Start();
@@ -45,7 +50,7 @@ public:
 		return _started.changes();
 	}
 
-	void start();
+	void start(Fn<void(Chunk)> externalProcessing = nullptr);
 	void stop(Fn<void(Result&&)> callback = nullptr);
 	void pause(bool value, Fn<void(Result&&)> callback);
 
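Annotation (not part of the commit): with the optional externalProcessing callback added above, the capture thread no longer feeds its own Opus/OGG result and instead hands whatever raw 16-bit mono PCM it has buffered (roughly every 50 ms) to the caller as a Chunk. A minimal consumer sketch, assuming the existing Media::Capture::instance() accessor; the lambda body is illustrative only:

	Media::Capture::instance()->start([](Media::Capture::Chunk chunk) {
		// chunk.samples is interleaved 16-bit PCM at chunk.frequency (48 kHz here),
		// chunk.finished is the crl::time when this slice was cut.
		const auto samples = int64(chunk.samples.size()) / int64(sizeof(int16_t));
		DEBUG_LOG(("Round video: got %1 ms of audio.").arg(samples * 1000 / chunk.frequency));
	});

In this commit the callback that actually gets wired in is RoundVideoRecorder::audioChunkProcessor(), defined in the new file further down.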
@@ -244,6 +244,7 @@ void ReplyArea::sendVoice(VoiceToSend &&data) {
 		data.bytes,
 		data.waveform,
 		data.duration,
+		data.video,
 		std::move(action));
 
 	_controls->clearListenState();
@@ -1200,6 +1200,7 @@ void ShortcutMessages::sendVoice(ComposeControls::VoiceToSend &&data) {
 		data.bytes,
 		data.waveform,
 		data.duration,
+		data.video,
 		std::move(action));
 
 	_composeControls->cancelReplyMessage();
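Annotation (not part of the commit): both send paths now forward one extra flag from the recording controls, so a finished round video rides the same code path as a voice note. Roughly, VoiceToSend::video reaches ApiWrap::sendVoiceMessage() and from there the FileLoadTask changes below. A hedged sketch of the resulting call; history, session and data are placeholders for what the caller already has:

	const auto action = Api::SendAction(history); // placeholder destination
	session->api().sendVoiceMessage(
		data.bytes,    // Opus bytes for a voice note, muxed mp4 for a round video
		data.waveform,
		data.duration, // milliseconds
		data.video,    // true switches FileLoadTask to SendMediaType::Round
		action);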
@@ -498,6 +498,7 @@ FileLoadTask::FileLoadTask(
 	const QByteArray &voice,
 	crl::time duration,
 	const VoiceWaveform &waveform,
+	bool video,
 	const FileLoadTo &to,
 	const TextWithTags &caption)
 : _id(base::RandomValue<uint64>())
@@ -507,7 +508,7 @@ FileLoadTask::FileLoadTask(
 , _content(voice)
 , _duration(duration)
 , _waveform(waveform)
-, _type(SendMediaType::Audio)
+, _type(video ? SendMediaType::Round : SendMediaType::Audio)
 , _caption(caption) {
 }
 
@@ -696,6 +697,7 @@ void FileLoadTask::process(Args &&args) {
 	auto isSong = false;
 	auto isVideo = false;
 	auto isVoice = (_type == SendMediaType::Audio);
+	auto isRound = (_type == SendMediaType::Round);
 	auto isSticker = false;
 
 	auto fullimage = QImage();
@@ -711,7 +713,7 @@ void FileLoadTask::process(Args &&args) {
 		// Voice sending is supported only from memory for now.
 		// Because for voice we force mime type and don't read MediaInformation.
 		// For a real file we always read mime type and read MediaInformation.
-		Assert(!isVoice);
+		Assert(!isVoice && !isRound);
 
 		filesize = info.size();
 		filename = info.fileName();
@@ -736,6 +738,9 @@ void FileLoadTask::process(Args &&args) {
 	if (isVoice) {
 		filename = filedialogDefaultName(u"audio"_q, u".ogg"_q, QString(), true);
 		filemime = "audio/ogg";
+	} else if (isRound) {
+		filename = filedialogDefaultName(u"round"_q, u".mp4"_q, QString(), true);
+		filemime = "video/mp4";
 	} else {
 		if (_information) {
 			if (auto image = std::get_if<Ui::PreparedFileInformation::Image>(
@@ -815,7 +820,41 @@ void FileLoadTask::process(Args &&args) {
 	auto photo = MTP_photoEmpty(MTP_long(0));
 	auto document = MTP_documentEmpty(MTP_long(0));
 
-	if (!isVoice) {
+	if (isRound) {
+		_information = readMediaInformation(u"video/mp4"_q);
+		if (auto video = std::get_if<Ui::PreparedFileInformation::Video>(
+				&_information->media)) {
+			isVideo = true;
+			auto coverWidth = video->thumbnail.width();
+			auto coverHeight = video->thumbnail.height();
+			if (video->isGifv && !_album) {
+				attributes.push_back(MTP_documentAttributeAnimated());
+			}
+			auto flags = MTPDdocumentAttributeVideo::Flags(
+				MTPDdocumentAttributeVideo::Flag::f_round_message);
+			if (video->supportsStreaming) {
+				flags |= MTPDdocumentAttributeVideo::Flag::f_supports_streaming;
+			}
+			const auto realSeconds = video->duration / 1000.;
+			attributes.push_back(MTP_documentAttributeVideo(
+				MTP_flags(flags),
+				MTP_double(realSeconds),
+				MTP_int(coverWidth),
+				MTP_int(coverHeight),
+				MTPint(), // preload_prefix_size
+				MTPdouble(), // video_start_ts
+				MTPstring())); // video_codec
+
+			if (args.generateGoodThumbnail) {
+				goodThumbnail = video->thumbnail;
+				{
+					QBuffer buffer(&goodThumbnailBytes);
+					goodThumbnail.save(&buffer, "JPG", kThumbnailQuality);
+				}
+			}
+			thumbnail = PrepareFileThumbnail(std::move(video->thumbnail));
+		}
+	} else if (!isVoice) {
 		if (!_information) {
 			_information = readMediaInformation(filemime);
 			filemime = _information->filemime;
@@ -869,7 +908,7 @@ void FileLoadTask::process(Args &&args) {
 		}
 	}
 
-	if (!fullimage.isNull() && fullimage.width() > 0 && !isSong && !isVideo && !isVoice) {
+	if (!fullimage.isNull() && fullimage.width() > 0 && !isSong && !isVideo && !isVoice && !isRound) {
 		auto w = fullimage.width(), h = fullimage.height();
 		attributes.push_back(MTP_documentAttributeImageSize(MTP_int(w), MTP_int(h)));
 
@@ -31,6 +31,7 @@ extern const char kOptionSendLargePhotos[];
 enum class SendMediaType {
 	Photo,
 	Audio,
+	Round,
 	File,
 	ThemeFile,
 	Secure,
@@ -231,6 +232,7 @@ public:
 		const QByteArray &voice,
 		crl::time duration,
 		const VoiceWaveform &waveform,
+		bool video,
 		const FileLoadTo &to,
 		const TextWithTags &caption);
 	~FileLoadTask();
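Annotation (not part of the commit): SendMediaType::Round is the switch the whole upload path keys off. Condensed from the hunks above (same identifiers, nothing new): a round message is uploaded as an ordinary mp4 video document whose video attribute carries the round_message flag, with the square thumbnail as its cover.

	auto flags = MTPDdocumentAttributeVideo::Flags(
		MTPDdocumentAttributeVideo::Flag::f_round_message);
	if (video->supportsStreaming) {
		flags |= MTPDdocumentAttributeVideo::Flag::f_supports_streaming;
	}
	attributes.push_back(MTP_documentAttributeVideo(
		MTP_flags(flags),
		MTP_double(video->duration / 1000.), // seconds
		MTP_int(video->thumbnail.width()),
		MTP_int(video->thumbnail.height()),
		MTPint(),      // preload_prefix_size
		MTPdouble(),   // video_start_ts
		MTPstring())); // video_codec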
Telegram/SourceFiles/ui/controls/round_video_recorder.cpp (new file, 649 lines)
@@ -0,0 +1,649 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop application for the Telegram messaging service.
+
+For license and copyright information please follow this link:
+https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
+*/
+#include "ui/controls/round_video_recorder.h"
+
+#include "base/debug_log.h"
+#include "ffmpeg/ffmpeg_utility.h"
+#include "media/audio/media_audio_capture.h"
+#include "ui/painter.h"
+#include "ui/rp_widget.h"
+#include "webrtc/webrtc_video_track.h"
+#include "styles/style_chat_helpers.h"
+
+namespace Ui {
+namespace {
+
+constexpr auto kSide = 400;
+constexpr auto kOutputFilename = "C:\\Tmp\\TestVideo\\output.mp4";
+
+using namespace FFmpeg;
+
+} // namespace
+
+class RoundVideoRecorder::Private final {
+public:
+	Private(crl::weak_on_queue<Private> weak);
+	~Private();
+
+	void push(int64 mcstimestamp, const QImage &frame);
+	void push(const Media::Capture::Chunk &chunk);
+
+	[[nodiscard]] RoundVideoResult finish();
+
+private:
+	static int Write(void *opaque, uint8_t *buf, int buf_size);
+	static int64_t Seek(void *opaque, int64_t offset, int whence);
+
+	int write(uint8_t *buf, int buf_size);
+	int64_t seek(int64_t offset, int whence);
+
+	const crl::weak_on_queue<Private> _weak;
+
+	FormatPointer _format;
+
+	AVStream *_videoStream = nullptr;
+	CodecPointer _videoCodec;
+	FramePointer _videoFrame;
+	SwscalePointer _swsContext;
+	int64_t _videoPts = 0;
+
+	// This is the first recorded frame timestamp in microseconds.
+	int64_t _videoFirstTimestamp = -1;
+
+	// Audio-related members
+	AVStream *_audioStream = nullptr;
+	CodecPointer _audioCodec;
+	FramePointer _audioFrame;
+	SwresamplePointer _swrContext;
+	QByteArray _audioTail;
+	int64_t _audioPts = 0;
+	int _audioChannels = 0;
+
+	// Those timestamps are in 'ms' used for sync between audio and video.
+	crl::time _firstAudioChunkFinished = 0;
+	crl::time _firstVideoFrameTime = 0;
+
+	QByteArray _result;
+	int64_t _resultOffset = 0;
+	crl::time _resultDuration = 0;
+
+	void initEncoding();
+	bool initVideo();
+	bool initAudio();
+	void deinitEncoding();
+	void finishEncoding();
+
+	void encodeVideoFrame(int64 mcstimestamp, const QImage &frame);
+	void encodeAudioFrame(const Media::Capture::Chunk &chunk);
+	bool writeFrame(
+		const FramePointer &frame,
+		const CodecPointer &codec,
+		AVStream *stream);
+
+};
+
+RoundVideoRecorder::Private::Private(crl::weak_on_queue<Private> weak)
+: _weak(std::move(weak)) {
+	initEncoding();
+}
+
+RoundVideoRecorder::Private::~Private() {
+	finishEncoding();
+
+	QFile file(kOutputFilename);
+	if (file.open(QIODevice::WriteOnly)) {
+		file.write(_result);
+	}
+}
+
+int RoundVideoRecorder::Private::Write(void *opaque, uint8_t *buf, int buf_size) {
+	return static_cast<Private*>(opaque)->write(buf, buf_size);
+}
+
+int64_t RoundVideoRecorder::Private::Seek(void *opaque, int64_t offset, int whence) {
+	return static_cast<Private*>(opaque)->seek(offset, whence);
+}
+
+int RoundVideoRecorder::Private::write(uint8_t *buf, int buf_size) {
+	if (const auto total = _resultOffset + int64(buf_size)) {
+		const auto size = int64(_result.size());
+		constexpr auto kReserve = 1024 * 1024;
+		_result.reserve((total / kReserve) * kReserve);
+		const auto overwrite = std::min(
+			size - _resultOffset,
+			int64(buf_size));
+		if (overwrite) {
+			memcpy(_result.data() + _resultOffset, buf, overwrite);
+		}
+		if (const auto append = buf_size - overwrite) {
+			_result.append(
+				reinterpret_cast<const char*>(buf) + overwrite,
+				append);
+		}
+		_resultOffset += buf_size;
+	}
+	return buf_size;
+}
+
+int64_t RoundVideoRecorder::Private::seek(int64_t offset, int whence) {
+	const auto checkedSeek = [&](int64_t offset) {
+		if (offset < 0 || offset > int64(_result.size())) {
+			return int64(-1);
+		}
+		return (_resultOffset = offset);
+	};
+	switch (whence) {
+	case SEEK_SET: return checkedSeek(offset);
+	case SEEK_CUR: return checkedSeek(_resultOffset + offset);
+	case SEEK_END: return checkedSeek(int64(_result.size()) + offset);
+	case AVSEEK_SIZE: return int64(_result.size());
+	}
+	return -1;
+}
+
+void RoundVideoRecorder::Private::initEncoding() {
+	_format = MakeWriteFormatPointer(
+		static_cast<void*>(this),
+		nullptr,
+		&Private::Write,
+		&Private::Seek,
+		"mp4"_q);
+
+	if (!initVideo() || !initAudio()) {
+		deinitEncoding();
+		return;
+	}
+
+	const auto error = AvErrorWrap(avformat_write_header(
+		_format.get(),
+		nullptr));
+	if (error) {
+		LogError("avformat_write_header", error);
+		deinitEncoding();
+	}
+}
+
+bool RoundVideoRecorder::Private::initVideo() {
+	if (!_format) {
+		return false;
+	}
+
+	const auto videoCodec = avcodec_find_encoder_by_name("libopenh264");
+	if (!videoCodec) {
+		LogError("avcodec_find_encoder_by_name", "libopenh264");
+		return false;
+	}
+
+	_videoStream = avformat_new_stream(_format.get(), videoCodec);
+	if (!_videoStream) {
+		LogError("avformat_new_stream", "libopenh264");
+		return false;
+	}
+
+	_videoCodec = CodecPointer(avcodec_alloc_context3(videoCodec));
+	if (!_videoCodec) {
+		LogError("avcodec_alloc_context3", "libopenh264");
+		return false;
+	}
+
+	_videoCodec->codec_id = videoCodec->id;
+	_videoCodec->codec_type = AVMEDIA_TYPE_VIDEO;
+	_videoCodec->width = kSide;
+	_videoCodec->height = kSide;
+	_videoCodec->time_base = AVRational{ 1, 1'000'000 }; // Microseconds.
+	_videoCodec->framerate = AVRational{ 0, 1 }; // Variable frame rate.
+	_videoCodec->pix_fmt = AV_PIX_FMT_YUV420P;
+	_videoCodec->bit_rate = 5 * 1024 * 1024; // 5Mbps
+
+	auto error = AvErrorWrap(avcodec_open2(
+		_videoCodec.get(),
+		videoCodec,
+		nullptr));
+	if (error) {
+		LogError("avcodec_open2", error, "libopenh264");
+		return false;
+	}
+
+	error = AvErrorWrap(avcodec_parameters_from_context(
+		_videoStream->codecpar,
+		_videoCodec.get()));
+	if (error) {
+		LogError("avcodec_parameters_from_context", error, "libopenh264");
+		return false;
+	}
+
+	_videoFrame = MakeFramePointer();
+	if (!_videoFrame) {
+		return false;
+	}
+
+	_videoFrame->format = _videoCodec->pix_fmt;
+	_videoFrame->width = _videoCodec->width;
+	_videoFrame->height = _videoCodec->height;
+
+	error = AvErrorWrap(av_frame_get_buffer(_videoFrame.get(), 0));
+	if (error) {
+		LogError("av_frame_get_buffer", error, "libopenh264");
+		return false;
+	}
+
+	return true;
+}
+
+bool RoundVideoRecorder::Private::initAudio() {
+	if (!_format) {
+		return false;
+	}
+
+	const auto audioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
+	if (!audioCodec) {
+		LogError("avcodec_find_encoder", "AAC");
+		return false;
+	}
+
+	_audioStream = avformat_new_stream(_format.get(), audioCodec);
+	if (!_audioStream) {
+		LogError("avformat_new_stream", "AAC");
+		return false;
+	}
+
+	_audioCodec = CodecPointer(avcodec_alloc_context3(audioCodec));
+	if (!_audioCodec) {
+		LogError("avcodec_alloc_context3", "AAC");
+		return false;
+	}
+
+	_audioChannels = 1;
+	_audioCodec->sample_fmt = AV_SAMPLE_FMT_FLTP;
+	_audioCodec->bit_rate = 32000;
+	_audioCodec->sample_rate = 48000;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	_audioCodec->ch_layout = AV_CHANNEL_LAYOUT_MONO;
+	_audioCodec->channels = _audioCodec->ch_layout.nb_channels;
+#else
+	_audioCodec->channel_layout = AV_CH_LAYOUT_MONO;
+	_audioCodec->channels = _audioChannels;
+#endif
+
+	auto error = AvErrorWrap(avcodec_open2(
+		_audioCodec.get(),
+		audioCodec,
+		nullptr));
+	if (error) {
+		LogError("avcodec_open2", error, "AAC");
+		return false;
+	}
+
+	error = AvErrorWrap(avcodec_parameters_from_context(
+		_audioStream->codecpar,
+		_audioCodec.get()));
+	if (error) {
+		LogError("avcodec_parameters_from_context", error, "AAC");
+		return false;
+	}
+
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	_swrContext = MakeSwresamplePointer(
+		&_audioCodec->ch_layout,
+		AV_SAMPLE_FMT_S16,
+		_audioCodec->sample_rate,
+		&_audioCodec->ch_layout,
+		_audioCodec->sample_fmt,
+		_audioCodec->sample_rate,
+		&_swrContext);
+#else // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	_swrContext = MakeSwresamplePointer(
+		&_audioCodec->ch_layout,
+		AV_SAMPLE_FMT_S16,
+		_audioCodec->sample_rate,
+		&_audioCodec->ch_layout,
+		_audioCodec->sample_fmt,
+		_audioCodec->sample_rate,
+		&_swrContext);
+#endif // DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	if (!_swrContext) {
+		return false;
+	}
+
+	_audioFrame = MakeFramePointer();
+	if (!_audioFrame) {
+		return false;
+	}
+
+	_audioFrame->nb_samples = _audioCodec->frame_size;
+	_audioFrame->format = _audioCodec->sample_fmt;
+	_audioFrame->sample_rate = _audioCodec->sample_rate;
+#if DA_FFMPEG_NEW_CHANNEL_LAYOUT
+	av_channel_layout_copy(&_audioFrame->ch_layout, &_audioCodec->ch_layout);
+#else
+	_audioFrame->channel_layout = _audioCodec->channel_layout;
+	_audioFrame->channels = _audioCodec->channels;
+#endif
+
+	error = AvErrorWrap(av_frame_get_buffer(_audioFrame.get(), 0));
+	if (error) {
+		LogError("av_frame_get_buffer", error, "AAC");
+		return false;
+	}
+
+	return true;
+}
+
+void RoundVideoRecorder::Private::finishEncoding() {
+	if (_format
+		&& writeFrame(nullptr, _videoCodec, _videoStream)
+		&& writeFrame(nullptr, _audioCodec, _audioStream)) {
+		av_write_trailer(_format.get());
+	}
+	deinitEncoding();
+}
+
+RoundVideoResult RoundVideoRecorder::Private::finish() {
+	if (!_format) {
+		return {};
+	}
+	finishEncoding();
+	return {
+		.content = _result,
+		.waveform = QByteArray(),
+		.duration = _resultDuration,
+	};
+};
+
+void RoundVideoRecorder::Private::deinitEncoding() {
+	_swsContext = nullptr;
+	_videoCodec = nullptr;
+	_videoStream = nullptr;
+	_videoFrame = nullptr;
+	_swrContext = nullptr;
+	_audioCodec = nullptr;
+	_audioStream = nullptr;
+	_audioFrame = nullptr;
+	_format = nullptr;
+
+	_videoFirstTimestamp = -1;
+	_videoPts = 0;
+	_audioPts = 0;
+}
+
+void RoundVideoRecorder::Private::push(
+		int64 mcstimestamp,
+		const QImage &frame) {
+	if (!_format) {
+		return;
+	} else if (!_firstAudioChunkFinished) {
+		// Skip frames while we didn't start receiving audio.
+		return;
+	} else if (!_firstVideoFrameTime) {
+		_firstVideoFrameTime = crl::now();
+	}
+	encodeVideoFrame(mcstimestamp, frame);
+}
+
+void RoundVideoRecorder::Private::push(const Media::Capture::Chunk &chunk) {
+	if (!_format) {
+		return;
+	} else if (!_firstAudioChunkFinished || !_firstVideoFrameTime) {
+		_firstAudioChunkFinished = chunk.finished;
+		return;
+	}
+	// We get a chunk roughly every 50ms and need to encode it interleaved.
+	encodeAudioFrame(chunk);
+}
+
+void RoundVideoRecorder::Private::encodeVideoFrame(
+		int64 mcstimestamp,
+		const QImage &frame) {
+	_swsContext = MakeSwscalePointer(
+		QSize(kSide, kSide),
+		AV_PIX_FMT_BGRA,
+		QSize(kSide, kSide),
+		AV_PIX_FMT_YUV420P,
+		&_swsContext);
+	if (!_swsContext) {
+		deinitEncoding();
+		return;
+	}
+
+	if (_videoFirstTimestamp == -1) {
+		_videoFirstTimestamp = mcstimestamp;
+	}
+
+	const auto fwidth = frame.width();
+	const auto fheight = frame.height();
+	const auto fmin = std::min(fwidth, fheight);
+	const auto fx = (fwidth > fheight) ? (fwidth - fheight) / 2 : 0;
+	const auto fy = (fwidth < fheight) ? (fheight - fwidth) / 2 : 0;
+	const auto crop = QRect(fx, fy, fmin, fmin);
+	const auto cropped = frame.copy(crop).scaled(
+		kSide,
+		kSide,
+		Qt::KeepAspectRatio,
+		Qt::SmoothTransformation);
+
+	// Convert QImage to RGB32 format
+	// QImage rgbImage = cropped.convertToFormat(QImage::Format_ARGB32);
+
+	// Prepare source data
+	const uint8_t *srcSlice[1] = { cropped.constBits() };
+	int srcStride[1] = { cropped.bytesPerLine() };
+
+	// Perform the color space conversion
+	sws_scale(
+		_swsContext.get(),
+		srcSlice,
+		srcStride,
+		0,
+		kSide,
+		_videoFrame->data,
+		_videoFrame->linesize);
+
+	_videoFrame->pts = mcstimestamp - _videoFirstTimestamp;
+
+	LOG(("Audio At: %1").arg(_videoFrame->pts / 1'000'000.));
+	if (!writeFrame(_videoFrame, _videoCodec, _videoStream)) {
+		return;
+	}
+}
+
+void RoundVideoRecorder::Private::encodeAudioFrame(const Media::Capture::Chunk &chunk) {
+	if (_audioTail.isEmpty()) {
+		_audioTail = chunk.samples;
+	} else {
+		_audioTail.append(chunk.samples);
+	}
+
+	const int inSamples = _audioTail.size() / sizeof(int16_t);
+	const uint8_t *inData = reinterpret_cast<const uint8_t*>(_audioTail.constData());
+	int samplesProcessed = 0;
+
+	while (samplesProcessed + _audioCodec->frame_size <= inSamples) {
+		int remainingSamples = inSamples - samplesProcessed;
+		int outSamples = av_rescale_rnd(
+			swr_get_delay(_swrContext.get(), 48000) + remainingSamples,
+			_audioCodec->sample_rate,
+			48000,
+			AV_ROUND_UP);
+
+		// Ensure we don't exceed the frame's capacity
+		outSamples = std::min(outSamples, _audioCodec->frame_size);
+
+		const auto process = std::min(remainingSamples, outSamples);
+		auto dataptr = inData + samplesProcessed * sizeof(int16_t);
+		auto error = AvErrorWrap(swr_convert(
+			_swrContext.get(),
+			_audioFrame->data,
+			outSamples,
+			&dataptr,
+			process));
+
+		if (error) {
+			LogError("swr_convert", error);
+			deinitEncoding();
+			return;
+		}
+
+		// Update the actual number of samples in the frame
+		_audioFrame->nb_samples = error.code();
+
+		_audioFrame->pts = _audioPts;
+		_audioPts += _audioFrame->nb_samples;
+
+		LOG(("Audio At: %1").arg(_audioFrame->pts / 48'000.));
+		if (!writeFrame(_audioFrame, _audioCodec, _audioStream)) {
+			return;
+		}
+
+		samplesProcessed += process;
+	}
+	const auto left = inSamples - samplesProcessed;
+	if (left > 0) {
+		memmove(_audioTail.data(), _audioTail.data() + samplesProcessed * sizeof(int16_t), left * sizeof(int16_t));
+		_audioTail.resize(left * sizeof(int16_t));
+	} else {
+		_audioTail.clear();
+	}
+}
+
+bool RoundVideoRecorder::Private::writeFrame(
+		const FramePointer &frame,
+		const CodecPointer &codec,
+		AVStream *stream) {
+	auto error = AvErrorWrap(avcodec_send_frame(codec.get(), frame.get()));
+	if (error) {
+		LogError("avcodec_send_frame", error);
+		deinitEncoding();
+		return false;
+	}
+
+	auto pkt = av_packet_alloc();
+	const auto guard = gsl::finally([&] {
+		av_packet_free(&pkt);
+	});
+	while (true) {
+		error = AvErrorWrap(avcodec_receive_packet(codec.get(), pkt));
+		if (error.code() == AVERROR(EAGAIN)) {
+			return true; // Need more input
+		} else if (error.code() == AVERROR_EOF) {
+			return true; // Encoding finished
+		} else if (error) {
+			LogError("avcodec_receive_packet", error);
+			deinitEncoding();
+			return false;
+		}
+
+		pkt->stream_index = stream->index;
+		av_packet_rescale_ts(pkt, codec->time_base, stream->time_base);
+
+		accumulate_max(
+			_resultDuration,
+			PtsToTimeCeil(pkt->pts, stream->time_base));
+
+		error = AvErrorWrap(av_interleaved_write_frame(_format.get(), pkt));
+		if (error) {
+			LogError("av_interleaved_write_frame", error);
+			deinitEncoding();
+			return false;
+		}
+	}
+
+	return true;
+}
+
+RoundVideoRecorder::RoundVideoRecorder(
+	RoundVideoRecorderDescriptor &&descriptor)
+: _descriptor(std::move(descriptor))
+, _preview(std::make_unique<RpWidget>(_descriptor.container))
+, _private() {
+	setup();
+}
+
+RoundVideoRecorder::~RoundVideoRecorder() = default;
+
+Fn<void(Media::Capture::Chunk)> RoundVideoRecorder::audioChunkProcessor() {
+	return [weak = _private.weak()](Media::Capture::Chunk chunk) {
+		weak.with([copy = std::move(chunk)](Private &that) {
+			that.push(copy);
+		});
+	};
+}
+
+void RoundVideoRecorder::hide(Fn<void(RoundVideoResult)> done) {
+	if (done) {
+		_private.with([done = std::move(done)](Private &that) {
+			done(that.finish());
+		});
+	}
+
+	setPaused(true);
+
+	_preview->hide();
+	if (const auto onstack = _descriptor.hidden) {
+		onstack(this);
+	}
+}
+
+void RoundVideoRecorder::setup() {
+	const auto raw = _preview.get();
+
+	const auto side = style::ConvertScale(kSide * 3 / 4);
+	_descriptor.container->sizeValue(
+	) | rpl::start_with_next([=](QSize outer) {
+		raw->setGeometry(
+			style::centerrect(
+				QRect(QPoint(), outer),
+				QRect(0, 0, side, side)));
+	}, raw->lifetime());
+
+	raw->paintRequest() | rpl::start_with_next([=] {
+		auto p = QPainter(raw);
+		auto hq = PainterHighQualityEnabler(p);
+
+		auto info = _descriptor.track->frameWithInfo(true);
+		if (!info.original.isNull()) {
+			const auto owidth = info.original.width();
+			const auto oheight = info.original.height();
+			const auto omin = std::min(owidth, oheight);
+			const auto ox = (owidth > oheight) ? (owidth - oheight) / 2 : 0;
+			const auto oy = (owidth < oheight) ? (oheight - owidth) / 2 : 0;
+			const auto from = QRect(ox, oy, omin, omin);
+			p.drawImage(QRect(0, 0, side, side), info.original, from);
+		} else {
+			p.setPen(Qt::NoPen);
+			p.setBrush(QColor(0, 0, 0));
+			p.drawEllipse(0, 0, side, side);
+		}
+		_descriptor.track->markFrameShown();
+	}, raw->lifetime());
+
+	_descriptor.track->renderNextFrame() | rpl::start_with_next([=] {
+		const auto info = _descriptor.track->frameWithInfo(true);
+		if (!info.original.isNull() && _lastAddedIndex != info.index) {
+			_lastAddedIndex = info.index;
+			const auto ts = info.mcstimestamp;
+			_private.with([copy = info.original, ts](Private &that) {
+				that.push(ts, copy);
+			});
+		}
+		raw->update();
+	}, raw->lifetime());
+
+	raw->show();
+	raw->raise();
+}
+
+void RoundVideoRecorder::setPaused(bool paused) {
+	if (_paused == paused) {
+		return;
+	}
+	_paused = paused;
+	_preview->update();
+}
+
+
+} // namespace Ui
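Annotation (not part of the commit): apart from the debug dump to kOutputFilename in the destructor, the muxer above never touches the filesystem; Write()/Seek() stream the mp4 into the in-memory _result buffer that finish() hands back. Below is a hedged, self-contained sketch of that custom-AVIO pattern in isolation. MemoryTarget, WriteToMemory and MakeMemoryMuxer are illustrative names only; note that newer FFmpeg declares the write callback with a const buffer while older releases take a non-const pointer, which is what the project's MakeWriteFormatPointer helper papers over.

	// Sketch: mux into memory through user callbacks instead of a file.
	extern "C" {
	#include <libavformat/avformat.h>
	} // extern "C"

	#include <cstdint>
	#include <vector>

	struct MemoryTarget {
		std::vector<uint8_t> bytes;
	};

	static int WriteToMemory(void *opaque, const uint8_t *buf, int size) {
		const auto target = static_cast<MemoryTarget*>(opaque);
		target->bytes.insert(target->bytes.end(), buf, buf + size);
		return size;
	}

	AVFormatContext *MakeMemoryMuxer(MemoryTarget *target) {
		auto format = (AVFormatContext*)nullptr;
		if (avformat_alloc_output_context2(&format, nullptr, "mp4", nullptr) < 0) {
			return nullptr;
		}
		constexpr auto kBufferSize = 4096;
		const auto buffer = static_cast<unsigned char*>(av_malloc(kBufferSize));
		format->pb = avio_alloc_context(
			buffer,
			kBufferSize,
			1, // write_flag
			target,
			nullptr, // no read callback for an output context
			&WriteToMemory,
			nullptr); // mp4 normally also wants a seek callback, as above
		// Caller owns format->pb (and its buffer) and must free them together
		// with the context once av_write_trailer() has run.
		return format;
	}

A plain mp4 muxer needs a seekable sink to patch the header at the end, which is why the recorder implements Seek() over its QByteArray instead of passing nullptr as this sketch does.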
Telegram/SourceFiles/ui/controls/round_video_recorder.h (new file, 65 lines)
@@ -0,0 +1,65 @@
+/*
+This file is part of Telegram Desktop,
+the official desktop application for the Telegram messaging service.
+
+For license and copyright information please follow this link:
+https://github.com/telegramdesktop/tdesktop/blob/master/LEGAL
+*/
+#pragma once
+
+#include <crl/crl_object_on_queue.h>
+
+namespace Media::Capture {
+struct Chunk;
+} // namespace Media::Capture
+
+namespace tgcalls {
+class VideoCaptureInterface;
+} // namespace tgcalls
+
+namespace Webrtc {
+class VideoTrack;
+} // namespace Webrtc
+
+namespace Ui {
+
+class RpWidget;
+class RoundVideoRecorder;
+
+struct RoundVideoRecorderDescriptor {
+	not_null<RpWidget*> container;
+	Fn<void(not_null<RoundVideoRecorder*>)> hidden;
+	std::shared_ptr<tgcalls::VideoCaptureInterface> capturer;
+	std::shared_ptr<Webrtc::VideoTrack> track;
+};
+
+struct RoundVideoResult {
+	QByteArray content;
+	QByteArray waveform;
+	crl::time duration = 0;
+};
+
+class RoundVideoRecorder final {
+public:
+	explicit RoundVideoRecorder(RoundVideoRecorderDescriptor &&descriptor);
+	~RoundVideoRecorder();
+
+	[[nodiscard]] Fn<void(Media::Capture::Chunk)> audioChunkProcessor();
+
+	void setPaused(bool paused);
+	void hide(Fn<void(RoundVideoResult)> done = nullptr);
+
+private:
+	class Private;
+
+	void setup();
+
+	const RoundVideoRecorderDescriptor _descriptor;
+	std::unique_ptr<RpWidget> _preview;
+	crl::object_on_queue<Private> _private;
+	int _lastAddedIndex = 0;
+	bool _paused = false;
+
+};
+
+} // namespace Ui
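Annotation (not part of the commit): the header above is the whole public surface: construct with a descriptor, feed audio through audioChunkProcessor(), and call hide() with a callback to collect the finished RoundVideoResult. A hedged wiring sketch, assuming the caller already owns the objects named below ("parent", "capturer", "track" and "sendRound" are placeholders):

	auto recorder = std::make_unique<Ui::RoundVideoRecorder>(
		Ui::RoundVideoRecorderDescriptor{
			.container = parent,   // RpWidget to cover with the round preview
			.hidden = [](not_null<Ui::RoundVideoRecorder*>) {},
			.capturer = capturer,  // tgcalls::VideoCaptureInterface for the camera
			.track = track,        // Webrtc::VideoTrack the camera renders into
		});

	// Route the ~50 ms capture chunks straight into the recorder's muxer.
	Media::Capture::instance()->start(recorder->audioChunkProcessor());

	// When recording ends: grab the mp4 bytes, then hide the preview.
	recorder->hide([](Ui::RoundVideoResult result) {
		sendRound(result.content, result.waveform, result.duration); // placeholder
	});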
@@ -366,6 +366,8 @@ PRIVATE
     ui/controls/invite_link_label.h
     ui/controls/peer_list_dummy.cpp
     ui/controls/peer_list_dummy.h
+    ui/controls/round_video_recorder.cpp
+    ui/controls/round_video_recorder.h
     ui/controls/send_as_button.cpp
     ui/controls/send_as_button.h
     ui/controls/send_button.cpp
@@ -500,4 +502,6 @@ PRIVATE
     desktop-app::lib_spellcheck
     desktop-app::lib_stripe
     desktop-app::external_kcoreaddons
+    desktop-app::external_openh264
+    desktop-app::external_webrtc
 )

@@ -1 +1 @@
-Subproject commit 8751e27d50d2f26b5d20673e5ddba38e90953570
+Subproject commit fc726486ebd261283583b5cd5f6a97a18b2ab6ca