Add stream layer

The decoder initially read from the socket, decoded the video and sent
the decoded frames to the screen:

              +---------+      +----------+
  socket ---> | decoder | ---> |  screen  |
              +---------+      +----------+

The design was simple, but the decoder had several responsibilities.

Then we added the recording feature: a recorder reused the packets
received from the socket managed by the decoder:

                                    +----------+
                               ---> |  screen  |
              +---------+     /     +----------+
  socket ---> | decoder | ----
              +---------+     \     +----------+
                               ---> | recorder |
                                    +----------+

This lack of separation of concerns now has concrete implications: we
could not (properly) disable the decoder/display to only record the
video.

Therefore, split the decoder to extract the stream:

                                    +----------+      +----------+
                               ---> | decoder  | ---> |  screen  |
              +---------+     /     +----------+      +----------+
  socket ---> | stream  | ----
              +---------+     \     +----------+
                               ---> | recorder |
                                    +----------+

This will allow recording the stream without decoding the video.
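
Concretely, the stream thread now owns the packet loop and forwards each
packet to whichever consumers are attached. A simplified sketch, adapted
from Stream::run() in stream.cpp below (error paths and the recording
meta-header handling are omitted):

    // Read demuxed packets and forward each one to the optional sinks;
    // either sink may be absent, so display and recording are independent.
    AVPacket packet;
    av_init_packet(&packet);
    while (!m_quit.load() && !av_read_frame(formatCtx, &packet)) {
        if (m_decoder && !m_decoder->push(&packet)) {    // decode for display
            av_packet_unref(&packet);
            break;
        }
        if (m_recorder && !m_recorder->write(&packet)) { // write to file
            av_packet_unref(&packet);
            break;
        }
        av_packet_unref(&packet);
    }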
commit 60507d6cc6 (Barry, 2019-06-18 14:40:25 +08:00)
8 changed files with 538 additions and 440 deletions

QtScrcpy/decoder/decoder.cpp

@@ -1,19 +1,11 @@
 #include <QDebug>
-#include <QTime>

 #include "compat.h"
-#include "decoder.h"
 #include "videobuffer.h"
-#include "devicesocket.h"
-#include "recorder.h"
+#include "decoder.h"

-#define BUFSIZE 0x10000
-#define HEADER_SIZE 12
-#define NO_PTS UINT64_C(-1)

-typedef qint32 (*ReadPacketFunc)(void*, quint8*, qint32);

 Decoder::Decoder()
-    : QObject(Q_NULLPTR)
 {
 }
@@ -23,319 +15,54 @@ Decoder::~Decoder()
 }
-static void avLogCallback(void *avcl, int level, const char *fmt, va_list vl) {
-    Q_UNUSED(avcl);
-    Q_UNUSED(vl);
-    QString localFmt = QString::fromUtf8(fmt);
-    localFmt.prepend("[FFmpeg] ");
-    switch (level) {
-    case AV_LOG_PANIC:
-    case AV_LOG_FATAL:
-        qFatal(localFmt.toUtf8());
-        break;
-    case AV_LOG_ERROR:
-        qCritical(localFmt.toUtf8());
-        break;
-    case AV_LOG_WARNING:
-        qWarning(localFmt.toUtf8());
-        break;
-    case AV_LOG_INFO:
-        qInfo(localFmt.toUtf8());
-        break;
-    case AV_LOG_DEBUG:
-        //qDebug(localFmt.toUtf8());
-        break;
-    }
-    // do not forward others, which are too verbose
-    return;
-}
-bool Decoder::init()
-{
-#ifdef QTSCRCPY_LAVF_REQUIRES_REGISTER_ALL
-    av_register_all();
-#endif
-    if (avformat_network_init()) {
-        return false;
-    }
-    av_log_set_callback(avLogCallback);
-    return true;
-}
-void Decoder::deInit()
-{
-    avformat_network_deinit(); // ignore failure
-}
 void Decoder::setVideoBuffer(VideoBuffer* vb)
 {
     m_vb = vb;
 }
-static quint32 bufferRead32be(quint8* buf) {
-    return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
-}
-static quint64 bufferRead64be(quint8* buf) {
-    quint32 msb = bufferRead32be(buf);
-    quint32 lsb = bufferRead32be(&buf[4]);
-    return ((quint64) msb << 32) | lsb;
-}
-static Decoder::FrameMeta* frameMetaNew(quint64 pts) {
-    Decoder::FrameMeta* meta = new Decoder::FrameMeta;
-    if (!meta) {
-        return meta;
-    }
-    meta->pts = pts;
-    meta->next = Q_NULLPTR;
-    return meta;
-}
-static void frameMetaDelete(Decoder::FrameMeta* frameMeta) {
-    if (frameMeta) {
-        delete frameMeta;
-    }
-}
-static bool receiverStatePushMeta(Decoder::ReceiverState* state, quint64 pts) {
-    Decoder::FrameMeta* frameMeta = frameMetaNew(pts);
-    if (!frameMeta) {
-        return false;
-    }
-    // append to the list
-    // (iterate to find the last item, in practice the list should be tiny)
-    Decoder::FrameMeta **p = &state->frameMetaQueue;
-    while (*p) {
-        p = &(*p)->next;
-    }
-    *p = frameMeta;
-    return true;
-}
-static quint64 receiverStateTakeMeta(Decoder::ReceiverState* state) {
-    Decoder::FrameMeta *frameMeta = state->frameMetaQueue; // first item
-    Q_ASSERT(frameMeta); // must not be empty
-    quint64 pts = frameMeta->pts;
-    state->frameMetaQueue = frameMeta->next; // remove the item
-    frameMetaDelete(frameMeta);
-    return pts;
-}
-static qint32 readPacketWithMeta(void *opaque, uint8_t *buf, int bufSize) {
-    Decoder* decoder = (Decoder*)opaque;
-    Decoder::ReceiverState* state = decoder->getReceiverState();
-    // The video stream contains raw packets, without time information. When we
-    // record, we retrieve the timestamps separately, from a "meta" header
-    // added by the server before each raw packet.
-    //
-    // The "meta" header length is 12 bytes:
-    // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
-    // <-------------> <-----> <-----------------------------...
-    //        PTS        packet        raw packet
-    //                    size
-    //
-    // It is followed by <packet_size> bytes containing the packet/frame.
-    if (!state->remaining) {
-        quint8 header[HEADER_SIZE];
-        qint32 r = decoder->recvData(header, HEADER_SIZE);
-        if (r == -1) {
-            return AVERROR(errno);
-        }
-        if (r == 0) {
-            return AVERROR_EOF;
-        }
-        // no partial read (net_recv_all())
-        if (r != HEADER_SIZE) {
-            return AVERROR(ENOMEM);
-        }
-        uint64_t pts = bufferRead64be(header);
-        state->remaining = bufferRead32be(&header[8]);
-        if (pts != NO_PTS && !receiverStatePushMeta(state, pts)) {
-            qCritical("Could not store PTS for recording");
-            // we cannot save the PTS, the recording would be broken
-            return AVERROR(ENOMEM);
-        }
-    }
-    Q_ASSERT(state->remaining);
-    if (bufSize > state->remaining) {
-        bufSize = state->remaining;
-    }
-    qint32 r = decoder->recvData(buf, bufSize);
-    if (r == -1) {
-        return AVERROR(errno);
-    }
-    if (r == 0) {
-        return AVERROR_EOF;
-    }
-    Q_ASSERT(state->remaining >= r);
-    state->remaining -= r;
-    return r;
-}
-static qint32 readRawPacket(void *opaque, quint8 *buf, qint32 bufSize) {
-    Decoder *decoder = (Decoder*)opaque;
-    if (decoder) {
-        qint32 len = decoder->recvData(buf, bufSize);
-        if (len == -1) {
-            return AVERROR(errno);
-        }
-        if (len == 0) {
-            return AVERROR_EOF;
-        }
-        return len;
-    }
-    return AVERROR_EOF;
-}
-void Decoder::setDeviceSocket(DeviceSocket* deviceSocket)
-{
-    m_deviceSocket = deviceSocket;
-}
-void Decoder::setRecoder(Recorder *recorder)
-{
-    m_recorder = recorder;
-}
-qint32 Decoder::recvData(quint8* buf, qint32 bufSize)
-{
-    if (!buf) {
-        return 0;
-    }
-    if (m_deviceSocket) {
-        qint32 len = m_deviceSocket->subThreadRecvData(buf, bufSize);
-        return len;
-    }
-    return 0;
-}
-bool Decoder::startDecode()
-{
-    if (!m_deviceSocket) {
-        return false;
-    }
-    m_quit = false;
-    start();
-    return true;
-}
-void Decoder::stopDecode()
-{
-    m_quit = true;
-    if (m_vb) {
-        m_vb->interrupt();
-    }
-    wait();
-}
-Decoder::ReceiverState *Decoder::getReceiverState()
-{
-    return &m_receiverState;
-}
-void Decoder::run()
-{
-    unsigned char *decoderBuffer = Q_NULLPTR;
-    AVIOContext *avioCtx = Q_NULLPTR;
-    AVFormatContext *formatCtx = Q_NULLPTR;
-    AVCodec *codec = Q_NULLPTR;
-    AVCodecContext *codecCtx = Q_NULLPTR;
-    ReadPacketFunc readPacket = Q_NULLPTR;
-    bool isFormatCtxOpen = false;
-    bool isCodecCtxOpen = false;
-    // decoder buffer
-    decoderBuffer = (unsigned char*)av_malloc(BUFSIZE);
-    if (!decoderBuffer) {
-        qCritical("Could not allocate buffer");
-        goto runQuit;
-    }
-    // initialize the receiver state
-    m_receiverState.frameMetaQueue = Q_NULLPTR;
-    m_receiverState.remaining = 0;
-    // if recording is enabled, a "header" is sent between raw packets
-    readPacket = m_recorder ? readPacketWithMeta : readRawPacket;
-    // io context
-    avioCtx = avio_alloc_context(decoderBuffer, BUFSIZE, 0, this, readPacket, NULL, NULL);
-    if (!avioCtx) {
-        qCritical("Could not allocate avio context");
-        // avformat_open_input takes ownership of 'decoderBuffer'
-        // so only free the buffer before avformat_open_input()
-        av_free(decoderBuffer);
-        goto runQuit;
-    }
-    // format context
-    formatCtx = avformat_alloc_context();
-    if (!formatCtx) {
-        qCritical("Could not allocate format context");
-        goto runQuit;
-    }
-    formatCtx->pb = avioCtx;
-    if (avformat_open_input(&formatCtx, NULL, NULL, NULL) < 0) {
-        qCritical("Could not open video stream");
-        goto runQuit;
-    }
-    isFormatCtxOpen = true;
-    // codec
-    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
-    if (!codec) {
-        qCritical("H.264 decoder not found");
-        goto runQuit;
-    }
+bool Decoder::open(const AVCodec *codec)
+{
     // codec context
-    codecCtx = avcodec_alloc_context3(codec);
-    if (!codecCtx) {
+    m_codecCtx = avcodec_alloc_context3(codec);
+    if (!m_codecCtx) {
         qCritical("Could not allocate decoder context");
-        goto runQuit;
+        return false;
     }
-    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
+    if (avcodec_open2(m_codecCtx, codec, NULL) < 0) {
         qCritical("Could not open H.264 codec");
-        goto runQuit;
+        return false;
     }
-    isCodecCtxOpen = true;
+    m_isCodecCtxOpen = true;
+    return true;
+}
-    if (m_recorder && !m_recorder->open(codec)) {
-        qCritical("Could not open recorder");
-        goto runQuit;
-    }
+void Decoder::close()
+{
+    if (!m_codecCtx) {
+        return;
+    }
+    if (m_isCodecCtxOpen) {
+        avcodec_close(m_codecCtx);
+    }
+    avcodec_free_context(&m_codecCtx);
+}
-    AVPacket packet;
-    av_init_packet(&packet);
-    packet.data = Q_NULLPTR;
-    packet.size = 0;
-    while (!m_quit && !av_read_frame(formatCtx, &packet)) {
+bool Decoder::push(const AVPacket* packet)
+{
+    if (!m_codecCtx || !m_vb) {
+        return false;
+    }
     AVFrame* decodingFrame = m_vb->decodingFrame();
-    // the new decoding/encoding API has been introduced by:
-    // <http://git.videolan.org/?p=ffmpeg.git;a=commitdiff;h=7fc329e2dd6226dfecaa4a1d7adf353bf2773726>
 #ifdef QTSCRCPY_LAVF_HAS_NEW_ENCODING_DECODING_API
-    int ret;
-    if ((ret = avcodec_send_packet(codecCtx, &packet)) < 0) {
+    int ret = -1;
+    if ((ret = avcodec_send_packet(m_codecCtx, packet)) < 0) {
         char errorbuf[255] = { 0 };
         av_strerror(ret, errorbuf, 254);
         qCritical("Could not send video packet: %s", errorbuf);
-        goto runQuit;
+        return false;
     }
     if (decodingFrame) {
-        ret = avcodec_receive_frame(codecCtx, decodingFrame);
+        ret = avcodec_receive_frame(m_codecCtx, decodingFrame);
     }
     if (!ret) {
         // a frame was received
@@ -364,73 +91,37 @@ void Decoder::run()
         */
     } else if (ret != AVERROR(EAGAIN)) {
         qCritical("Could not receive video frame: %d", ret);
-        av_packet_unref(&packet);
-        goto runQuit;
+        return false;
     }
 #else
     int gotPicture = 0;
     int len = -1;
     if (decodingFrame) {
-        len = avcodec_decode_video2(codecCtx, decodingFrame, &gotPicture, &packet);
+        len = avcodec_decode_video2(m_codecCtx, decodingFrame, &gotPicture, packet);
     }
     if (len < 0) {
         qCritical("Could not decode video packet: %d", len);
-        av_packet_unref(&packet);
-        goto runQuit;
+        return false;
     }
     if (gotPicture) {
         pushFrame();
     }
 #endif
-    if (m_recorder) {
-        // we retrieve the PTS in order they were received, so they will
-        // be assigned to the correct frame
-        quint64 pts = receiverStateTakeMeta(&m_receiverState);
-        packet.pts = pts;
-        packet.dts = pts;
-        // no need to rescale with av_packet_rescale_ts(), the timestamps
-        // are in microseconds both in input and output
-        if (!m_recorder->write(&packet)) {
-            qCritical("Could not write frame to output file");
-            av_packet_unref(&packet);
-            goto runQuit;
-        }
-    }
-    av_packet_unref(&packet);
-    if (avioCtx->eof_reached) {
-        break;
-    }
-    }
-    qDebug() << "End of frames";
-runQuit:
-    if (m_recorder) {
-        m_recorder->close();
-    }
-    if (avioCtx) {
-        av_free(avioCtx->buffer);
-        av_freep(&avioCtx);
-    }
-    if (formatCtx && isFormatCtxOpen) {
-        avformat_close_input(&formatCtx);
-    }
-    if (formatCtx) {
-        avformat_free_context(formatCtx);
-    }
-    if (codecCtx && isCodecCtxOpen) {
-        avcodec_close(codecCtx);
-    }
-    if (codecCtx) {
-        avcodec_free_context(&codecCtx);
-    }
-    emit onDecodeStop();
+    return true;
+}
+void Decoder::interrupt()
+{
+    if (m_vb) {
+        m_vb->interrupt();
+    }
 }
 void Decoder::pushFrame()
 {
+    if (!m_vb) {
+        return;
+    }
     bool previousFrameConsumed = m_vb->offerDecodedFrame();
     if (!previousFrameConsumed) {
         // the previous newFrame will consume this frame

QtScrcpy/decoder/decoder.h

@@ -1,65 +1,36 @@
 #ifndef DECODER_H
 #define DECODER_H
+#include <QObject>
-#include <QThread>
-#include <QPointer>
-#include <QMutex>
 extern "C"
 {
 #include "libavcodec/avcodec.h"
-#include "libavformat/avformat.h"
 }
 class VideoBuffer;
-class DeviceSocket;
-class Recorder;
-class Decoder : public QThread
+class Decoder : public QObject
 {
     Q_OBJECT
 public:
-    typedef struct FrameMeta {
-        quint64 pts;
-        struct FrameMeta* next;
-    } FrameMeta;
-    typedef struct ReceiverState {
-        // meta (in order) for frames not consumed yet
-        FrameMeta* frameMetaQueue;
-        qint32 remaining; // remaining bytes to receive for the current frame
-    } ReceiverState;
     Decoder();
     virtual ~Decoder();
-public:
-    static bool init();
-    static void deInit();
     void setVideoBuffer(VideoBuffer* vb);
-    void setDeviceSocket(DeviceSocket* deviceSocket);
-    void setRecoder(Recorder* recorder);
-    qint32 recvData(quint8* buf, qint32 bufSize);
-    bool startDecode();
-    void stopDecode();
-    ReceiverState* getReceiverState();
+    bool open(const AVCodec *codec);
+    void close();
+    bool push(const AVPacket *packet);
+    void interrupt();
 signals:
     void onNewFrame();
-    void onDecodeStop();
 protected:
-    void run();
     void pushFrame();
 private:
-    QPointer<DeviceSocket> m_deviceSocket;
-    bool m_quit = false;
-    VideoBuffer* m_vb;
-    // for recorder
-    Recorder* m_recorder = Q_NULLPTR;
-    ReceiverState m_receiverState;
+    VideoBuffer* m_vb = Q_NULLPTR;
+    AVCodecContext* m_codecCtx = Q_NULLPTR;
+    bool m_isCodecCtxOpen = false;
 };
 #endif // DECODER_H

QtScrcpy/decoder/decoder.pri

@@ -2,11 +2,13 @@ HEADERS += \
     $$PWD/decoder.h \
     $$PWD/fpscounter.h \
     $$PWD/avframeconvert.h \
-    $$PWD/videobuffer.h
+    $$PWD/videobuffer.h \
+    $$PWD/stream.h

 SOURCES += \
     $$PWD/decoder.cpp \
     $$PWD/fpscounter.cpp \
     $$PWD/avframeconvert.cpp \
-    $$PWD/videobuffer.cpp
+    $$PWD/videobuffer.cpp \
+    $$PWD/stream.cpp

QtScrcpy/decoder/stream.cpp (new file, 367 lines)

@@ -0,0 +1,367 @@
#include <QDebug>
#include <QTime>

#include "compat.h"
#include "stream.h"
#include "decoder.h"
#include "devicesocket.h"
#include "recorder.h"

#define BUFSIZE 0x10000
#define HEADER_SIZE 12
#define NO_PTS UINT64_C(-1)

typedef qint32 (*ReadPacketFunc)(void*, quint8*, qint32);

Stream::Stream()
{
}

Stream::~Stream()
{
}

static void avLogCallback(void *avcl, int level, const char *fmt, va_list vl) {
    Q_UNUSED(avcl);
    Q_UNUSED(vl);
    QString localFmt = QString::fromUtf8(fmt);
    localFmt.prepend("[FFmpeg] ");
    switch (level) {
    case AV_LOG_PANIC:
    case AV_LOG_FATAL:
        qFatal(localFmt.toUtf8());
        break;
    case AV_LOG_ERROR:
        qCritical(localFmt.toUtf8());
        break;
    case AV_LOG_WARNING:
        qWarning(localFmt.toUtf8());
        break;
    case AV_LOG_INFO:
        qInfo(localFmt.toUtf8());
        break;
    case AV_LOG_DEBUG:
        //qDebug(localFmt.toUtf8());
        break;
    }
    // do not forward others, which are too verbose
    return;
}

bool Stream::init()
{
#ifdef QTSCRCPY_LAVF_REQUIRES_REGISTER_ALL
    av_register_all();
#endif
    if (avformat_network_init()) {
        return false;
    }
    av_log_set_callback(avLogCallback);
    return true;
}

void Stream::deInit()
{
    avformat_network_deinit(); // ignore failure
}

void Stream::setDecoder(Decoder* decoder)
{
    m_decoder = decoder;
}

static quint32 bufferRead32be(quint8* buf) {
    return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
}

static quint64 bufferRead64be(quint8* buf) {
    quint32 msb = bufferRead32be(buf);
    quint32 lsb = bufferRead32be(&buf[4]);
    return ((quint64) msb << 32) | lsb;
}

static Stream::FrameMeta* frameMetaNew(quint64 pts) {
    Stream::FrameMeta* meta = new Stream::FrameMeta;
    if (!meta) {
        return meta;
    }
    meta->pts = pts;
    meta->next = Q_NULLPTR;
    return meta;
}

static void frameMetaDelete(Stream::FrameMeta* frameMeta) {
    if (frameMeta) {
        delete frameMeta;
    }
}

static bool receiverStatePushMeta(Stream::ReceiverState* state, quint64 pts) {
    Stream::FrameMeta* frameMeta = frameMetaNew(pts);
    if (!frameMeta) {
        return false;
    }
    // append to the list
    // (iterate to find the last item, in practice the list should be tiny)
    Stream::FrameMeta **p = &state->frameMetaQueue;
    while (*p) {
        p = &(*p)->next;
    }
    *p = frameMeta;
    return true;
}

static quint64 receiverStateTakeMeta(Stream::ReceiverState* state) {
    Stream::FrameMeta *frameMeta = state->frameMetaQueue; // first item
    Q_ASSERT(frameMeta); // must not be empty
    quint64 pts = frameMeta->pts;
    state->frameMetaQueue = frameMeta->next; // remove the item
    frameMetaDelete(frameMeta);
    return pts;
}

static qint32 readPacketWithMeta(void *opaque, uint8_t *buf, int bufSize) {
    Stream* stream = (Stream*)opaque;
    Stream::ReceiverState* state = stream->getReceiverState();

    // The video stream contains raw packets, without time information. When we
    // record, we retrieve the timestamps separately, from a "meta" header
    // added by the server before each raw packet.
    //
    // The "meta" header length is 12 bytes:
    // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
    // <-------------> <-----> <-----------------------------...
    //        PTS        packet        raw packet
    //                    size
    //
    // It is followed by <packet_size> bytes containing the packet/frame.

    if (!state->remaining) {
        quint8 header[HEADER_SIZE];
        qint32 r = stream->recvData(header, HEADER_SIZE);
        if (r == -1) {
            return AVERROR(errno);
        }
        if (r == 0) {
            return AVERROR_EOF;
        }
        // no partial read (net_recv_all())
        if (r != HEADER_SIZE) {
            return AVERROR(ENOMEM);
        }
        uint64_t pts = bufferRead64be(header);
        state->remaining = bufferRead32be(&header[8]);
        if (pts != NO_PTS && !receiverStatePushMeta(state, pts)) {
            qCritical("Could not store PTS for recording");
            // we cannot save the PTS, the recording would be broken
            return AVERROR(ENOMEM);
        }
    }

    Q_ASSERT(state->remaining);
    if (bufSize > state->remaining) {
        bufSize = state->remaining;
    }

    qint32 r = stream->recvData(buf, bufSize);
    if (r == -1) {
        return AVERROR(errno);
    }
    if (r == 0) {
        return AVERROR_EOF;
    }
    Q_ASSERT(state->remaining >= r);
    state->remaining -= r;
    return r;
}

static qint32 readRawPacket(void *opaque, quint8 *buf, qint32 bufSize) {
    Stream *stream = (Stream*)opaque;
    if (stream) {
        qint32 len = stream->recvData(buf, bufSize);
        if (len == -1) {
            return AVERROR(errno);
        }
        if (len == 0) {
            return AVERROR_EOF;
        }
        return len;
    }
    return AVERROR_EOF;
}

void Stream::setDeviceSocket(DeviceSocket* deviceSocket)
{
    m_deviceSocket = deviceSocket;
}

void Stream::setRecoder(Recorder *recorder)
{
    m_recorder = recorder;
}

qint32 Stream::recvData(quint8* buf, qint32 bufSize)
{
    if (!buf) {
        return 0;
    }
    if (m_deviceSocket) {
        qint32 len = m_deviceSocket->subThreadRecvData(buf, bufSize);
        return len;
    }
    return 0;
}

bool Stream::startDecode()
{
    if (!m_deviceSocket) {
        return false;
    }
    m_quit.store(0);
    start();
    return true;
}

void Stream::stopDecode()
{
    m_quit.store(1);
    if (m_decoder) {
        m_decoder->interrupt();
    }
    wait();
}

Stream::ReceiverState *Stream::getReceiverState()
{
    return &m_receiverState;
}

void Stream::run()
{
    unsigned char *decoderBuffer = Q_NULLPTR;
    AVIOContext *avioCtx = Q_NULLPTR;
    AVFormatContext *formatCtx = Q_NULLPTR;
    AVCodec *codec = Q_NULLPTR;
    AVCodecContext *codecCtx = Q_NULLPTR;
    ReadPacketFunc readPacket = Q_NULLPTR;
    bool isFormatCtxOpen = false;

    // decoder buffer
    decoderBuffer = (unsigned char*)av_malloc(BUFSIZE);
    if (!decoderBuffer) {
        qCritical("Could not allocate buffer");
        goto runQuit;
    }

    // initialize the receiver state
    m_receiverState.frameMetaQueue = Q_NULLPTR;
    m_receiverState.remaining = 0;

    // if recording is enabled, a "header" is sent between raw packets
    readPacket = m_recorder ? readPacketWithMeta : readRawPacket;

    // io context
    avioCtx = avio_alloc_context(decoderBuffer, BUFSIZE, 0, this, readPacket, NULL, NULL);
    if (!avioCtx) {
        qCritical("Could not allocate avio context");
        // avformat_open_input takes ownership of 'decoderBuffer'
        // so only free the buffer before avformat_open_input()
        av_free(decoderBuffer);
        goto runQuit;
    }

    // format context
    formatCtx = avformat_alloc_context();
    if (!formatCtx) {
        qCritical("Could not allocate format context");
        goto runQuit;
    }
    formatCtx->pb = avioCtx;
    if (avformat_open_input(&formatCtx, NULL, NULL, NULL) < 0) {
        qCritical("Could not open video stream");
        goto runQuit;
    }
    isFormatCtxOpen = true;

    // codec
    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        qCritical("H.264 decoder not found");
        goto runQuit;
    }

    if (m_decoder && !m_decoder->open(codec)) {
        qCritical("Could not open m_decoder");
        goto runQuit;
    }

    if (m_recorder && !m_recorder->open(codec)) {
        qCritical("Could not open recorder");
        goto runQuit;
    }

    AVPacket packet;
    av_init_packet(&packet);
    packet.data = Q_NULLPTR;
    packet.size = 0;

    while (!m_quit.load() && !av_read_frame(formatCtx, &packet)) {
        if (m_decoder && !m_decoder->push(&packet)) {
            av_packet_unref(&packet);
            goto runQuit;
        }
        if (m_recorder) {
            // we retrieve the PTS in order they were received, so they will
            // be assigned to the correct frame
            quint64 pts = receiverStateTakeMeta(&m_receiverState);
            packet.pts = pts;
            packet.dts = pts;
            // no need to rescale with av_packet_rescale_ts(), the timestamps
            // are in microseconds both in input and output
            if (!m_recorder->write(&packet)) {
                qCritical("Could not write frame to output file");
                av_packet_unref(&packet);
                goto runQuit;
            }
        }
        av_packet_unref(&packet);
        if (avioCtx->eof_reached) {
            break;
        }
    }
    qDebug() << "End of frames";

runQuit:
    if (m_recorder) {
        m_recorder->close();
    }
    if (avioCtx) {
        av_free(avioCtx->buffer);
        av_freep(&avioCtx);
    }
    if (formatCtx && isFormatCtxOpen) {
        avformat_close_input(&formatCtx);
    }
    if (formatCtx) {
        avformat_free_context(formatCtx);
    }
    if (m_decoder) {
        m_decoder->close();
    }
    if (codecCtx) {
        avcodec_free_context(&codecCtx);
    }
    emit onStreamStop();
}
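
To make the meta header concrete, here is a hypothetical parse of a single
header using the helpers above (the byte values are invented for
illustration, they are not from the commit):

    // A 12-byte meta header, big-endian: 8 bytes of PTS, 4 bytes of size.
    quint8 header[HEADER_SIZE] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0,  // PTS = 100000 us
        0x00, 0x00, 0x10, 0x00                           // size = 4096 bytes
    };
    quint64 pts  = bufferRead64be(header);      // 100000
    quint32 size = bufferRead32be(&header[8]);  // 4096: the number of raw
                                                // packet bytes that follow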

QtScrcpy/decoder/stream.h (new file, 64 lines)

@@ -0,0 +1,64 @@
#ifndef STREAM_H
#define STREAM_H

#include <QThread>
#include <QPointer>
#include <QMutex>
#include <QAtomicInteger>

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
}

class DeviceSocket;
class Recorder;
class Decoder;
class Stream : public QThread
{
    Q_OBJECT
public:
    typedef struct FrameMeta {
        quint64 pts;
        struct FrameMeta* next;
    } FrameMeta;

    typedef struct ReceiverState {
        // meta (in order) for frames not consumed yet
        FrameMeta* frameMetaQueue;
        qint32 remaining; // remaining bytes to receive for the current frame
    } ReceiverState;

    Stream();
    virtual ~Stream();

public:
    static bool init();
    static void deInit();

    void setDecoder(Decoder* decoder);
    void setDeviceSocket(DeviceSocket* deviceSocket);
    void setRecoder(Recorder* recorder);
    qint32 recvData(quint8* buf, qint32 bufSize);
    bool startDecode();
    void stopDecode();
    ReceiverState* getReceiverState();

signals:
    void onStreamStop();

protected:
    void run();

private:
    QPointer<DeviceSocket> m_deviceSocket;
    QAtomicInteger<qint8> m_quit;
    // for recorder
    Recorder* m_recorder = Q_NULLPTR;
    ReceiverState m_receiverState;
    Decoder* m_decoder = Q_NULLPTR;
};

#endif // STREAM_H
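
For reference, a client now wires the pieces roughly as follows (a sketch
assuming a VideoBuffer `vb` and a DeviceSocket `socket` owned by the caller;
it mirrors the videoform.cpp changes below). Recording without display just
means never attaching a decoder:

    Stream stream;
    Decoder decoder;                  // only needed to display
    Recorder recorder("record.mp4");  // only needed to record

    decoder.setVideoBuffer(&vb);
    stream.setDecoder(&decoder);      // omit to record without decoding
    stream.setRecoder(&recorder);     // omit to display without recording
    stream.setDeviceSocket(socket);
    stream.startDecode();             // starts the stream thread
    // ...
    stream.stopDecode();              // interrupts the decoder and joins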

QtScrcpy/main.cpp

@@ -5,7 +5,7 @@
 #include <QTranslator>
 #include "dialog.h"
-#include "decoder.h"
+#include "stream.h"
 #include "mousetap/mousetap.h"

 Dialog* g_mainDlg = Q_NULLPTR;

@@ -21,7 +21,7 @@ int main(int argc, char *argv[])
     //QApplication::setAttribute(Qt::AA_UseSoftwareOpenGL);
     g_oldMessageHandler = qInstallMessageHandler(myMessageOutput);
-    Decoder::init();
+    Stream::init();
     QApplication a(argc, argv);
     installTranslator();

@@ -54,7 +54,7 @@ int main(int argc, char *argv[])
     int ret = a.exec();
     MouseTap::getInstance()->quitMouseEventTap();
-    Decoder::deInit();
+    Stream::deInit();
     return ret;
 }

QtScrcpy/videoform.cpp

@@ -33,9 +33,10 @@ VideoForm::VideoForm(const QString& serial, quint16 maxSize, quint32 bitRate, co
     m_server = new Server();
     m_vb.init();
     m_decoder.setVideoBuffer(&m_vb);
+    m_stream.setDecoder(&m_decoder);
     if (!fileName.trimmed().isEmpty()) {
         m_recorder = new Recorder(fileName.trimmed());
-        m_decoder.setRecoder(m_recorder);
+        m_stream.setRecoder(m_recorder);
     }
     initSignals();

@@ -63,7 +64,7 @@ VideoForm::~VideoForm()
 {
     m_server->stop();
     // server must stop before decoder, because decoder block main thread
-    m_decoder.stopDecode();
+    m_stream.stopDecode();
     delete m_server;
     if (m_recorder) {
         delete m_recorder;

@@ -167,8 +168,8 @@ void VideoForm::initSignals()
     }
     // init decoder
-    m_decoder.setDeviceSocket(m_server->getDeviceSocket());
-    m_decoder.startDecode();
+    m_stream.setDeviceSocket(m_server->getDeviceSocket());
+    m_stream.startDecode();
     // init controller
     m_inputConvert.setDeviceSocket(m_server->getDeviceSocket());

@@ -180,9 +181,9 @@ void VideoForm::initSignals()
         qDebug() << "server process stop";
     });
-    connect(&m_decoder, &Decoder::onDecodeStop, this, [this](){
+    connect(&m_stream, &Stream::onStreamStop, this, [this](){
         close();
-        qDebug() << "decoder thread stop";
+        qDebug() << "stream thread stop";
     });
     // must be Qt::QueuedConnection, ui update must be main thread

QtScrcpy/videoform.h

@@ -6,8 +6,9 @@
 #include <QTime>

 #include "server.h"
-#include "decoder.h"
+#include "stream.h"
 #include "videobuffer.h"
+#include "decoder.h"
 #include "inputconvertnormal.h"
 #include "inputconvertgame.h"
 #include "filehandler.h"

@@ -70,7 +71,7 @@ private:
     Ui::videoForm *ui;
     QSize frameSize;
     Server* m_server = Q_NULLPTR;
-    Decoder m_decoder;
+    Stream m_stream;
     VideoBuffer m_vb;
     //InputConvertNormal m_inputConvert;
     InputConvertGame m_inputConvert;

@@ -82,6 +83,7 @@ private:
     float m_widthHeightRatio = 0.5f;
     QPointer<ToolForm> m_toolForm;
     Recorder* m_recorder = Q_NULLPTR;
+    Decoder m_decoder;
     QTime m_startTimeCount;
     QPointer<QWidget> m_loadingWidget;
 };
}; };