diff --git a/QtScrcpy/decoder/decoder.cpp b/QtScrcpy/decoder/decoder.cpp index f698466..5486519 100644 --- a/QtScrcpy/decoder/decoder.cpp +++ b/QtScrcpy/decoder/decoder.cpp @@ -1,19 +1,11 @@ #include -#include #include "compat.h" -#include "decoder.h" #include "videobuffer.h" -#include "devicesocket.h" -#include "recorder.h" - -#define BUFSIZE 0x10000 -#define HEADER_SIZE 12 -#define NO_PTS UINT64_C(-1) - -typedef qint32 (*ReadPacketFunc)(void*, quint8*, qint32); +#include "decoder.h" Decoder::Decoder() + : QObject(Q_NULLPTR) { } @@ -23,414 +15,113 @@ Decoder::~Decoder() } -static void avLogCallback(void *avcl, int level, const char *fmt, va_list vl) { - Q_UNUSED(avcl); - Q_UNUSED(vl); - - QString localFmt = QString::fromUtf8(fmt); - localFmt.prepend("[FFmpeg] "); - switch (level) { - case AV_LOG_PANIC: - case AV_LOG_FATAL: - qFatal(localFmt.toUtf8()); - break; - case AV_LOG_ERROR: - qCritical(localFmt.toUtf8()); - break; - case AV_LOG_WARNING: - qWarning(localFmt.toUtf8()); - break; - case AV_LOG_INFO: - qInfo(localFmt.toUtf8()); - break; - case AV_LOG_DEBUG: - //qDebug(localFmt.toUtf8()); - break; - } - - // do not forward others, which are too verbose - return; -} - -bool Decoder::init() -{ -#ifdef QTSCRCPY_LAVF_REQUIRES_REGISTER_ALL - av_register_all(); -#endif - if (avformat_network_init()) { - return false; - } - av_log_set_callback(avLogCallback); - return true; -} - -void Decoder::deInit() -{ - avformat_network_deinit(); // ignore failure -} - void Decoder::setVideoBuffer(VideoBuffer* vb) { m_vb = vb; } -static quint32 bufferRead32be(quint8* buf) { - return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; -} - -static quint64 bufferRead64be(quint8* buf) { - quint32 msb = bufferRead32be(buf); - quint32 lsb = bufferRead32be(&buf[4]); - return ((quint64) msb << 32) | lsb; -} - -static Decoder::FrameMeta* frameMetaNew(quint64 pts) { - Decoder::FrameMeta* meta = new Decoder::FrameMeta; - if (!meta) { - return meta; - } - meta->pts = pts; - meta->next = 
Q_NULLPTR; - return meta; -} - -static void frameMetaDelete(Decoder::FrameMeta* frameMeta) { - if (frameMeta) { - delete frameMeta; - } -} - -static bool receiverStatePushMeta(Decoder::ReceiverState* state, quint64 pts) { - Decoder::FrameMeta* frameMeta = frameMetaNew(pts); - if (!frameMeta) { +bool Decoder::open(const AVCodec *codec) +{ + // codec context + m_codecCtx = avcodec_alloc_context3(codec); + if (!m_codecCtx) { + qCritical("Could not allocate decoder context"); return false; } - - // append to the list - // (iterate to find the last item, in practice the list should be tiny) - Decoder::FrameMeta **p = &state->frameMetaQueue; - while (*p) { - p = &(*p)->next; + if (avcodec_open2(m_codecCtx, codec, NULL) < 0) { + qCritical("Could not open H.264 codec"); + return false; } - *p = frameMeta; + m_isCodecCtxOpen = true; return true; } -static quint64 receiverStateTakeMeta(Decoder::ReceiverState* state) { - Decoder::FrameMeta *frameMeta = state->frameMetaQueue; // first item - Q_ASSERT(frameMeta); // must not be empty - quint64 pts = frameMeta->pts; - state->frameMetaQueue = frameMeta->next; // remove the item - frameMetaDelete(frameMeta); - return pts; -} - -static qint32 readPacketWithMeta(void *opaque, uint8_t *buf, int bufSize) { - Decoder* decoder = (Decoder*)opaque; - Decoder::ReceiverState* state = decoder->getReceiverState(); - - // The video stream contains raw packets, without time information. When we - // record, we retrieve the timestamps separately, from a "meta" header - // added by the server before each raw packet. - // - // The "meta" header length is 12 bytes: - // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ... - // <-------------> <-----> <-----------------------------... - // PTS packet raw packet - // size - // - // It is followed by bytes containing the packet/frame. 
- - if (!state->remaining) { - quint8 header[HEADER_SIZE]; - qint32 r = decoder->recvData(header, HEADER_SIZE); - if (r == -1) { - return AVERROR(errno); - } - if (r == 0) { - return AVERROR_EOF; - } - // no partial read (net_recv_all()) - if (r != HEADER_SIZE) { - return AVERROR(ENOMEM); - } - - uint64_t pts = bufferRead64be(header); - state->remaining = bufferRead32be(&header[8]); - - if (pts != NO_PTS && !receiverStatePushMeta(state, pts)) { - qCritical("Could not store PTS for recording"); - // we cannot save the PTS, the recording would be broken - return AVERROR(ENOMEM); - } - } - - Q_ASSERT(state->remaining); - - if (bufSize > state->remaining) { - bufSize = state->remaining; - } - - qint32 r = decoder->recvData(buf, bufSize); - if (r == -1) { - return AVERROR(errno); - } - if (r == 0) { - return AVERROR_EOF; - } - - Q_ASSERT(state->remaining >= r); - state->remaining -= r; - return r; -} - -static qint32 readRawPacket(void *opaque, quint8 *buf, qint32 bufSize) { - Decoder *decoder = (Decoder*)opaque; - if (decoder) { - qint32 len = decoder->recvData(buf, bufSize); - if (len == -1) { - return AVERROR(errno); - } - if (len == 0) { - return AVERROR_EOF; - } - return len; - } - return AVERROR_EOF; -} - -void Decoder::setDeviceSocket(DeviceSocket* deviceSocket) +void Decoder::close() { - m_deviceSocket = deviceSocket; -} - -void Decoder::setRecoder(Recorder *recorder) -{ - m_recorder = recorder; -} - -qint32 Decoder::recvData(quint8* buf, qint32 bufSize) -{ - if (!buf) { - return 0; + if (!m_codecCtx) { + return; } - if (m_deviceSocket) { - qint32 len = m_deviceSocket->subThreadRecvData(buf, bufSize); - return len; + if (m_isCodecCtxOpen) { + avcodec_close(m_codecCtx); } - return 0; + avcodec_free_context(&m_codecCtx); } -bool Decoder::startDecode() +bool Decoder::push(const AVPacket* packet) { - if (!m_deviceSocket) { + if (!m_codecCtx || !m_vb) { return false; } - m_quit = false; - start(); + AVFrame* decodingFrame = m_vb->decodingFrame(); +#ifdef 
QTSCRCPY_LAVF_HAS_NEW_ENCODING_DECODING_API + int ret = -1; + if ((ret = avcodec_send_packet(m_codecCtx, packet)) < 0) { + char errorbuf[255] = { 0 }; + av_strerror(ret, errorbuf, 254); + qCritical("Could not send video packet: %s", errorbuf); + return false; + } + if (decodingFrame) { + ret = avcodec_receive_frame(m_codecCtx, decodingFrame); + } + if (!ret) { + // a frame was received + pushFrame(); + + //emit getOneFrame(yuvDecoderFrame->data[0], yuvDecoderFrame->data[1], yuvDecoderFrame->data[2], + // yuvDecoderFrame->linesize[0], yuvDecoderFrame->linesize[1], yuvDecoderFrame->linesize[2]); + + /* + // m_conver转换yuv为rgb是使用cpu转的,占用cpu太高,改用opengl渲染yuv + // QImage的copy也非常占用内存,此方案不考虑 + if (!m_conver.isInit()) { + qDebug() << "decoder frame format" << decodingFrame->format; + m_conver.setSrcFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P); + m_conver.setDstFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_RGB32); + m_conver.init(); + } + if (!outBuffer) { + outBuffer=new quint8[avpicture_get_size(AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height)]; + avpicture_fill((AVPicture *)rgbDecoderFrame, outBuffer, AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height); + } + m_conver.convert(decodingFrame, rgbDecoderFrame); + //QImage tmpImg((uchar *)outBuffer, codecCtx->width, codecCtx->height, QImage::Format_RGB32); + //QImage image = tmpImg.copy(); + //emit getOneImage(image); + */ + } else if (ret != AVERROR(EAGAIN)) { + qCritical("Could not receive video frame: %d", ret); + return false; + } +#else + int gotPicture = 0; + int len = -1; + if (decodingFrame) { + len = avcodec_decode_video2(m_codecCtx, decodingFrame, &gotPicture, packet); + } + if (len < 0) { + qCritical("Could not decode video packet: %d", len); + return false; + } + if (gotPicture) { + pushFrame(); + } +#endif return true; } -void Decoder::stopDecode() +void Decoder::interrupt() { - m_quit = true; if (m_vb) { m_vb->interrupt(); } - wait(); -} - -Decoder::ReceiverState 
*Decoder::getReceiverState() -{ - return &m_receiverState; -} - -void Decoder::run() -{ - unsigned char *decoderBuffer = Q_NULLPTR; - AVIOContext *avioCtx = Q_NULLPTR; - AVFormatContext *formatCtx = Q_NULLPTR; - AVCodec *codec = Q_NULLPTR; - AVCodecContext *codecCtx = Q_NULLPTR; - ReadPacketFunc readPacket = Q_NULLPTR; - bool isFormatCtxOpen = false; - bool isCodecCtxOpen = false; - - // decoder buffer - decoderBuffer = (unsigned char*)av_malloc(BUFSIZE); - if (!decoderBuffer) { - qCritical("Could not allocate buffer"); - goto runQuit; - } - - // initialize the receiver state - m_receiverState.frameMetaQueue = Q_NULLPTR; - m_receiverState.remaining = 0; - - // if recording is enabled, a "header" is sent between raw packets - readPacket = m_recorder ? readPacketWithMeta: readRawPacket; - - // io context - avioCtx = avio_alloc_context(decoderBuffer, BUFSIZE, 0, this, readPacket, NULL, NULL); - if (!avioCtx) { - qCritical("Could not allocate avio context"); - // avformat_open_input takes ownership of 'decoderBuffer' - // so only free the buffer before avformat_open_input() - av_free(decoderBuffer); - goto runQuit; - } - - // format context - formatCtx = avformat_alloc_context(); - if (!formatCtx) { - qCritical("Could not allocate format context"); - goto runQuit; - } - formatCtx->pb = avioCtx; - if (avformat_open_input(&formatCtx, NULL, NULL, NULL) < 0) { - qCritical("Could not open video stream"); - goto runQuit; - } - isFormatCtxOpen = true; - - // codec - codec = avcodec_find_decoder(AV_CODEC_ID_H264); - if (!codec) { - qCritical("H.264 decoder not found"); - goto runQuit; - } - - // codec context - codecCtx = avcodec_alloc_context3(codec); - if (!codecCtx) { - qCritical("Could not allocate decoder context"); - goto runQuit; - } - if (avcodec_open2(codecCtx, codec, NULL) < 0) { - qCritical("Could not open H.264 codec"); - goto runQuit; - } - isCodecCtxOpen = true; - - if (m_recorder && !m_recorder->open(codec)) { - qCritical("Could not open recorder"); - goto 
runQuit; - } - - AVPacket packet; - av_init_packet(&packet); - packet.data = Q_NULLPTR; - packet.size = 0; - - while (!m_quit && !av_read_frame(formatCtx, &packet)) { - AVFrame* decodingFrame = m_vb->decodingFrame(); - // the new decoding/encoding API has been introduced by: - // -#ifdef QTSCRCPY_LAVF_HAS_NEW_ENCODING_DECODING_API - int ret; - if ((ret = avcodec_send_packet(codecCtx, &packet)) < 0) { - char errorbuf[255] = { 0 }; - av_strerror(ret, errorbuf, 254); - qCritical("Could not send video packet: %s", errorbuf); - goto runQuit; - } - if (decodingFrame) { - ret = avcodec_receive_frame(codecCtx, decodingFrame); - } - if (!ret) { - // a frame was received - pushFrame(); - - //emit getOneFrame(yuvDecoderFrame->data[0], yuvDecoderFrame->data[1], yuvDecoderFrame->data[2], - // yuvDecoderFrame->linesize[0], yuvDecoderFrame->linesize[1], yuvDecoderFrame->linesize[2]); - - /* - // m_conver转换yuv为rgb是使用cpu转的,占用cpu太高,改用opengl渲染yuv - // QImage的copy也非常占用内存,此方案不考虑 - if (!m_conver.isInit()) { - qDebug() << "decoder frame format" << decodingFrame->format; - m_conver.setSrcFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P); - m_conver.setDstFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_RGB32); - m_conver.init(); - } - if (!outBuffer) { - outBuffer=new quint8[avpicture_get_size(AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height)]; - avpicture_fill((AVPicture *)rgbDecoderFrame, outBuffer, AV_PIX_FMT_RGB32, codecCtx->width, codecCtx->height); - } - m_conver.convert(decodingFrame, rgbDecoderFrame); - //QImage tmpImg((uchar *)outBuffer, codecCtx->width, codecCtx->height, QImage::Format_RGB32); - //QImage image = tmpImg.copy(); - //emit getOneImage(image); - */ - } else if (ret != AVERROR(EAGAIN)) { - qCritical("Could not receive video frame: %d", ret); - av_packet_unref(&packet); - goto runQuit; - } -#else - int gotPicture = 0; - int len = -1; - if (decodingFrame) { - len = avcodec_decode_video2(codecCtx, decodingFrame, &gotPicture, &packet); - } - if 
(len < 0) { - qCritical("Could not decode video packet: %d", len); - av_packet_unref(&packet); - goto runQuit; - } - if (gotPicture) { - pushFrame(); - } -#endif - if (m_recorder) { - // we retrieve the PTS in order they were received, so they will - // be assigned to the correct frame - quint64 pts = receiverStateTakeMeta(&m_receiverState); - packet.pts = pts; - packet.dts = pts; - // no need to rescale with av_packet_rescale_ts(), the timestamps - // are in microseconds both in input and output - if (!m_recorder->write(&packet)) { - qCritical("Could not write frame to output file"); - av_packet_unref(&packet); - goto runQuit; - } - } - - av_packet_unref(&packet); - - if (avioCtx->eof_reached) { - break; - } - } - qDebug() << "End of frames"; - -runQuit: - if (m_recorder) { - m_recorder->close(); - } - if (avioCtx) { - av_free(avioCtx->buffer); - av_freep(&avioCtx); - } - if (formatCtx && isFormatCtxOpen) { - avformat_close_input(&formatCtx); - } - if (formatCtx) { - avformat_free_context(formatCtx); - } - if (codecCtx && isCodecCtxOpen) { - avcodec_close(codecCtx); - } - if (codecCtx) { - avcodec_free_context(&codecCtx); - } - - emit onDecodeStop(); } void Decoder::pushFrame() { + if (!m_vb) { + return; + } bool previousFrameConsumed = m_vb->offerDecodedFrame(); if (!previousFrameConsumed) { // the previous newFrame will consume this frame diff --git a/QtScrcpy/decoder/decoder.h b/QtScrcpy/decoder/decoder.h index dbe3a35..c72a491 100644 --- a/QtScrcpy/decoder/decoder.h +++ b/QtScrcpy/decoder/decoder.h @@ -1,65 +1,36 @@ #ifndef DECODER_H #define DECODER_H - -#include -#include -#include +#include extern "C" { #include "libavcodec/avcodec.h" -#include "libavformat/avformat.h" } class VideoBuffer; -class DeviceSocket; -class Recorder; -class Decoder : public QThread +class Decoder : public QObject { Q_OBJECT public: - typedef struct FrameMeta { - quint64 pts; - struct FrameMeta* next; - } FrameMeta; - - typedef struct ReceiverState { - // meta (in order) for frames 
not consumed yet - FrameMeta* frameMetaQueue; - qint32 remaining; // remaining bytes to receive for the current frame - } ReceiverState; - Decoder(); virtual ~Decoder(); -public: - static bool init(); - static void deInit(); - void setVideoBuffer(VideoBuffer* vb); - void setDeviceSocket(DeviceSocket* deviceSocket); - void setRecoder(Recorder* recorder); - qint32 recvData(quint8* buf, qint32 bufSize); - bool startDecode(); - void stopDecode(); - ReceiverState* getReceiverState(); + bool open(const AVCodec *codec); + void close(); + bool push(const AVPacket *packet); + void interrupt(); signals: void onNewFrame(); - void onDecodeStop(); protected: - void run(); void pushFrame(); -private: - QPointer m_deviceSocket; - bool m_quit = false; - VideoBuffer* m_vb; - - // for recorder - Recorder* m_recorder = Q_NULLPTR; - ReceiverState m_receiverState; +private: + VideoBuffer* m_vb = Q_NULLPTR; + AVCodecContext* m_codecCtx = Q_NULLPTR; + bool m_isCodecCtxOpen = false; }; #endif // DECODER_H diff --git a/QtScrcpy/decoder/decoder.pri b/QtScrcpy/decoder/decoder.pri index 5e124fe..214643f 100644 --- a/QtScrcpy/decoder/decoder.pri +++ b/QtScrcpy/decoder/decoder.pri @@ -2,11 +2,13 @@ HEADERS += \ $$PWD/decoder.h \ $$PWD/fpscounter.h \ $$PWD/avframeconvert.h \ - $$PWD/videobuffer.h + $$PWD/videobuffer.h \ + $$PWD/stream.h SOURCES += \ $$PWD/decoder.cpp \ $$PWD/fpscounter.cpp \ $$PWD/avframeconvert.cpp \ - $$PWD/videobuffer.cpp + $$PWD/videobuffer.cpp \ + $$PWD/stream.cpp diff --git a/QtScrcpy/decoder/stream.cpp b/QtScrcpy/decoder/stream.cpp new file mode 100644 index 0000000..f114071 --- /dev/null +++ b/QtScrcpy/decoder/stream.cpp @@ -0,0 +1,367 @@ +#include +#include + +#include "compat.h" +#include "stream.h" +#include "decoder.h" +#include "devicesocket.h" +#include "recorder.h" + +#define BUFSIZE 0x10000 +#define HEADER_SIZE 12 +#define NO_PTS UINT64_C(-1) + +typedef qint32 (*ReadPacketFunc)(void*, quint8*, qint32); + +Stream::Stream() +{ + +} + +Stream::~Stream() +{ + +} + 
+static void avLogCallback(void *avcl, int level, const char *fmt, va_list vl) { + Q_UNUSED(avcl); + Q_UNUSED(vl); + + QString localFmt = QString::fromUtf8(fmt); + localFmt.prepend("[FFmpeg] "); + switch (level) { + case AV_LOG_PANIC: + case AV_LOG_FATAL: + qFatal(localFmt.toUtf8()); + break; + case AV_LOG_ERROR: + qCritical(localFmt.toUtf8()); + break; + case AV_LOG_WARNING: + qWarning(localFmt.toUtf8()); + break; + case AV_LOG_INFO: + qInfo(localFmt.toUtf8()); + break; + case AV_LOG_DEBUG: + //qDebug(localFmt.toUtf8()); + break; + } + + // do not forward others, which are too verbose + return; +} + +bool Stream::init() +{ +#ifdef QTSCRCPY_LAVF_REQUIRES_REGISTER_ALL + av_register_all(); +#endif + if (avformat_network_init()) { + return false; + } + av_log_set_callback(avLogCallback); + return true; +} + +void Stream::deInit() +{ + avformat_network_deinit(); // ignore failure +} + +void Stream::setDecoder(Decoder* decoder) +{ + m_decoder = decoder; +} + +static quint32 bufferRead32be(quint8* buf) { + return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; +} + +static quint64 bufferRead64be(quint8* buf) { + quint32 msb = bufferRead32be(buf); + quint32 lsb = bufferRead32be(&buf[4]); + return ((quint64) msb << 32) | lsb; +} + +static Stream::FrameMeta* frameMetaNew(quint64 pts) { + Stream::FrameMeta* meta = new Stream::FrameMeta; + if (!meta) { + return meta; + } + meta->pts = pts; + meta->next = Q_NULLPTR; + return meta; +} + +static void frameMetaDelete(Stream::FrameMeta* frameMeta) { + if (frameMeta) { + delete frameMeta; + } +} + +static bool receiverStatePushMeta(Stream::ReceiverState* state, quint64 pts) { + Stream::FrameMeta* frameMeta = frameMetaNew(pts); + if (!frameMeta) { + return false; + } + + // append to the list + // (iterate to find the last item, in practice the list should be tiny) + Stream::FrameMeta **p = &state->frameMetaQueue; + while (*p) { + p = &(*p)->next; + } + *p = frameMeta; + return true; +} + +static quint64 
receiverStateTakeMeta(Stream::ReceiverState* state) { + Stream::FrameMeta *frameMeta = state->frameMetaQueue; // first item + Q_ASSERT(frameMeta); // must not be empty + quint64 pts = frameMeta->pts; + state->frameMetaQueue = frameMeta->next; // remove the item + frameMetaDelete(frameMeta); + return pts; +} + +static qint32 readPacketWithMeta(void *opaque, uint8_t *buf, int bufSize) { + Stream* stream = (Stream*)opaque; + Stream::ReceiverState* state = stream->getReceiverState(); + + // The video stream contains raw packets, without time information. When we + // record, we retrieve the timestamps separately, from a "meta" header + // added by the server before each raw packet. + // + // The "meta" header length is 12 bytes: + // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ... + // <-------------> <-----> <-----------------------------... + // PTS packet raw packet + // size + // + // It is followed by bytes containing the packet/frame. + + if (!state->remaining) { + quint8 header[HEADER_SIZE]; + qint32 r = stream->recvData(header, HEADER_SIZE); + if (r == -1) { + return AVERROR(errno); + } + if (r == 0) { + return AVERROR_EOF; + } + // no partial read (net_recv_all()) + if (r != HEADER_SIZE) { + return AVERROR(ENOMEM); + } + + uint64_t pts = bufferRead64be(header); + state->remaining = bufferRead32be(&header[8]); + + if (pts != NO_PTS && !receiverStatePushMeta(state, pts)) { + qCritical("Could not store PTS for recording"); + // we cannot save the PTS, the recording would be broken + return AVERROR(ENOMEM); + } + } + + Q_ASSERT(state->remaining); + + if (bufSize > state->remaining) { + bufSize = state->remaining; + } + + qint32 r = stream->recvData(buf, bufSize); + if (r == -1) { + return AVERROR(errno); + } + if (r == 0) { + return AVERROR_EOF; + } + + Q_ASSERT(state->remaining >= r); + state->remaining -= r; + return r; +} + +static qint32 readRawPacket(void *opaque, quint8 *buf, qint32 bufSize) { + Stream *stream = (Stream*)opaque; + if (stream) { + 
qint32 len = stream->recvData(buf, bufSize); + if (len == -1) { + return AVERROR(errno); + } + if (len == 0) { + return AVERROR_EOF; + } + return len; + } + return AVERROR_EOF; +} + +void Stream::setDeviceSocket(DeviceSocket* deviceSocket) +{ + m_deviceSocket = deviceSocket; +} + +void Stream::setRecoder(Recorder *recorder) +{ + m_recorder = recorder; +} + +qint32 Stream::recvData(quint8* buf, qint32 bufSize) +{ + if (!buf) { + return 0; + } + if (m_deviceSocket) { + qint32 len = m_deviceSocket->subThreadRecvData(buf, bufSize); + return len; + } + return 0; +} + +bool Stream::startDecode() +{ + if (!m_deviceSocket) { + return false; + } + m_quit.store(0); + start(); + return true; +} + +void Stream::stopDecode() +{ + m_quit.store(1); + if (m_decoder) { + m_decoder->interrupt(); + } + wait(); +} + +Stream::ReceiverState *Stream::getReceiverState() +{ + return &m_receiverState; +} + +void Stream::run() +{ + unsigned char *decoderBuffer = Q_NULLPTR; + AVIOContext *avioCtx = Q_NULLPTR; + AVFormatContext *formatCtx = Q_NULLPTR; + AVCodec *codec = Q_NULLPTR; + AVCodecContext *codecCtx = Q_NULLPTR; + ReadPacketFunc readPacket = Q_NULLPTR; + bool isFormatCtxOpen = false; + + // decoder buffer + decoderBuffer = (unsigned char*)av_malloc(BUFSIZE); + if (!decoderBuffer) { + qCritical("Could not allocate buffer"); + goto runQuit; + } + + // initialize the receiver state + m_receiverState.frameMetaQueue = Q_NULLPTR; + m_receiverState.remaining = 0; + + // if recording is enabled, a "header" is sent between raw packets + readPacket = m_recorder ? 
readPacketWithMeta: readRawPacket; + + // io context + avioCtx = avio_alloc_context(decoderBuffer, BUFSIZE, 0, this, readPacket, NULL, NULL); + if (!avioCtx) { + qCritical("Could not allocate avio context"); + // avformat_open_input takes ownership of 'decoderBuffer' + // so only free the buffer before avformat_open_input() + av_free(decoderBuffer); + goto runQuit; + } + + // format context + formatCtx = avformat_alloc_context(); + if (!formatCtx) { + qCritical("Could not allocate format context"); + goto runQuit; + } + formatCtx->pb = avioCtx; + if (avformat_open_input(&formatCtx, NULL, NULL, NULL) < 0) { + qCritical("Could not open video stream"); + goto runQuit; + } + isFormatCtxOpen = true; + + // codec + codec = avcodec_find_decoder(AV_CODEC_ID_H264); + if (!codec) { + qCritical("H.264 decoder not found"); + goto runQuit; + } + + if (m_decoder && !m_decoder->open(codec)) { + qCritical("Could not open m_decoder"); + goto runQuit; + } + + if (m_recorder && !m_recorder->open(codec)) { + qCritical("Could not open recorder"); + goto runQuit; + } + + AVPacket packet; + av_init_packet(&packet); + packet.data = Q_NULLPTR; + packet.size = 0; + + while (!m_quit.load() && !av_read_frame(formatCtx, &packet)) { + if (m_decoder && !m_decoder->push(&packet)) { + av_packet_unref(&packet); + goto runQuit; + } + if (m_recorder) { + // we retrieve the PTS in order they were received, so they will + // be assigned to the correct frame + quint64 pts = receiverStateTakeMeta(&m_receiverState); + packet.pts = pts; + packet.dts = pts; + // no need to rescale with av_packet_rescale_ts(), the timestamps + // are in microseconds both in input and output + if (!m_recorder->write(&packet)) { + qCritical("Could not write frame to output file"); + av_packet_unref(&packet); + goto runQuit; + } + } + + av_packet_unref(&packet); + + if (avioCtx->eof_reached) { + break; + } + } + qDebug() << "End of frames"; + +runQuit: + if (m_recorder) { + m_recorder->close(); + } + if (avioCtx) { + 
av_free(avioCtx->buffer);
+        av_freep(&avioCtx);
+    }
+    if (formatCtx && isFormatCtxOpen) {
+        avformat_close_input(&formatCtx);
+    }
+    if (formatCtx) {
+        avformat_free_context(formatCtx);
+    }
+    if (m_decoder) {
+        m_decoder->close();
+    }
+    if (codecCtx) {
+        avcodec_free_context(&codecCtx);
+    }
+
+    emit onStreamStop();
+}
diff --git a/QtScrcpy/decoder/stream.h b/QtScrcpy/decoder/stream.h
new file mode 100644
index 0000000..b51eb39
--- /dev/null
+++ b/QtScrcpy/decoder/stream.h
@@ -0,0 +1,64 @@
+#ifndef STREAM_H
+#define STREAM_H
+
+#include <QThread>
+#include <QPointer>
+#include <QAtomicInteger>
+#include <QDebug>
+
+extern "C"
+{
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
+}
+
+class DeviceSocket;
+class Recorder;
+class Decoder;
+class Stream : public QThread
+{
+    Q_OBJECT
+public:
+    typedef struct FrameMeta {
+        quint64 pts;
+        struct FrameMeta* next;
+    } FrameMeta;
+
+    typedef struct ReceiverState {
+        // meta (in order) for frames not consumed yet
+        FrameMeta* frameMetaQueue;
+        qint32 remaining; // remaining bytes to receive for the current frame
+    } ReceiverState;
+
+    Stream();
+    virtual ~Stream();
+
+public:
+    static bool init();
+    static void deInit();
+
+    void setDecoder(Decoder* decoder);
+    void setDeviceSocket(DeviceSocket* deviceSocket);
+    void setRecoder(Recorder* recorder);
+    qint32 recvData(quint8* buf, qint32 bufSize);
+    bool startDecode();
+    void stopDecode();
+    ReceiverState* getReceiverState();
+
+signals:
+    void onStreamStop();
+
+protected:
+    void run();
+
+private:
+    QPointer<DeviceSocket> m_deviceSocket;
+    QAtomicInteger<qint8> m_quit;
+
+    // for recorder
+    Recorder* m_recorder = Q_NULLPTR;
+    ReceiverState m_receiverState;
+    Decoder* m_decoder = Q_NULLPTR;
+};
+
+#endif // STREAM_H
diff --git a/QtScrcpy/main.cpp b/QtScrcpy/main.cpp
index e0628d6..c1ede4b 100644
--- a/QtScrcpy/main.cpp
+++ b/QtScrcpy/main.cpp
@@ -5,7 +5,7 @@
 #include 
 #include "dialog.h"
-#include "decoder.h"
+#include "stream.h"
 #include "mousetap/mousetap.h"
 
 Dialog* g_mainDlg = Q_NULLPTR;
@@ -21,7 +21,7 @@ int main(int argc, char 
*argv[]) //QApplication::setAttribute(Qt::AA_UseSoftwareOpenGL); g_oldMessageHandler = qInstallMessageHandler(myMessageOutput); - Decoder::init(); + Stream::init(); QApplication a(argc, argv); installTranslator(); @@ -54,7 +54,7 @@ int main(int argc, char *argv[]) int ret = a.exec(); MouseTap::getInstance()->quitMouseEventTap(); - Decoder::deInit(); + Stream::deInit(); return ret; } diff --git a/QtScrcpy/videoform.cpp b/QtScrcpy/videoform.cpp index e9f12c5..0e22e93 100644 --- a/QtScrcpy/videoform.cpp +++ b/QtScrcpy/videoform.cpp @@ -33,9 +33,10 @@ VideoForm::VideoForm(const QString& serial, quint16 maxSize, quint32 bitRate, co m_server = new Server(); m_vb.init(); m_decoder.setVideoBuffer(&m_vb); + m_stream.setDecoder(&m_decoder); if (!fileName.trimmed().isEmpty()) { m_recorder = new Recorder(fileName.trimmed()); - m_decoder.setRecoder(m_recorder); + m_stream.setRecoder(m_recorder); } initSignals(); @@ -63,7 +64,7 @@ VideoForm::~VideoForm() { m_server->stop(); // server must stop before decoder, because decoder block main thread - m_decoder.stopDecode(); + m_stream.stopDecode(); delete m_server; if (m_recorder) { delete m_recorder; @@ -167,8 +168,8 @@ void VideoForm::initSignals() } // init decoder - m_decoder.setDeviceSocket(m_server->getDeviceSocket()); - m_decoder.startDecode(); + m_stream.setDeviceSocket(m_server->getDeviceSocket()); + m_stream.startDecode(); // init controller m_inputConvert.setDeviceSocket(m_server->getDeviceSocket()); @@ -180,9 +181,9 @@ void VideoForm::initSignals() qDebug() << "server process stop"; }); - connect(&m_decoder, &Decoder::onDecodeStop, this, [this](){ + connect(&m_stream, &Stream::onStreamStop, this, [this](){ close(); - qDebug() << "decoder thread stop"; + qDebug() << "stream thread stop"; }); // must be Qt::QueuedConnection, ui update must be main thread diff --git a/QtScrcpy/videoform.h b/QtScrcpy/videoform.h index 6915e17..9744884 100644 --- a/QtScrcpy/videoform.h +++ b/QtScrcpy/videoform.h @@ -6,8 +6,9 @@ #include 
#include "server.h" -#include "decoder.h" +#include "stream.h" #include "videobuffer.h" +#include "decoder.h" #include "inputconvertnormal.h" #include "inputconvertgame.h" #include "filehandler.h" @@ -70,7 +71,7 @@ private: Ui::videoForm *ui; QSize frameSize; Server* m_server = Q_NULLPTR; - Decoder m_decoder; + Stream m_stream; VideoBuffer m_vb; //InputConvertNormal m_inputConvert; InputConvertGame m_inputConvert; @@ -82,6 +83,7 @@ private: float m_widthHeightRatio = 0.5f; QPointer m_toolForm; Recorder* m_recorder = Q_NULLPTR; + Decoder m_decoder; QTime m_startTimeCount; QPointer m_loadingWidget; };