Mirror of https://github.com/Genymobile/scrcpy.git
Merge 02047ff102 into 939c8e7f68

Commit: 4da9722c17

20 changed files with 406 additions and 145 deletions
@@ -30,7 +30,10 @@ sc_audio_player_frame_sink_push(struct sc_frame_sink *sink,

 static bool
 sc_audio_player_frame_sink_open(struct sc_frame_sink *sink,
-                                const AVCodecContext *ctx) {
+                                const AVCodecContext *ctx,
+                                const struct sc_stream_session *session) {
+    (void) session;
+
     struct sc_audio_player *ap = DOWNCAST(sink);

 #ifdef SCRCPY_LAVU_HAS_CHLAYOUT
@@ -10,20 +10,30 @@
 #define DOWNCAST(SINK) container_of(SINK, struct sc_decoder, packet_sink)

 static bool
-sc_decoder_open(struct sc_decoder *decoder, AVCodecContext *ctx) {
+sc_decoder_open(struct sc_decoder *decoder, AVCodecContext *ctx,
+                const struct sc_stream_session *session) {
     decoder->frame = av_frame_alloc();
     if (!decoder->frame) {
         LOG_OOM();
         return false;
     }

-    if (!sc_frame_source_sinks_open(&decoder->frame_source, ctx)) {
+    if (!sc_frame_source_sinks_open(&decoder->frame_source, ctx, session)) {
         av_frame_free(&decoder->frame);
         return false;
     }

     decoder->ctx = ctx;

+    // A video stream must have a session
+    assert(session || ctx->codec_type != AVMEDIA_TYPE_VIDEO);
+
+    if (session) {
+        decoder->session = *session;
+    }
+
+    memset(&decoder->frame_size, 0, sizeof(decoder->frame_size));
+
     return true;
 }

@@ -61,6 +71,32 @@ sc_decoder_push(struct sc_decoder *decoder, const AVPacket *packet) {
         }

         // a frame was received
+
+        if (decoder->ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+            assert(decoder->frame->width >= 0);
+            assert(decoder->frame->height >= 0);
+            struct sc_size frame_size = {
+                .width = decoder->frame->width,
+                .height = decoder->frame->height,
+            };
+            if (decoder->frame_size.width != frame_size.width
+                    || decoder->frame_size.height != frame_size.height) {
+                // The frame size has changed, check if it matches the session
+                uint32_t sw = decoder->session.video.width;
+                uint32_t sh = decoder->session.video.height;
+                if (frame_size.width != sw || frame_size.height != sh) {
+                    LOGW("Unexpected video size: %" PRIu32 "x%" PRIu32
+                         " (expected %" PRIu32 "x%" PRIu32 ")",
+                         frame_size.width, frame_size.height, sw, sh);
+
+                    LOGW("The encoder did not respect the requested size, "
+                         "please retry with a lower resolution (-m/--max-size)");
+                }
+            }
+
+            decoder->frame_size = frame_size;
+        }
+
         bool ok = sc_frame_source_sinks_push(&decoder->frame_source,
                                              decoder->frame);
         av_frame_unref(decoder->frame);

@@ -74,9 +110,17 @@ sc_decoder_push(struct sc_decoder *decoder, const AVPacket *packet) {
 }

 static bool
-sc_decoder_packet_sink_open(struct sc_packet_sink *sink, AVCodecContext *ctx) {
+sc_decoder_push_session(struct sc_decoder *decoder,
+                        const struct sc_stream_session *session) {
+    decoder->session = *session;
+    return sc_frame_source_sinks_push_session(&decoder->frame_source, session);
+}
+
+static bool
+sc_decoder_packet_sink_open(struct sc_packet_sink *sink, AVCodecContext *ctx,
+                            const struct sc_stream_session *session) {
     struct sc_decoder *decoder = DOWNCAST(sink);
-    return sc_decoder_open(decoder, ctx);
+    return sc_decoder_open(decoder, ctx, session);
 }

 static void

@@ -92,6 +136,14 @@ sc_decoder_packet_sink_push(struct sc_packet_sink *sink,
     return sc_decoder_push(decoder, packet);
 }

+static bool
+sc_decoder_packet_sink_push_session(struct sc_packet_sink *sink,
+                                    const struct sc_stream_session *session) {
+
+    struct sc_decoder *decoder = DOWNCAST(sink);
+    return sc_decoder_push_session(decoder, session);
+}
+
 void
 sc_decoder_init(struct sc_decoder *decoder, const char *name) {
     decoder->name = name; // statically allocated

@@ -101,6 +153,7 @@ sc_decoder_init(struct sc_decoder *decoder, const char *name) {
         .open = sc_decoder_packet_sink_open,
         .close = sc_decoder_packet_sink_close,
         .push = sc_decoder_packet_sink_push,
+        .push_session = sc_decoder_packet_sink_push_session,
     };

     decoder->packet_sink.ops = &ops;
@@ -5,6 +5,7 @@

 #include <libavcodec/avcodec.h>

+#include "coords.h"
 #include "trait/frame_source.h"
 #include "trait/packet_sink.h"

@@ -16,6 +17,9 @@ struct sc_decoder {

     AVCodecContext *ctx;
     AVFrame *frame;
+
+    struct sc_stream_session session; // only initialized for video stream
+    struct sc_size frame_size;
 };

 // The name must be statically allocated (e.g. a string literal)
@@ -10,16 +10,18 @@
 #define DOWNCAST(SINK) container_of(SINK, struct sc_delay_buffer, frame_sink)

 static bool
-sc_delayed_frame_init(struct sc_delayed_frame *dframe, const AVFrame *frame) {
-    dframe->frame = av_frame_alloc();
-    if (!dframe->frame) {
+sc_delayed_packet_init_frame(struct sc_delayed_packet *dpacket,
+                             const AVFrame *frame) {
+    dpacket->type = SC_DELAYED_PACKET_TYPE_FRAME;
+    dpacket->frame = av_frame_alloc();
+    if (!dpacket->frame) {
         LOG_OOM();
         return false;
     }

-    if (av_frame_ref(dframe->frame, frame)) {
+    if (av_frame_ref(dpacket->frame, frame)) {
         LOG_OOM();
-        av_frame_free(&dframe->frame);
+        av_frame_free(&dpacket->frame);
         return false;
     }

@@ -27,9 +29,18 @@ sc_delayed_frame_init(struct sc_delayed_frame *dframe, const AVFrame *frame) {
 }

 static void
-sc_delayed_frame_destroy(struct sc_delayed_frame *dframe) {
-    av_frame_unref(dframe->frame);
-    av_frame_free(&dframe->frame);
+sc_delayed_packet_init_session(struct sc_delayed_packet *dpacket,
+                               const struct sc_stream_session *session) {
+    dpacket->type = SC_DELAYED_PACKET_TYPE_SESSION;
+    dpacket->session = *session;
+}
+
+static void
+sc_delayed_packet_destroy(struct sc_delayed_packet *dpacket) {
+    if (dpacket->type == SC_DELAYED_PACKET_TYPE_FRAME) {
+        av_frame_unref(dpacket->frame);
+        av_frame_free(&dpacket->frame);
+    }
 }

 static int

@@ -50,43 +61,52 @@ run_buffering(void *data) {
             goto stopped;
         }

-        struct sc_delayed_frame dframe = sc_vecdeque_pop(&db->queue);
-
-        sc_tick max_deadline = sc_tick_now() + db->delay;
-        // PTS (written by the server) are expressed in microseconds
-        sc_tick pts = SC_TICK_FROM_US(dframe.frame->pts);
-
-        bool timed_out = false;
-        while (!db->stopped && !timed_out) {
-            sc_tick deadline = sc_clock_to_system_time(&db->clock, pts)
-                             + db->delay;
-            if (deadline > max_deadline) {
-                deadline = max_deadline;
-            }
-
-            timed_out =
-                !sc_cond_timedwait(&db->wait_cond, &db->mutex, deadline);
-        }
-
-        bool stopped = db->stopped;
-        sc_mutex_unlock(&db->mutex);
-
-        if (stopped) {
-            sc_delayed_frame_destroy(&dframe);
-            goto stopped;
-        }
+        struct sc_delayed_packet dpacket = sc_vecdeque_pop(&db->queue);
+
+        bool ok;
+        if (dpacket.type == SC_DELAYED_PACKET_TYPE_FRAME) {
+            sc_tick max_deadline = sc_tick_now() + db->delay;
+            // PTS (written by the server) are expressed in microseconds
+            sc_tick pts = SC_TICK_FROM_US(dpacket.frame->pts);
+
+            bool timed_out = false;
+            while (!db->stopped && !timed_out) {
+                sc_tick deadline = sc_clock_to_system_time(&db->clock, pts)
+                                 + db->delay;
+                if (deadline > max_deadline) {
+                    deadline = max_deadline;
+                }
+
+                timed_out =
+                    !sc_cond_timedwait(&db->wait_cond, &db->mutex, deadline);
+            }
+
+            bool stopped = db->stopped;
+            sc_mutex_unlock(&db->mutex);
+
+            if (stopped) {
+                sc_delayed_packet_destroy(&dpacket);
+                goto stopped;
+            }

 #ifdef SC_BUFFERING_DEBUG
         LOGD("Buffering: %" PRItick ";%" PRItick ";%" PRItick,
              pts, dframe.push_date, sc_tick_now());
 #endif

-        bool ok = sc_frame_source_sinks_push(&db->frame_source, dframe.frame);
-        sc_delayed_frame_destroy(&dframe);
+            ok = sc_frame_source_sinks_push(&db->frame_source, dpacket.frame);
+        } else {
+            assert(dpacket.type == SC_DELAYED_PACKET_TYPE_SESSION);
+            sc_mutex_unlock(&db->mutex);
+            ok = sc_frame_source_sinks_push_session(&db->frame_source,
+                                                    &dpacket.session);
+        }
+
+        sc_delayed_packet_destroy(&dpacket);
         if (!ok) {
-            LOGE("Delayed frame could not be pushed, stopping");
+            LOGE("Delayed packet could not be pushed, stopping");
             sc_mutex_lock(&db->mutex);
-            // Prevent to push any new frame
+            // Prevent to push any new packet
             db->stopped = true;
             sc_mutex_unlock(&db->mutex);
             goto stopped;

@@ -98,8 +118,8 @@ stopped:

     // Flush queue
     while (!sc_vecdeque_is_empty(&db->queue)) {
-        struct sc_delayed_frame *dframe = sc_vecdeque_popref(&db->queue);
-        sc_delayed_frame_destroy(dframe);
+        struct sc_delayed_packet *dpacket = sc_vecdeque_popref(&db->queue);
+        sc_delayed_packet_destroy(dpacket);
     }

     LOGD("Buffering thread ended");

@@ -109,9 +129,11 @@ stopped:

 static bool
 sc_delay_buffer_frame_sink_open(struct sc_frame_sink *sink,
-                                const AVCodecContext *ctx) {
+                                const AVCodecContext *ctx,
+                                const struct sc_stream_session *session) {
     struct sc_delay_buffer *db = DOWNCAST(sink);
     (void) ctx;
+    (void) session;

     bool ok = sc_mutex_init(&db->mutex);
     if (!ok) {

@@ -132,7 +154,7 @@ sc_delay_buffer_frame_sink_open(struct sc_frame_sink *sink,
     sc_vecdeque_init(&db->queue);
     db->stopped = false;

-    if (!sc_frame_source_sinks_open(&db->frame_source, ctx)) {
+    if (!sc_frame_source_sinks_open(&db->frame_source, ctx, session)) {
         goto error_destroy_wait_cond;
     }

@@ -196,24 +218,56 @@ sc_delay_buffer_frame_sink_push(struct sc_frame_sink *sink,
         return sc_frame_source_sinks_push(&db->frame_source, frame);
     }

-    struct sc_delayed_frame dframe;
-    bool ok = sc_delayed_frame_init(&dframe, frame);
-    if (!ok) {
+    struct sc_delayed_packet *dpacket = sc_vecdeque_push_hole(&db->queue);
+    if (!dpacket) {
         sc_mutex_unlock(&db->mutex);
+        LOG_OOM();
         return false;
     }

-#ifdef SC_BUFFERING_DEBUG
-    dframe.push_date = sc_tick_now();
-#endif
-    ok = sc_vecdeque_push(&db->queue, dframe);
+    bool ok = sc_delayed_packet_init_frame(dpacket, frame);
     if (!ok) {
         sc_mutex_unlock(&db->mutex);
         LOG_OOM();
         return false;
     }

+#ifdef SC_BUFFERING_DEBUG
+    dpacket->push_date = sc_tick_now();
+#endif
+
+    sc_cond_signal(&db->queue_cond);
+
+    sc_mutex_unlock(&db->mutex);
+
+    return true;
+}
+
+static bool
+sc_delay_buffer_frame_sink_push_session(struct sc_frame_sink *sink,
+                                        const struct sc_stream_session *session) {
+    struct sc_delay_buffer *db = DOWNCAST(sink);
+
+    sc_mutex_lock(&db->mutex);
+
+    if (db->stopped) {
+        sc_mutex_unlock(&db->mutex);
+        return false;
+    }
+
+    struct sc_delayed_packet *dpacket = sc_vecdeque_push_hole(&db->queue);
+    if (!dpacket) {
+        sc_mutex_unlock(&db->mutex);
+        LOG_OOM();
+        return false;
+    }
+
+    sc_delayed_packet_init_session(dpacket, session);
+
+#ifdef SC_BUFFERING_DEBUG
+    dpacket->push_date = sc_tick_now();
+#endif
+
     sc_cond_signal(&db->queue_cond);

     sc_mutex_unlock(&db->mutex);

@@ -235,6 +289,7 @@ sc_delay_buffer_init(struct sc_delay_buffer *db, sc_tick delay,
         .open = sc_delay_buffer_frame_sink_open,
         .close = sc_delay_buffer_frame_sink_close,
         .push = sc_delay_buffer_frame_sink_push,
+        .push_session = sc_delay_buffer_frame_sink_push_session,
     };

     db->frame_sink.ops = &ops;

@@ -18,14 +18,23 @@
 // forward declarations
 typedef struct AVFrame AVFrame;

-struct sc_delayed_frame {
-    AVFrame *frame;
+enum sc_delayed_packet_type {
+    SC_DELAYED_PACKET_TYPE_FRAME,
+    SC_DELAYED_PACKET_TYPE_SESSION,
+};
+
+struct sc_delayed_packet {
+    enum sc_delayed_packet_type type;
+    union {
+        AVFrame *frame;
+        struct sc_stream_session session;
+    };
 #ifdef SC_BUFFERING_DEBUG
     sc_tick push_date;
 #endif
 };

-struct sc_delayed_frame_queue SC_VECDEQUE(struct sc_delayed_frame);
+struct sc_delayed_packet_queue SC_VECDEQUE(struct sc_delayed_packet);

 struct sc_delay_buffer {
     struct sc_frame_source frame_source; // frame source trait

@@ -40,7 +49,7 @@ struct sc_delay_buffer {
     sc_cond wait_cond;

     struct sc_clock clock;
-    struct sc_delayed_frame_queue queue;
+    struct sc_delayed_packet_queue queue;
     bool stopped;
 };

@@ -11,8 +11,8 @@

 #define SC_PACKET_HEADER_SIZE 12

-#define SC_PACKET_FLAG_CONFIG    (UINT64_C(1) << 63)
-#define SC_PACKET_FLAG_KEY_FRAME (UINT64_C(1) << 62)
+#define SC_PACKET_FLAG_CONFIG    (UINT64_C(1) << 62)
+#define SC_PACKET_FLAG_KEY_FRAME (UINT64_C(1) << 61)

 #define SC_PACKET_PTS_MASK (SC_PACKET_FLAG_KEY_FRAME - 1)

@@ -63,48 +63,75 @@ sc_demuxer_recv_codec_id(struct sc_demuxer *demuxer, uint32_t *codec_id) {
     return true;
 }

-static bool
-sc_demuxer_recv_video_size(struct sc_demuxer *demuxer, uint32_t *width,
-                           uint32_t *height) {
-    uint8_t data[8];
-    ssize_t r = net_recv_all(demuxer->socket, data, 8);
-    if (r < 8) {
-        return false;
-    }
-
-    *width = sc_read32be(data);
-    *height = sc_read32be(data + 4);
-    return true;
-}
-
-static bool
-sc_demuxer_recv_packet(struct sc_demuxer *demuxer, AVPacket *packet) {
+static inline bool
+sc_demuxer_recv_header(struct sc_demuxer *demuxer,
+                       uint8_t buf[static SC_PACKET_HEADER_SIZE]) {
     // The video and audio streams contain a sequence of raw packets (as
     // provided by MediaCodec), each prefixed with a "meta" header.
     //
-    // The "meta" header length is 12 bytes:
+    // The "meta" header length is 12 bytes.
+    //
+    //
+    // If the MSB is 1, then it is a session packet (for a video stream only),
+    // which only contains a 12-byte header:
+    //
+    //  byte 0    byte 1    byte 2    byte 3
+    // 10000000  00000000  00000000  00000000
+    // ^<-------------------------------->
+    // |              padding
+    // `- session packet flag
+    //
+    //  byte 4   byte 5   byte 6   byte 7    byte 8   byte 9   byte 10  byte 11
+    // ........ ........ ........ ........  ........ ........ ........ ........
+    // <--------------------------------->  <--------------------------------->
+    //             video width                          video height
+    //
+    //
+    // If the MSB is 0, then it is a media packet, comprised of a 12-byte header
+    // followed by <packet_size> bytes containing the packet/frame:
+    //
     // [. . . . . . . .|. . . .]. . . . . . . . . . . . . . . ...
     //  <-------------> <-----> <-----------------------------...
     //        PTS        packet        raw packet
     //                    size
     //
-    // It is followed by <packet_size> bytes containing the packet/frame.
-    //
     // The most significant bits of the PTS are used for packet flags:
     //
-    //  byte 7   byte 6   byte 5   byte 4   byte 3   byte 2   byte 1   byte 0
-    // CK...... ........ ........ ........ ........ ........ ........ ........
-    // ^^<------------------------------------------------------------------->
-    // ||                                PTS
-    // | `- key frame
-    //  `-- config packet
+    //  byte 0   byte 1   byte 2   byte 3   byte 4   byte 5   byte 6   byte 7
+    // 0CK..... ........ ........ ........ ........ ........ ........ ........
+    // ^^^<------------------------------------------------------------------>
+    // |||                               PTS
+    // || `- key frame
+    // | `-- config packet
+    // `--- media packet flag
+    //
+    //  byte 8   byte 9   byte 10  byte 11
+    // ........ ........ ........ ........  ........ ........ . . .
+    // <--------------------------------->  <---------------- . . .
+    //             packet size                  raw packet
+    //
+    ssize_t r = net_recv_all(demuxer->socket, buf, SC_PACKET_HEADER_SIZE);
+    assert(r <= SC_PACKET_HEADER_SIZE);
+    return r == SC_PACKET_HEADER_SIZE;
+}

-    uint8_t header[SC_PACKET_HEADER_SIZE];
-    ssize_t r = net_recv_all(demuxer->socket, header, SC_PACKET_HEADER_SIZE);
-    if (r < SC_PACKET_HEADER_SIZE) {
-        return false;
-    }
+static bool
+sc_demuxer_is_session(const uint8_t *header) {
+    return header[0] & 0x80;
+}

+static void
+sc_demuxer_parse_session(const uint8_t *header,
+                         struct sc_stream_session *session) {
+    assert(sc_demuxer_is_session(header));
+    session->video.width = sc_read32be(&header[4]);
+    session->video.height = sc_read32be(&header[8]);
+}
+
+static bool
+sc_demuxer_recv_packet(struct sc_demuxer *demuxer, const uint8_t *header,
+                       AVPacket *packet) {
+    assert(!sc_demuxer_is_session(header));
     uint64_t pts_flags = sc_read64be(header);
     uint32_t len = sc_read32be(&header[8]);
     assert(len);
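Editor's note (not part of the patch): the comment block above fully specifies the new wire format, so a small standalone sketch may help anyone writing an independent client against it. Only the 12-byte header size, the flag bit positions and the field layout come from the patch; the helper and struct names below are made up for illustration.

```c
// Minimal sketch of parsing the 12-byte "meta" header described above.
#include <stdbool.h>
#include <stdint.h>

#define HEADER_SIZE 12
#define FLAG_SESSION   (UINT64_C(1) << 63)
#define FLAG_CONFIG    (UINT64_C(1) << 62)
#define FLAG_KEY_FRAME (UINT64_C(1) << 61)
#define PTS_MASK       (FLAG_KEY_FRAME - 1)

static uint32_t read32be(const uint8_t *buf) {
    return (uint32_t) buf[0] << 24 | (uint32_t) buf[1] << 16
         | (uint32_t) buf[2] << 8  | (uint32_t) buf[3];
}

static uint64_t read64be(const uint8_t *buf) {
    return (uint64_t) read32be(buf) << 32 | read32be(buf + 4);
}

struct header_info {
    bool session;
    // session packet
    uint32_t width, height;
    // media packet
    bool config, key_frame;
    uint64_t pts;
    uint32_t packet_size;
};

static void parse_header(const uint8_t buf[HEADER_SIZE], struct header_info *out) {
    *out = (struct header_info) {0};
    uint64_t pts_flags = read64be(buf);
    out->session = pts_flags & FLAG_SESSION;
    if (out->session) {
        // session packet: bytes 4..7 = video width, bytes 8..11 = video height
        out->width = read32be(&buf[4]);
        out->height = read32be(&buf[8]);
    } else {
        // media packet: bits 62/61 are flags, the low bits are the PTS,
        // bytes 8..11 give the size of the payload that follows the header
        out->config = pts_flags & FLAG_CONFIG;
        out->key_frame = pts_flags & FLAG_KEY_FRAME;
        out->pts = pts_flags & PTS_MASK;
        out->packet_size = read32be(&buf[8]);
    }
}
```

A session header only announces the video size of the stream that follows; every other header describes one media packet whose payload of `packet_size` bytes comes immediately after it.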
@@ -114,7 +141,7 @@ sc_demuxer_recv_packet(struct sc_demuxer *demuxer, AVPacket *packet) {
         return false;
     }

-    r = net_recv_all(demuxer->socket, packet->data, len);
+    ssize_t r = net_recv_all(demuxer->socket, packet->data, len);
     if (r < 0 || ((uint32_t) r) < len) {
         av_packet_unref(packet);
         return false;

@@ -187,17 +214,28 @@ run_demuxer(void *data) {

     codec_ctx->flags |= AV_CODEC_FLAG_LOW_DELAY;

+    uint8_t header[SC_PACKET_HEADER_SIZE];
+    struct sc_stream_session session_data;
+
+    struct sc_stream_session *session = NULL;
     if (codec->type == AVMEDIA_TYPE_VIDEO) {
-        uint32_t width;
-        uint32_t height;
-        ok = sc_demuxer_recv_video_size(demuxer, &width, &height);
+        bool ok = sc_demuxer_recv_header(demuxer, header);
         if (!ok) {
             goto finally_free_context;
         }

-        codec_ctx->width = width;
-        codec_ctx->height = height;
+        if (!sc_demuxer_is_session(header)) {
+            LOGE("Unexpected packet (not a session header)");
+            goto finally_free_context;
+        }
+
+        session = &session_data;
+        sc_demuxer_parse_session(header, session);
+
+        codec_ctx->width = session_data.video.width;
+        codec_ctx->height = session_data.video.height;
         codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
     } else {
         // Hardcoded audio properties
 #ifdef SCRCPY_LAVU_HAS_CHLAYOUT

@@ -219,7 +257,8 @@ run_demuxer(void *data) {
         goto finally_free_context;
     }

-    if (!sc_packet_source_sinks_open(&demuxer->packet_source, codec_ctx)) {
+    if (!sc_packet_source_sinks_open(&demuxer->packet_source, codec_ctx,
+                                     session)) {
         goto finally_free_context;
     }

@@ -241,27 +280,39 @@ run_demuxer(void *data) {
     }

     for (;;) {
-        bool ok = sc_demuxer_recv_packet(demuxer, packet);
+        bool ok = sc_demuxer_recv_header(demuxer, header);
         if (!ok) {
             // end of stream
             status = SC_DEMUXER_STATUS_EOS;
             break;
         }

-        if (must_merge_config_packet) {
-            // Prepend any config packet to the next media packet
-            ok = sc_packet_merger_merge(&merger, packet);
+        if (sc_demuxer_is_session(header)) {
+            sc_demuxer_parse_session(header, &session_data);
+            ok = sc_packet_source_sinks_push_session(&demuxer->packet_source,
+                                                     &session_data);
             if (!ok) {
-                av_packet_unref(packet);
+                // The sink already logged its concrete error
                 break;
             }
-        }
+        } else {
+            sc_demuxer_recv_packet(demuxer, header, packet);

-        ok = sc_packet_source_sinks_push(&demuxer->packet_source, packet);
-        av_packet_unref(packet);
-        if (!ok) {
-            // The sink already logged its concrete error
-            break;
+            if (must_merge_config_packet) {
+                // Prepend any config packet to the next media packet
+                ok = sc_packet_merger_merge(&merger, packet);
+                if (!ok) {
+                    av_packet_unref(packet);
+                    break;
+                }
+            }
+
+            ok = sc_packet_source_sinks_push(&demuxer->packet_source, packet);
+            av_packet_unref(packet);
+            if (!ok) {
+                // The sink already logged its concrete error
+                break;
+            }
         }
     }

@@ -541,7 +541,10 @@ sc_recorder_set_orientation(AVStream *stream, enum sc_orientation orientation) {

 static bool
 sc_recorder_video_packet_sink_open(struct sc_packet_sink *sink,
-                                   AVCodecContext *ctx) {
+                                   AVCodecContext *ctx,
+                                   const struct sc_stream_session *session) {
+    (void) session;
+
     struct sc_recorder *recorder = DOWNCAST_VIDEO(sink);
     // only written from this thread, no need to lock
     assert(!recorder->video_init);

@@ -635,7 +638,10 @@ sc_recorder_video_packet_sink_push(struct sc_packet_sink *sink,

 static bool
 sc_recorder_audio_packet_sink_open(struct sc_packet_sink *sink,
-                                   AVCodecContext *ctx) {
+                                   AVCodecContext *ctx,
+                                   const struct sc_stream_session *session) {
+    (void) session;
+
     struct sc_recorder *recorder = DOWNCAST_AUDIO(sink);
     assert(recorder->audio);
     // only written from this thread, no need to lock

@@ -252,9 +252,11 @@ event_watcher(void *data, SDL_Event *event) {

 static bool
 sc_screen_frame_sink_open(struct sc_frame_sink *sink,
-                          const AVCodecContext *ctx) {
+                          const AVCodecContext *ctx,
+                          const struct sc_stream_session *session) {
     assert(ctx->pix_fmt == AV_PIX_FMT_YUV420P);
     (void) ctx;
+    (void) session;

     struct sc_screen *screen = DOWNCAST(sink);

@@ -6,6 +6,8 @@
 #include <stdbool.h>
 #include <libavcodec/avcodec.h>

+#include "trait/packet_sink.h"
+
 /**
  * Frame sink trait.
  *

@@ -17,9 +19,16 @@ struct sc_frame_sink {

 struct sc_frame_sink_ops {
     /* The codec context is valid until the sink is closed */
-    bool (*open)(struct sc_frame_sink *sink, const AVCodecContext *ctx);
+    bool (*open)(struct sc_frame_sink *sink, const AVCodecContext *ctx,
+                 const struct sc_stream_session *session);
     void (*close)(struct sc_frame_sink *sink);
     bool (*push)(struct sc_frame_sink *sink, const AVFrame *frame);
+
+    /**
+     * Optional callback to be notified of a new stream session.
+     */
+    bool (*push_session)(struct sc_frame_sink *sink,
+                         const struct sc_stream_session *session);
 };

 #endif
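Editor's note (not part of the patch): the ops extension above keeps `push_session` optional, so existing sinks keep working unchanged. Below is a minimal sketch of a sink implementing the extended trait, assuming it is compiled inside the scrcpy app sources; the `my_null_sink` names are made up for illustration.

```c
// A do-nothing frame sink that only reports session (resolution) changes.
#include <stdio.h>

#include "trait/frame_sink.h"

struct my_null_sink {
    struct sc_frame_sink frame_sink; // frame sink trait
};

static bool
my_null_sink_open(struct sc_frame_sink *sink, const AVCodecContext *ctx,
                  const struct sc_stream_session *session) {
    (void) sink;
    (void) ctx;
    // For a video stream, the initial size is available at open time
    if (session) {
        printf("initial video size: %ux%u\n",
               (unsigned) session->video.width,
               (unsigned) session->video.height);
    }
    return true;
}

static void
my_null_sink_close(struct sc_frame_sink *sink) {
    (void) sink;
}

static bool
my_null_sink_push(struct sc_frame_sink *sink, const AVFrame *frame) {
    (void) sink;
    (void) frame;
    return true;
}

// Optional: called when the server announces a new session (e.g. a new size)
static bool
my_null_sink_push_session(struct sc_frame_sink *sink,
                          const struct sc_stream_session *session) {
    (void) sink;
    printf("new video size: %ux%u\n",
           (unsigned) session->video.width,
           (unsigned) session->video.height);
    return true;
}

void
my_null_sink_init(struct my_null_sink *s) {
    static const struct sc_frame_sink_ops ops = {
        .open = my_null_sink_open,
        .close = my_null_sink_close,
        .push = my_null_sink_push,
        .push_session = my_null_sink_push_session, // may be left NULL
    };
    s->frame_sink.ops = &ops;
}
```

A sink that does not care about resolution changes can simply leave `.push_session` unset; as shown in the frame source changes below, `sc_frame_source_sinks_push_session()` skips sinks whose callback is NULL.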
@@ -27,11 +27,12 @@ sc_frame_source_sinks_close_firsts(struct sc_frame_source *source,

 bool
 sc_frame_source_sinks_open(struct sc_frame_source *source,
-                           const AVCodecContext *ctx) {
+                           const AVCodecContext *ctx,
+                           const struct sc_stream_session *session) {
     assert(source->sink_count);
     for (unsigned i = 0; i < source->sink_count; ++i) {
         struct sc_frame_sink *sink = source->sinks[i];
-        if (!sink->ops->open(sink, ctx)) {
+        if (!sink->ops->open(sink, ctx, session)) {
             sc_frame_source_sinks_close_firsts(source, i);
             return false;
         }

@@ -59,3 +60,18 @@ sc_frame_source_sinks_push(struct sc_frame_source *source,

     return true;
 }
+
+bool
+sc_frame_source_sinks_push_session(struct sc_frame_source *source,
+                                   const struct sc_stream_session *session) {
+    assert(source->sink_count);
+    for (unsigned i = 0; i < source->sink_count; ++i) {
+        struct sc_frame_sink *sink = source->sinks[i];
+        if (sink->ops->push_session &&
+                !sink->ops->push_session(sink, session)) {
+            return false;
+        }
+    }
+
+    return true;
+}

@@ -28,7 +28,8 @@ sc_frame_source_add_sink(struct sc_frame_source *source,

 bool
 sc_frame_source_sinks_open(struct sc_frame_source *source,
-                           const AVCodecContext *ctx);
+                           const AVCodecContext *ctx,
+                           const struct sc_stream_session *session);

 void
 sc_frame_source_sinks_close(struct sc_frame_source *source);

@@ -37,4 +38,8 @@ bool
 sc_frame_source_sinks_push(struct sc_frame_source *source,
                            const AVFrame *frame);
+
+bool
+sc_frame_source_sinks_push_session(struct sc_frame_source *source,
+                                   const struct sc_stream_session *session);

 #endif
@@ -15,12 +15,28 @@ struct sc_packet_sink {
     const struct sc_packet_sink_ops *ops;
 };

+struct sc_stream_session_video {
+    uint32_t width;
+    uint32_t height;
+};
+
+struct sc_stream_session {
+    struct sc_stream_session_video video;
+};
+
 struct sc_packet_sink_ops {
     /* The codec context is valid until the sink is closed */
-    bool (*open)(struct sc_packet_sink *sink, AVCodecContext *ctx);
+    bool (*open)(struct sc_packet_sink *sink, AVCodecContext *ctx,
+                 const struct sc_stream_session *session);
     void (*close)(struct sc_packet_sink *sink);
     bool (*push)(struct sc_packet_sink *sink, const AVPacket *packet);

+    /**
+     * Optional callback to be notified of a new stream session.
+     */
+    bool (*push_session)(struct sc_packet_sink *sink,
+                         const struct sc_stream_session *session);
+
     /**
      * Called when the input stream has been disabled at runtime.
      *
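Editor's note (not part of the patch): `struct sc_stream_session` currently only carries the video size. For reference, this is how the 12-byte session header that transports it could be produced on the sending side, mirroring `sc_demuxer_parse_session()` above; the local struct and helper names are hypothetical, not the server's actual implementation (which lives in Streamer.java).

```c
// Building a session header: flag byte, 3 padding bytes, then width/height
// as 32-bit big-endian values.
#include <stdint.h>
#include <string.h>

struct stream_session_video { uint32_t width, height; };
struct stream_session { struct stream_session_video video; };

static void write32be(uint8_t *buf, uint32_t value) {
    buf[0] = value >> 24;
    buf[1] = value >> 16;
    buf[2] = value >> 8;
    buf[3] = value;
}

static void write_session_header(uint8_t buf[12], const struct stream_session *session) {
    memset(buf, 0, 12);
    buf[0] = 0x80; // session packet flag (MSB set)
    write32be(&buf[4], session->video.width);
    write32be(&buf[8], session->video.height);
}

int main(void) {
    struct stream_session session = { .video = { .width = 1920, .height = 1080 } };
    uint8_t header[12];
    write_session_header(header, &session);
    // header[0] == 0x80, header[4..7] == width, header[8..11] == height
    return 0;
}
```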
@@ -27,11 +27,12 @@ sc_packet_source_sinks_close_firsts(struct sc_packet_source *source,

 bool
 sc_packet_source_sinks_open(struct sc_packet_source *source,
-                            AVCodecContext *ctx) {
+                            AVCodecContext *ctx,
+                            const struct sc_stream_session *session) {
     assert(source->sink_count);
     for (unsigned i = 0; i < source->sink_count; ++i) {
         struct sc_packet_sink *sink = source->sinks[i];
-        if (!sink->ops->open(sink, ctx)) {
+        if (!sink->ops->open(sink, ctx, session)) {
             sc_packet_source_sinks_close_firsts(source, i);
             return false;
         }

@@ -60,6 +61,20 @@ sc_packet_source_sinks_push(struct sc_packet_source *source,
     return true;
 }
+
+bool
+sc_packet_source_sinks_push_session(struct sc_packet_source *source,
+                                    const struct sc_stream_session *session) {
+    assert(source->sink_count);
+    for (unsigned i = 0; i < source->sink_count; ++i) {
+        struct sc_packet_sink *sink = source->sinks[i];
+        if (!sink->ops->push_session(sink, session)) {
+            return false;
+        }
+    }
+
+    return true;
+}

 void
 sc_packet_source_sinks_disable(struct sc_packet_source *source) {
     assert(source->sink_count);

@@ -28,7 +28,8 @@ sc_packet_source_add_sink(struct sc_packet_source *source,

 bool
 sc_packet_source_sinks_open(struct sc_packet_source *source,
-                            AVCodecContext *ctx);
+                            AVCodecContext *ctx,
+                            const struct sc_stream_session *session);

 void
 sc_packet_source_sinks_close(struct sc_packet_source *source);

@@ -37,6 +38,10 @@ bool
 sc_packet_source_sinks_push(struct sc_packet_source *source,
                             const AVPacket *packet);
+
+bool
+sc_packet_source_sinks_push_session(struct sc_packet_source *source,
+                                    const struct sc_stream_session *session);

 void
 sc_packet_source_sinks_disable(struct sc_packet_source *source);

@@ -146,9 +146,11 @@ run_v4l2_sink(void *data) {
 }

 static bool
-sc_v4l2_sink_open(struct sc_v4l2_sink *vs, const AVCodecContext *ctx) {
+sc_v4l2_sink_open(struct sc_v4l2_sink *vs, const AVCodecContext *ctx,
+                  const struct sc_stream_session *session) {
     assert(ctx->pix_fmt == AV_PIX_FMT_YUV420P);
     (void) ctx;
+    (void) session;

     bool ok = sc_frame_buffer_init(&vs->fb);
     if (!ok) {

@@ -326,9 +328,10 @@ sc_v4l2_sink_push(struct sc_v4l2_sink *vs, const AVFrame *frame) {
 }

 static bool
-sc_v4l2_frame_sink_open(struct sc_frame_sink *sink, const AVCodecContext *ctx) {
+sc_v4l2_frame_sink_open(struct sc_frame_sink *sink, const AVCodecContext *ctx,
+                        const struct sc_stream_session *session) {
     struct sc_v4l2_sink *vs = DOWNCAST(sink);
-    return sc_v4l2_sink_open(vs, ctx);
+    return sc_v4l2_sink_open(vs, ctx, session);
 }

 static void
@@ -409,12 +409,11 @@ with any client which uses the same protocol.

 For simplicity, some [server-specific options] have been added to produce raw
 streams easily:
- - `send_device_meta=false`: disable the device metata (in practice, the device
+ - `send_device_meta=false`: disable device metadata (in practice, the device
    name) sent on the _first_ socket
  - `send_frame_meta=false`: disable the 12-byte header for each packet
  - `send_dummy_byte`: disable the dummy byte sent on forward connections
- - `send_codec_meta`: disable the codec information (and initial device size for
-   video)
+ - `send_stream_meta`: disable codec and video size metadata
  - `raw_stream`: disable all the above

 [server-specific options]: https://github.com/Genymobile/scrcpy/blob/a3cdf1a6b86ea22786e1f7d09b9c202feabc6949/server/src/main/java/com/genymobile/scrcpy/Options.java#L309-L329
@@ -78,7 +78,7 @@ public class Options {
     private boolean sendDeviceMeta = true; // send device name and size
     private boolean sendFrameMeta = true; // send PTS so that the client may record properly
     private boolean sendDummyByte = true; // write a byte on start to detect connection issues
-    private boolean sendCodecMeta = true; // write the codec metadata before the stream
+    private boolean sendStreamMeta = true; // write the stream metadata (codec and session)

     public Ln.Level getLogLevel() {
         return logLevel;

@@ -284,8 +284,8 @@
         return sendDummyByte;
     }

-    public boolean getSendCodecMeta() {
-        return sendCodecMeta;
+    public boolean getSendStreamMeta() {
+        return sendStreamMeta;
     }

     @SuppressWarnings("MethodLength")

@@ -500,8 +500,8 @@
                 case "send_dummy_byte":
                     options.sendDummyByte = Boolean.parseBoolean(value);
                     break;
-                case "send_codec_meta":
-                    options.sendCodecMeta = Boolean.parseBoolean(value);
+                case "send_stream_meta":
+                    options.sendStreamMeta = Boolean.parseBoolean(value);
                     break;
                 case "raw_stream":
                     boolean rawStream = Boolean.parseBoolean(value);

@@ -509,7 +509,7 @@
                         options.sendDeviceMeta = false;
                         options.sendFrameMeta = false;
                         options.sendDummyByte = false;
-                        options.sendCodecMeta = false;
+                        options.sendStreamMeta = false;
                     }
                     break;
                 default:
@@ -125,7 +125,7 @@ public final class Server {
                 audioCapture = new AudioPlaybackCapture(options.getAudioDup());
             }

-            Streamer audioStreamer = new Streamer(connection.getAudioFd(), audioCodec, options.getSendCodecMeta(), options.getSendFrameMeta());
+            Streamer audioStreamer = new Streamer(connection.getAudioFd(), audioCodec, options.getSendStreamMeta(), options.getSendFrameMeta());
             AsyncProcessor audioRecorder;
             if (audioCodec == AudioCodec.RAW) {
                 audioRecorder = new AudioRawRecorder(audioCapture, audioStreamer);

@@ -136,7 +136,7 @@
         }

         if (video) {
-            Streamer videoStreamer = new Streamer(connection.getVideoFd(), options.getVideoCodec(), options.getSendCodecMeta(),
+            Streamer videoStreamer = new Streamer(connection.getVideoFd(), options.getVideoCodec(), options.getSendStreamMeta(),
                     options.getSendFrameMeta());
             SurfaceCapture surfaceCapture;
             if (options.getVideoSource() == VideoSource.DISPLAY) {
@@ -3,6 +3,7 @@ package com.genymobile.scrcpy.device;
 import com.genymobile.scrcpy.audio.AudioCodec;
 import com.genymobile.scrcpy.util.Codec;
 import com.genymobile.scrcpy.util.IO;
+import com.genymobile.scrcpy.util.Ln;

 import android.media.MediaCodec;

@@ -14,12 +15,13 @@ import java.util.Arrays;

 public final class Streamer {

-    private static final long PACKET_FLAG_CONFIG = 1L << 63;
-    private static final long PACKET_FLAG_KEY_FRAME = 1L << 62;
+    private static final long PACKET_FLAG_SESSION = 1L << 63;
+    private static final long PACKET_FLAG_CONFIG = 1L << 62;
+    private static final long PACKET_FLAG_KEY_FRAME = 1L << 61;

     private final FileDescriptor fd;
     private final Codec codec;
-    private final boolean sendCodecMeta;
+    private final boolean sendStreamMeta;
     private final boolean sendFrameMeta;

     private final ByteBuffer headerBuffer = ByteBuffer.allocate(12);

@@ -27,7 +29,7 @@
     public Streamer(FileDescriptor fd, Codec codec, boolean sendCodecMeta, boolean sendFrameMeta) {
         this.fd = fd;
         this.codec = codec;
-        this.sendCodecMeta = sendCodecMeta;
+        this.sendStreamMeta = sendCodecMeta;
         this.sendFrameMeta = sendFrameMeta;
     }

@@ -36,7 +38,7 @@
     }

     public void writeAudioHeader() throws IOException {
-        if (sendCodecMeta) {
+        if (sendStreamMeta) {
             ByteBuffer buffer = ByteBuffer.allocate(4);
             buffer.putInt(codec.getId());
             buffer.flip();

@@ -44,12 +46,10 @@
         }
     }

-    public void writeVideoHeader(Size videoSize) throws IOException {
-        if (sendCodecMeta) {
-            ByteBuffer buffer = ByteBuffer.allocate(12);
+    public void writeVideoHeader() throws IOException {
+        if (sendStreamMeta) {
+            ByteBuffer buffer = ByteBuffer.allocate(4);
             buffer.putInt(codec.getId());
-            buffer.putInt(videoSize.getWidth());
-            buffer.putInt(videoSize.getHeight());
             buffer.flip();
             IO.writeFully(fd, buffer);
         }

@@ -89,6 +89,18 @@
         writePacket(codecBuffer, pts, config, keyFrame);
     }

+    public void writeSessionMeta(int width, int height) throws IOException {
+        if (sendStreamMeta) {
+            headerBuffer.clear();
+
+            headerBuffer.putInt((int) (PACKET_FLAG_SESSION >> 32)); // Set the first bit to 1
+            headerBuffer.putInt(width);
+            headerBuffer.putInt(height);
+            headerBuffer.flip();
+            IO.writeFully(fd, headerBuffer);
+        }
+    }
+
     private void writeFrameMeta(FileDescriptor fd, int packetSize, long pts, boolean config, boolean keyFrame) throws IOException {
         headerBuffer.clear();

@@ -71,16 +71,13 @@ public class SurfaceEncoder implements AsyncProcessor {

         try {
             boolean alive;
-            boolean headerWritten = false;
+            streamer.writeVideoHeader();

             do {
                 reset.consumeReset(); // If a capture reset was requested, it is implicitly fulfilled
                 capture.prepare();
                 Size size = capture.getSize();
-                if (!headerWritten) {
-                    streamer.writeVideoHeader(size);
-                    headerWritten = true;
-                }

                 format.setInteger(MediaFormat.KEY_WIDTH, size.getWidth());
                 format.setInteger(MediaFormat.KEY_HEIGHT, size.getHeight());

@@ -107,6 +104,7 @@
                 boolean resetRequested = reset.consumeReset();
                 if (!resetRequested) {
                     // If a reset is requested during encode(), it will interrupt the encoding by an EOS
+                    streamer.writeSessionMeta(size.getWidth(), size.getHeight());
                     encode(mediaCodec, streamer);
                 }
                 // The capture might have been closed internally (for example if the camera is disconnected)