Mirror of https://github.com/Genymobile/scrcpy.git (last synced 2025-04-21 20:15:05 +00:00).
hw_dec
This commit is contained in:
parent
b1dbc30072
commit
be6c6325d9
3 changed files with 61 additions and 6 deletions
|
@ -2,6 +2,7 @@
|
|||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
#include "events.h"
|
||||
#include "video_buffer.h"
|
||||
|
@ -11,6 +12,39 @@
|
|||
/** Downcast packet_sink to decoder */
|
||||
#define DOWNCAST(SINK) container_of(SINK, struct sc_decoder, packet_sink)
|
||||
|
||||
static int hw_decoder_init(struct sc_decoder *decoder, const enum AVHWDeviceType type)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
AVBufferRef *hw_device_ctx;
|
||||
if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
|
||||
NULL, NULL, 0)) < 0) {
|
||||
LOGE("Failed to create specified HW device.");
|
||||
return err;
|
||||
}
|
||||
decoder->codec_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
 * AVCodecContext.get_format callback: select the hardware pixel format.
 *
 * Walks the list of formats offered by the decoder and picks
 * AV_PIX_FMT_VAAPI if available; otherwise reports failure by returning
 * AV_PIX_FMT_NONE.
 */
static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    (void) ctx;

    LOGD("== get_hw_format ==");

    for (const enum AVPixelFormat *fmt = pix_fmts; *fmt != AV_PIX_FMT_NONE; ++fmt) {
        LOGD("==== %s (%d)", av_get_pix_fmt_name(*fmt), *fmt);
        if (*fmt == AV_PIX_FMT_VAAPI) {
            return *fmt;
        }
    }

    LOGE("Failed to get HW surface format.");
    return AV_PIX_FMT_NONE;
}
|
||||
|
||||
static void
|
||||
sc_decoder_close_first_sinks(struct sc_decoder *decoder, unsigned count) {
|
||||
while (count) {
|
||||
|
@ -48,6 +82,10 @@ sc_decoder_open(struct sc_decoder *decoder, const AVCodec *codec) {
|
|||
|
||||
decoder->codec_ctx->flags |= AV_CODEC_FLAG_LOW_DELAY;
|
||||
|
||||
int r = hw_decoder_init(decoder, AV_HWDEVICE_TYPE_VAAPI);
|
||||
assert(!r);
|
||||
decoder->codec_ctx->get_format = get_hw_format;
|
||||
|
||||
if (avcodec_open2(decoder->codec_ctx, codec, NULL) < 0) {
|
||||
LOGE("Could not open codec");
|
||||
avcodec_free_context(&decoder->codec_ctx);
|
||||
|
@ -62,6 +100,9 @@ sc_decoder_open(struct sc_decoder *decoder, const AVCodec *codec) {
|
|||
return false;
|
||||
}
|
||||
|
||||
decoder->hw_frame = av_frame_alloc();
|
||||
assert(decoder->hw_frame);
|
||||
|
||||
if (!sc_decoder_open_sinks(decoder)) {
|
||||
LOGE("Could not open decoder sinks");
|
||||
av_frame_free(&decoder->frame);
|
||||
|
@ -76,6 +117,7 @@ sc_decoder_open(struct sc_decoder *decoder, const AVCodec *codec) {
|
|||
static void
|
||||
sc_decoder_close(struct sc_decoder *decoder) {
|
||||
sc_decoder_close_sinks(decoder);
|
||||
av_frame_free(&decoder->hw_frame);
|
||||
av_frame_free(&decoder->frame);
|
||||
avcodec_close(decoder->codec_ctx);
|
||||
avcodec_free_context(&decoder->codec_ctx);
|
||||
|
@ -107,15 +149,26 @@ sc_decoder_push(struct sc_decoder *decoder, const AVPacket *packet) {
|
|||
LOGE("Could not send video packet: %d", ret);
|
||||
return false;
|
||||
}
|
||||
ret = avcodec_receive_frame(decoder->codec_ctx, decoder->frame);
|
||||
ret = avcodec_receive_frame(decoder->codec_ctx, decoder->hw_frame);
|
||||
if (!ret) {
|
||||
// a frame was received
|
||||
|
||||
sc_tick t = sc_tick_now();
|
||||
ret = av_hwframe_transfer_data(decoder->frame, decoder->hw_frame, 0);
|
||||
if (ret < 0) {
|
||||
LOGE("HWFRAME transfer fail");
|
||||
return false;
|
||||
}
|
||||
|
||||
LOGD("av_hwframe_transfer_data: %ld", sc_tick_now() - t);
|
||||
|
||||
bool ok = push_frame_to_sinks(decoder, decoder->frame);
|
||||
// A frame lost should not make the whole pipeline fail. The error, if
|
||||
// any, is already logged.
|
||||
(void) ok;
|
||||
|
||||
av_frame_unref(decoder->frame);
|
||||
av_frame_unref(decoder->hw_frame);
|
||||
} else if (ret != AVERROR(EAGAIN)) {
|
||||
LOGE("Could not receive video frame: %d", ret);
|
||||
return false;
|
||||
|
|
|
@ -19,6 +19,7 @@ struct sc_decoder {
|
|||
|
||||
AVCodecContext *codec_ctx;
|
||||
AVFrame *frame;
|
||||
AVFrame *hw_frame;
|
||||
};
|
||||
|
||||
void
|
||||
|
|
|
@ -244,7 +244,7 @@ static inline SDL_Texture *
|
|||
create_texture(struct sc_screen *screen) {
|
||||
SDL_Renderer *renderer = screen->renderer;
|
||||
struct sc_size size = screen->frame_size;
|
||||
SDL_Texture *texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
|
||||
SDL_Texture *texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_NV12,
|
||||
SDL_TEXTUREACCESS_STREAMING,
|
||||
size.width, size.height);
|
||||
if (!texture) {
|
||||
|
@ -711,10 +711,11 @@ prepare_for_frame(struct sc_screen *screen, struct sc_size new_frame_size) {
|
|||
// write the frame into the texture
|
||||
static void
|
||||
update_texture(struct sc_screen *screen, const AVFrame *frame) {
|
||||
SDL_UpdateYUVTexture(screen->texture, NULL,
|
||||
frame->data[0], frame->linesize[0],
|
||||
frame->data[1], frame->linesize[1],
|
||||
frame->data[2], frame->linesize[2]);
|
||||
// SDL_UpdateYUVTexture(screen->texture, NULL,
|
||||
// frame->data[0], frame->linesize[0],
|
||||
// frame->data[1], frame->linesize[1],
|
||||
// frame->data[2], frame->linesize[2]);
|
||||
SDL_UpdateTexture(screen->texture, NULL, frame->data[0], frame->linesize[0]);
|
||||
|
||||
if (screen->mipmaps) {
|
||||
SDL_GL_BindTexture(screen->texture, NULL, NULL);
|
||||
|
|
Loading…
Add table
Reference in a new issue