1. Change the ffmpeg version

2. Add video frame conversion
rankun 2018-10-17 18:25:08 +08:00
parent c1d0b3ac54
commit 1fb5252448
23 changed files with 1152 additions and 96 deletions
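
For orientation, the "frame conversion" added here is a libswscale YUV420P-to-RGB24 pass. The sketch below is a minimal, self-contained illustration of what the new Convert class (added further down in this diff) wraps; it is not code from the commit, and it assumes both frames are already allocated.

extern "C" {
#include "libswscale/swscale.h"
#include "libavutil/frame.h"
}

// Illustration only: the YUV420P -> RGB24 conversion that Convert wraps.
static bool yuvToRgb(const AVFrame* src, AVFrame* dst, int width, int height)
{
    SwsContext* ctx = sws_getContext(width, height, AV_PIX_FMT_YUV420P,
                                     width, height, AV_PIX_FMT_RGB24,
                                     SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!ctx) {
        return false;
    }
    int outHeight = sws_scale(ctx, src->data, src->linesize, 0, height,
                              dst->data, dst->linesize);
    sws_freeContext(ctx);
    return outHeight > 0;
}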

View file

@ -48,5 +48,6 @@ INCLUDEPATH += \
LIBS += \
-L$$PWD/ffmpeg/lib -lavcodec \
-L$$PWD/ffmpeg/lib -lavformat \
-L$$PWD/ffmpeg/lib -lavutil
-L$$PWD/ffmpeg/lib -lavutil \
-L$$PWD/ffmpeg/lib -lswscale

View file

@ -1,3 +1,5 @@
#include <QDebug>
#include "convert.h"
Convert::Convert()
@ -9,3 +11,71 @@ Convert::~Convert()
{
}
void Convert::setSrcFrameInfo(quint32 srcWidth, quint32 srcHeight, AVPixelFormat srcFormat)
{
m_srcWidth = srcWidth;
m_srcHeight = srcHeight;
m_srcFormat = srcFormat;
qDebug() << "Convert::src frame info " << srcWidth << "x" << srcHeight;
}
void Convert::getSrcFrameInfo(quint32& srcWidth, quint32& srcHeight, AVPixelFormat& srcFormat)
{
srcWidth = m_srcWidth;
srcHeight = m_srcHeight;
srcFormat = m_srcFormat;
}
void Convert::setDstFrameInfo(quint32 dstWidth, quint32 dstHeight, AVPixelFormat dstFormat)
{
m_dstWidth = dstWidth;
m_dstHeight = dstHeight;
m_dstFormat = dstFormat;
}
void Convert::getDstFrameInfo(quint32& dstWidth, quint32& dstHeight, AVPixelFormat& dstFormat)
{
dstWidth = m_dstWidth;
dstHeight = m_dstHeight;
dstFormat = m_dstFormat;
}
bool Convert::init()
{
if (m_convertCtx) {
return true;
}
m_convertCtx = sws_getContext(m_srcWidth, m_srcHeight, m_srcFormat, m_dstWidth, m_dstHeight, m_dstFormat,
SWS_BICUBIC, Q_NULLPTR, Q_NULLPTR, Q_NULLPTR);
if (!m_convertCtx) {
return false;
}
return true;
}
bool Convert::isInit()
{
return m_convertCtx ? true : false;
}
void Convert::deInit()
{
if (m_convertCtx) {
sws_freeContext(m_convertCtx);
m_convertCtx = Q_NULLPTR;
}
}
bool Convert::convert(AVFrame* srcFrame, AVFrame* dstFrame)
{
if(!m_convertCtx || !srcFrame || !dstFrame) {
return false;
}
qint32 ret = sws_scale(m_convertCtx, (const uint8_t* const*)srcFrame->data, srcFrame->linesize, 0, m_srcHeight, dstFrame->data, dstFrame->linesize);
qDebug() << "Convert::convert sws_scale return " << ret;
if (0 == ret) {
return false;
}
return true;
}
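
A caveat worth noting for the decoder changes below: Convert::convert() writes through dstFrame->data, but the commit only calls av_frame_alloc() for rgbDecoderFrame, which allocates the frame struct and not its pixel buffers. A hedged sketch of one way the caller could allocate a writable RGB24 destination (the helper name allocRgbFrame is invented for illustration; av_frame_get_buffer() is the stock libavutil call):

extern "C" {
#include "libavutil/frame.h"
}

// Hypothetical helper, not part of this commit: allocate an RGB24 frame with
// pixel buffers so it can serve as the destination of Convert::convert().
static AVFrame* allocRgbFrame(int width, int height)
{
    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        return nullptr;
    }
    frame->format = AV_PIX_FMT_RGB24;
    frame->width = width;
    frame->height = height;
    // Allocates frame->data / frame->linesize for the format set above.
    if (av_frame_get_buffer(frame, 0) < 0) {
        av_frame_free(&frame);
        return nullptr;
    }
    return frame;
}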

View file

@ -1,12 +1,40 @@
#ifndef CONVERT_H
#define CONVERT_H
#include <QtGlobal>
extern "C"
{
#include "libswscale/swscale.h"
#include "libavutil/frame.h"
}
class Convert
{
public:
Convert();
virtual ~Convert();
public:
void setSrcFrameInfo(quint32 srcWidth, quint32 srcHeight, AVPixelFormat srcFormat);
void getSrcFrameInfo(quint32& srcWidth, quint32& srcHeight, AVPixelFormat& srcFormat);
void setDstFrameInfo(quint32 dstWidth, quint32 dstHeight, AVPixelFormat dstFormat);
void getDstFrameInfo(quint32& dstWidth, quint32& dstHeight, AVPixelFormat& dstFormat);
bool init();
bool isInit();
void deInit();
bool convert(AVFrame* srcFrame, AVFrame* dstFrame);
//int srcW, int srcH, enum AVPixelFormat srcFormat,int dstW, int dstH, enum AVPixelFormat dstFormat,
private:
quint32 m_srcWidth = 0;
quint32 m_srcHeight = 0;
AVPixelFormat m_srcFormat = AV_PIX_FMT_NONE;
quint32 m_dstWidth = 0;
quint32 m_dstHeight = 0;
AVPixelFormat m_dstFormat = AV_PIX_FMT_NONE;
struct SwsContext *m_convertCtx = Q_NULLPTR;
};
#endif // CONVERT_H

View file

@ -55,7 +55,7 @@ qint32 Decoder::recvData(quint8* buf, qint32 bufSize)
break;
}
if (QTcpSocket::SocketTimeoutError == m_deviceSocket->error()) {
qDebug() << "QTcpSocket::SocketTimeoutError";
//qDebug() << "QTcpSocket::SocketTimeoutError";
}
}
qDebug() << "recv data " << bufSize;
@ -89,8 +89,10 @@ void Decoder::run()
AVCodecContext *codecCtx = Q_NULLPTR;
// frame is stand alone
AVFrame* decoderFrame = Q_NULLPTR;
decoderFrame = av_frame_alloc();
AVFrame* yuvDecoderFrame = Q_NULLPTR;
AVFrame* rgbDecoderFrame = Q_NULLPTR;
yuvDecoderFrame = av_frame_alloc();
rgbDecoderFrame = av_frame_alloc();
bool isFormatCtxOpen = false;
bool isCodecCtxOpen = false;
@ -158,9 +160,15 @@ void Decoder::run()
qCritical("Could not send video packet: %d", ret);
goto runQuit;
}
ret = avcodec_receive_frame(codecCtx, decoderFrame);
ret = avcodec_receive_frame(codecCtx, yuvDecoderFrame);
if (!ret) {
// a frame was received
if (!m_conver.isInit()) {
m_conver.setSrcFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P);
m_conver.setDstFrameInfo(codecCtx->width, codecCtx->height, AV_PIX_FMT_RGB24);
m_conver.init();
}
m_conver.convert(yuvDecoderFrame, rgbDecoderFrame);
//push_frame(decoder);
} else if (ret != AVERROR(EAGAIN)) {
qCritical("Could not receive video frame: %d", ret);
@ -170,7 +178,7 @@ void Decoder::run()
#else
while (packet.size > 0) {
int gotPicture = 0;
int len = avcodec_decode_video2(codecCtx, decoderFrame, &gotPicture, &packet);
int len = avcodec_decode_video2(codecCtx, yuvDecoderFrame, &gotPicture, &packet);
if (len < 0) {
qCritical("Could not decode video packet: %d", len);
goto runQuit;
@ -207,6 +215,14 @@ runQuit:
avcodec_free_context(&codecCtx);
}
if (yuvDecoderFrame) {
av_free(yuvDecoderFrame);
}
if (rgbDecoderFrame) {
av_free(rgbDecoderFrame);
}
m_conver.deInit();
if (m_deviceSocket) {
m_deviceSocket->disconnectFromHost();
delete m_deviceSocket;
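
Nothing in this commit consumes rgbDecoderFrame yet; presumably the RGB24 output is intended for display in the Qt UI. As an assumption-labelled sketch, a filled RGB24 frame could be wrapped in a QImage without copying:

#include <QImage>
extern "C" {
#include "libavutil/frame.h"
}

// Sketch only: wrap a filled RGB24 AVFrame in a QImage (no copy). The AVFrame
// must outlive the QImage, and linesize[0] may include row padding.
static QImage frameToImage(const AVFrame* rgbFrame)
{
    return QImage(rgbFrame->data[0],
                  rgbFrame->width, rgbFrame->height,
                  rgbFrame->linesize[0],
                  QImage::Format_RGB888);
}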

View file

@ -5,6 +5,7 @@
#include <QTcpSocket>
#include <QPointer>
#include "convert.h"
extern "C"
{
#include "libavcodec/avcodec.h"
@ -33,6 +34,7 @@ protected:
private:
QPointer<QTcpSocket> m_deviceSocket = Q_NULLPTR;
bool m_quit = false;
Convert m_conver;
};
#endif // DECODER_H

View file

@ -409,7 +409,6 @@ enum AVCodecID {
AV_CODEC_ID_DXV,
AV_CODEC_ID_SCREENPRESSO,
AV_CODEC_ID_RSCC,
AV_CODEC_ID_AVS2,
AV_CODEC_ID_Y41P = 0x8000,
AV_CODEC_ID_AVRP,
@ -447,11 +446,6 @@ enum AVCodecID {
AV_CODEC_ID_SVG,
AV_CODEC_ID_GDV,
AV_CODEC_ID_FITS,
AV_CODEC_ID_IMM4,
AV_CODEC_ID_PROSUMER,
AV_CODEC_ID_MWSC,
AV_CODEC_ID_WCMV,
AV_CODEC_ID_RASC,
/* various PCM "codecs" */
AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
@ -643,7 +637,6 @@ enum AVCodecID {
AV_CODEC_ID_APTX,
AV_CODEC_ID_APTX_HD,
AV_CODEC_ID_SBC,
AV_CODEC_ID_ATRAC9,
/* subtitle codecs */
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
@ -672,7 +665,6 @@ enum AVCodecID {
AV_CODEC_ID_PJS,
AV_CODEC_ID_ASS,
AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
AV_CODEC_ID_TTML,
/* other specific kind of codecs (generally used for attachments) */
AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
@ -1365,12 +1357,6 @@ enum AVPacketSideDataType {
*/
AV_PKT_DATA_ENCRYPTION_INFO,
/**
* Active Format Description data consisting of a single byte as specified
* in ETSI TS 101 154 using AVActiveFormatDescription enum.
*/
AV_PKT_DATA_AFD,
/**
* The number of side data types.
* This is not part of the public API/ABI in the sense that it may
@ -1626,7 +1612,6 @@ typedef struct AVCodecContext {
* The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
* than extradata_size to avoid problems if it is read with the bitstream reader.
* The bytewise contents of extradata must not depend on the architecture or CPU endianness.
* Must be allocated with the av_malloc() family of functions.
* - encoding: Set/allocated/freed by libavcodec.
* - decoding: Set/allocated/freed by user.
*/
@ -5781,7 +5766,6 @@ typedef struct AVBitStreamFilter {
int (*init)(AVBSFContext *ctx);
int (*filter)(AVBSFContext *ctx, AVPacket *pkt);
void (*close)(AVBSFContext *ctx);
void (*flush)(AVBSFContext *ctx);
} AVBitStreamFilter;
#if FF_API_OLD_BSF
@ -5908,11 +5892,6 @@ int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt);
*/
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt);
/**
* Reset the internal bitstream filter state / flush internal buffers.
*/
void av_bsf_flush(AVBSFContext *ctx);
/**
* Free a bitstream filter context and everything associated with it; write NULL
* into the supplied pointer.

View file

@ -28,7 +28,7 @@
#include "libavutil/version.h"
#define LIBAVCODEC_VERSION_MAJOR 58
#define LIBAVCODEC_VERSION_MINOR 32
#define LIBAVCODEC_VERSION_MINOR 18
#define LIBAVCODEC_VERSION_MICRO 100
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

View file

@ -846,7 +846,6 @@ typedef struct AVStreamInternal AVStreamInternal;
#define AV_DISPOSITION_DESCRIPTIONS 0x20000
#define AV_DISPOSITION_METADATA 0x40000
#define AV_DISPOSITION_DEPENDENT 0x80000 ///< dependent audio stream (mix_type=0 in mpegts)
#define AV_DISPOSITION_STILL_IMAGE 0x100000 ///< still images in video stream (still_picture_flag=1 in mpegts)
/**
* Options for behavior on timestamp wrap detection.
@ -1103,13 +1102,6 @@ typedef struct AVStream {
*/
int stream_identifier;
/**
* Details of the MPEG-TS program which created this stream.
*/
int program_num;
int pmt_version;
int pmt_stream_idx;
int64_t interleaver_chunk_size;
int64_t interleaver_chunk_duration;
@ -1267,7 +1259,6 @@ typedef struct AVProgram {
int program_num;
int pmt_pid;
int pcr_pid;
int pmt_version;
/*****************************************************************
* All fields below this line are not part of the public API. They
@ -1483,9 +1474,7 @@ typedef struct AVFormatContext {
* This flag is mainly intended for testing.
*/
#define AVFMT_FLAG_BITEXACT 0x0400
#if FF_API_LAVF_MP4A_LATM
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Deprecated, does nothing.
#endif
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload
#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
#if FF_API_LAVF_KEEPSIDE_FLAG
@ -1937,13 +1926,6 @@ typedef struct AVFormatContext {
* - decoding: set by user
*/
int max_streams;
/**
* Skip duration calcuation in estimate_timings_from_pts.
* - encoding: unused
* - decoding: set by user
*/
int skip_estimate_duration_from_pts;
} AVFormatContext;
#if FF_API_FORMAT_GET_SET

View file

@ -32,8 +32,8 @@
// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium)
// Also please add any ticket numbers that you believe might be affected here
#define LIBAVFORMAT_VERSION_MAJOR 58
#define LIBAVFORMAT_VERSION_MINOR 18
#define LIBAVFORMAT_VERSION_MICRO 104
#define LIBAVFORMAT_VERSION_MINOR 12
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \
@ -70,9 +70,6 @@
#ifndef FF_API_HLS_WRAP
#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_HLS_USE_LOCALTIME
#define FF_API_HLS_USE_LOCALTIME (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_KEEPSIDE_FLAG
#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
@ -97,12 +94,6 @@
#ifndef FF_API_NEXT
#define FF_API_NEXT (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_DASH_MIN_SEG_DURATION
#define FF_API_DASH_MIN_SEG_DURATION (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_LAVF_MP4A_LATM
#define FF_API_LAVF_MP4A_LATM (LIBAVFORMAT_VERSION_MAJOR < 59)
#endif
#ifndef FF_API_R_FRAME_RATE

View file

@ -66,7 +66,7 @@
#endif
/**
* Assert that floating point operations can be executed.
* Assert that floating point opperations can be executed.
*
* This will av_assert0() that the cpu is not in MMX state on X86
*/

View file

@ -41,7 +41,7 @@ typedef struct AVSubsampleEncryptionInfo {
* The size of this struct is not part of the public ABI.
*/
typedef struct AVEncryptionInfo {
/** The fourcc encryption scheme, in big-endian byte order. */
/** The fourcc encryption scheme. */
uint32_t scheme;
/**
@ -115,11 +115,6 @@ typedef struct AVEncryptionInitInfo {
*/
uint8_t* data;
uint32_t data_size;
/**
* An optional pointer to the next initialization info in the list.
*/
struct AVEncryptionInitInfo *next;
} AVEncryptionInitInfo;
/**
@ -129,7 +124,7 @@ typedef struct AVEncryptionInitInfo {
*
* @param subsample_count The number of subsamples.
* @param key_id_size The number of bytes in the key ID, should be 16.
* @param iv_size The number of bytes in the IV, should be 16.
* @param key_id_size The number of bytes in the IV, should be 16.
*
* @return The new AVEncryptionInfo structure, or NULL on error.
*/

View file

@ -1,5 +1,5 @@
/* Automatically generated by version.sh, do not manually edit! */
#ifndef AVUTIL_FFVERSION_H
#define AVUTIL_FFVERSION_H
#define FFMPEG_VERSION "N-92132-g0a41a8bf29"
#define FFMPEG_VERSION "4.0.2"
#endif /* AVUTIL_FFVERSION_H */

View file

@ -33,8 +33,6 @@
* allocated buffer or map it with mmap() when available.
* In case of success set *bufptr to the read or mmapped buffer, and
* *size to the size in bytes of the buffer in *bufptr.
* Unlike mmap this function succeeds with zero sized files, in this
* case *bufptr will be set to NULL and *size will be set to 0.
* The returned buffer must be released with av_file_unmap().
*
* @param log_offset loglevel offset used for logging

View file

@ -41,7 +41,6 @@ typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal;
*/
typedef struct AVCUDADeviceContext {
CUcontext cuda_ctx;
CUstream stream;
AVCUDADeviceContextInternal *internal;
} AVCUDADeviceContext;

View file

@ -289,7 +289,6 @@ typedef struct AVOption {
#define AV_OPT_FLAG_READONLY 128
#define AV_OPT_FLAG_BSF_PARAM (1<<8) ///< a generic parameter which can be set by the user for bit stream filtering
#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
#define AV_OPT_FLAG_DEPRECATED (1<<17) ///< set if option is deprecated, users should refer to AVOption.help text for more information
//FIXME think about enc-audio, ... style flags
/**

View file

@ -167,8 +167,12 @@ typedef struct AVPixFmtDescriptor {
/**
* The pixel format has an alpha channel. This is set on all formats that
* support alpha in some way, including AV_PIX_FMT_PAL8. The alpha is always
* straight, never pre-multiplied.
* support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can
* carry alpha as part of the palette. Details are explained in the
* AVPixelFormat enum, and are also encoded in the corresponding
* AVPixFmtDescriptor.
*
* The alpha is always straight, never pre-multiplied.
*
* If a codec or a filter does not support alpha, it should set all alpha to
* opaque, or use the equivalent pixel formats without alpha component, e.g.

View file

@ -42,10 +42,6 @@
* This is stored as BGRA on little-endian CPU architectures and ARGB on
* big-endian CPUs.
*
* @note
* If the resolution is not a multiple of the chroma subsampling factor
* then the chroma plane resolution must be rounded up.
*
* @par
* When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
* image data is stored in AVFrame.data[0]. The palette is transported in
@ -334,12 +330,6 @@ enum AVPixelFormat {
*/
AV_PIX_FMT_OPENCL,
AV_PIX_FMT_GRAY14BE, ///< Y , 14bpp, big-endian
AV_PIX_FMT_GRAY14LE, ///< Y , 14bpp, little-endian
AV_PIX_FMT_GRAYF32BE, ///< IEEE-754 single precision Y, 32bpp, big-endian
AV_PIX_FMT_GRAYF32LE, ///< IEEE-754 single precision Y, 32bpp, little-endian
AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};
@ -359,7 +349,6 @@ enum AVPixelFormat {
#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE)
#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE)
#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE)
#define AV_PIX_FMT_GRAY14 AV_PIX_FMT_NE(GRAY14BE, GRAY14LE)
#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
@ -408,8 +397,6 @@ enum AVPixelFormat {
#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE)
#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE)
#define AV_PIX_FMT_GRAYF32 AV_PIX_FMT_NE(GRAYF32BE, GRAYF32LE)
#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)

View file

@ -95,14 +95,6 @@ void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq,
void (*free_func)(void *msg));
/**
* Return the current number of messages in the queue.
*
* @return the current number of messages or AVERROR(ENOSYS) if lavu was built
* without thread support
*/
int av_thread_message_queue_nb_elems(AVThreadMessageQueue *mq);
/**
* Flush the message queue
*

View file

@ -79,8 +79,8 @@
*/
#define LIBAVUTIL_VERSION_MAJOR 56
#define LIBAVUTIL_VERSION_MINOR 19
#define LIBAVUTIL_VERSION_MICRO 101
#define LIBAVUTIL_VERSION_MINOR 14
#define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
LIBAVUTIL_VERSION_MINOR, \

View file

@ -0,0 +1,579 @@
/*
* Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)
*
* This file is part of libswresample
*
* libswresample is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* libswresample is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with libswresample; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SWRESAMPLE_SWRESAMPLE_H
#define SWRESAMPLE_SWRESAMPLE_H
/**
* @file
* @ingroup lswr
* libswresample public header
*/
/**
* @defgroup lswr libswresample
* @{
*
* Audio resampling, sample format conversion and mixing library.
*
* Interaction with lswr is done through SwrContext, which is
* allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
* must be set with the @ref avoptions API.
*
* The first thing you will need to do in order to use lswr is to allocate
* SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
* are using the former, you must set options through the @ref avoptions API.
* The latter function provides the same feature, but it allows you to set some
* common options in the same statement.
*
* For example the following code will setup conversion from planar float sample
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
* matrix). This is using the swr_alloc() function.
* @code
* SwrContext *swr = swr_alloc();
* av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
* av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
* av_opt_set_int(swr, "in_sample_rate", 48000, 0);
* av_opt_set_int(swr, "out_sample_rate", 44100, 0);
* av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
* av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
* @endcode
*
* The same job can be done using swr_alloc_set_opts() as well:
* @code
* SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
* AV_CH_LAYOUT_STEREO, // out_ch_layout
* AV_SAMPLE_FMT_S16, // out_sample_fmt
* 44100, // out_sample_rate
* AV_CH_LAYOUT_5POINT1, // in_ch_layout
* AV_SAMPLE_FMT_FLTP, // in_sample_fmt
* 48000, // in_sample_rate
* 0, // log_offset
* NULL); // log_ctx
* @endcode
*
* Once all values have been set, it must be initialized with swr_init(). If
* you need to change the conversion parameters, you can change the parameters
* using @ref AVOptions, as described above in the first example; or by using
* swr_alloc_set_opts(), but with the first argument the allocated context.
* You must then call swr_init() again.
*
* The conversion itself is done by repeatedly calling swr_convert().
* Note that the samples may get buffered in swr if you provide insufficient
* output space or if sample rate conversion is done, which requires "future"
* samples. Samples that do not require future input can be retrieved at any
* time by using swr_convert() (in_count can be set to 0).
* At the end of conversion the resampling buffer can be flushed by calling
* swr_convert() with NULL in and 0 in_count.
*
* The samples used in the conversion process can be managed with the libavutil
* @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
* function used in the following example.
*
* The delay between input and output, can at any time be found by using
* swr_get_delay().
*
* The following code demonstrates the conversion loop assuming the parameters
* from above and caller-defined functions get_input() and handle_output():
* @code
* uint8_t **input;
* int in_samples;
*
* while (get_input(&input, &in_samples)) {
* uint8_t *output;
* int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +
* in_samples, 44100, 48000, AV_ROUND_UP);
* av_samples_alloc(&output, NULL, 2, out_samples,
* AV_SAMPLE_FMT_S16, 0);
* out_samples = swr_convert(swr, &output, out_samples,
* input, in_samples);
* handle_output(output, out_samples);
* av_freep(&output);
* }
* @endcode
*
* When the conversion is finished, the conversion
* context and everything associated with it must be freed with swr_free().
* A swr_close() function is also available, but it exists mainly for
* compatibility with libavresample, and is not required to be called.
*
* There will be no memory leak if the data is not completely flushed before
* swr_free().
*/
#include <stdint.h>
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/samplefmt.h"
#include "libswresample/version.h"
/**
* @name Option constants
* These constants are used for the @ref avoptions interface for lswr.
* @{
*
*/
#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate
//TODO use int resample ?
//long term TODO can we enable this dynamically?
/** Dithering algorithms */
enum SwrDitherType {
SWR_DITHER_NONE = 0,
SWR_DITHER_RECTANGULAR,
SWR_DITHER_TRIANGULAR,
SWR_DITHER_TRIANGULAR_HIGHPASS,
SWR_DITHER_NS = 64, ///< not part of API/ABI
SWR_DITHER_NS_LIPSHITZ,
SWR_DITHER_NS_F_WEIGHTED,
SWR_DITHER_NS_MODIFIED_E_WEIGHTED,
SWR_DITHER_NS_IMPROVED_E_WEIGHTED,
SWR_DITHER_NS_SHIBATA,
SWR_DITHER_NS_LOW_SHIBATA,
SWR_DITHER_NS_HIGH_SHIBATA,
SWR_DITHER_NB, ///< not part of API/ABI
};
/** Resampling Engines */
enum SwrEngine {
SWR_ENGINE_SWR, /**< SW Resampler */
SWR_ENGINE_SOXR, /**< SoX Resampler */
SWR_ENGINE_NB, ///< not part of API/ABI
};
/** Resampling Filter Types */
enum SwrFilterType {
SWR_FILTER_TYPE_CUBIC, /**< Cubic */
SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall windowed sinc */
SWR_FILTER_TYPE_KAISER, /**< Kaiser windowed sinc */
};
/**
* @}
*/
/**
* The libswresample context. Unlike libavcodec and libavformat, this structure
* is opaque. This means that if you would like to set options, you must use
* the @ref avoptions API and cannot directly set values to members of the
* structure.
*/
typedef struct SwrContext SwrContext;
/**
* Get the AVClass for SwrContext. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
* @see av_opt_find().
* @return the AVClass of SwrContext
*/
const AVClass *swr_get_class(void);
/**
* @name SwrContext constructor functions
* @{
*/
/**
* Allocate SwrContext.
*
* If you use this function you will need to set the parameters (manually or
* with swr_alloc_set_opts()) before calling swr_init().
*
* @see swr_alloc_set_opts(), swr_init(), swr_free()
* @return NULL on error, allocated context otherwise
*/
struct SwrContext *swr_alloc(void);
/**
* Initialize context after user parameters have been set.
* @note The context must be configured using the AVOption API.
*
* @see av_opt_set_int()
* @see av_opt_set_dict()
*
* @param[in,out] s Swr context to initialize
* @return AVERROR error code in case of failure.
*/
int swr_init(struct SwrContext *s);
/**
* Check whether an swr context has been initialized or not.
*
* @param[in] s Swr context to check
* @see swr_init()
* @return positive if it has been initialized, 0 if not initialized
*/
int swr_is_initialized(struct SwrContext *s);
/**
* Allocate SwrContext if needed and set/reset common parameters.
*
* This function does not require s to be allocated with swr_alloc(). On the
* other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
* on the allocated context.
*
* @param s existing Swr context if available, or NULL if not
* @param out_ch_layout output channel layout (AV_CH_LAYOUT_*)
* @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*).
* @param out_sample_rate output sample rate (frequency in Hz)
* @param in_ch_layout input channel layout (AV_CH_LAYOUT_*)
* @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*).
* @param in_sample_rate input sample rate (frequency in Hz)
* @param log_offset logging level offset
* @param log_ctx parent logging context, can be NULL
*
* @see swr_init(), swr_free()
* @return NULL on error, allocated context otherwise
*/
struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
int log_offset, void *log_ctx);
/**
* @}
*
* @name SwrContext destructor functions
* @{
*/
/**
* Free the given SwrContext and set the pointer to NULL.
*
* @param[in] s a pointer to a pointer to Swr context
*/
void swr_free(struct SwrContext **s);
/**
* Closes the context so that swr_is_initialized() returns 0.
*
* The context can be brought back to life by running swr_init(),
* swr_init() can also be used without swr_close().
* This function is mainly provided for simplifying the usecase
* where one tries to support libavresample and libswresample.
*
* @param[in,out] s Swr context to be closed
*/
void swr_close(struct SwrContext *s);
/**
* @}
*
* @name Core conversion functions
* @{
*/
/** Convert audio.
*
* in and in_count can be set to 0 to flush the last few samples out at the
* end.
*
* If more input is provided than output space, then the input will be buffered.
* You can avoid this buffering by using swr_get_out_samples() to retrieve an
* upper bound on the required number of output samples for the given number of
* input samples. Conversion will run directly without copying whenever possible.
*
* @param s allocated Swr context, with parameters set
* @param out output buffers, only the first one need be set in case of packed audio
* @param out_count amount of space available for output in samples per channel
* @param in input buffers, only the first one need to be set in case of packed audio
* @param in_count number of input samples available in one channel
*
* @return number of samples output per channel, negative value on error
*/
int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
const uint8_t **in , int in_count);
/**
* Convert the next timestamp from input to output
* timestamps are in 1/(in_sample_rate * out_sample_rate) units.
*
* @note There are 2 slightly differently behaving modes.
* @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
* in this case timestamps will be passed through with delays compensated
* @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
* in this case the output timestamps will match output sample numbers.
* See ffmpeg-resampler(1) for the two modes of compensation.
*
* @param s[in] initialized Swr context
* @param pts[in] timestamp for the next input sample, INT64_MIN if unknown
* @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are
* function used internally for timestamp compensation.
* @return the output timestamp for the next output sample
*/
int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
/**
* @}
*
* @name Low-level option setting functions
* These functons provide a means to set low-level options that is not possible
* with the AVOption API.
* @{
*/
/**
* Activate resampling compensation ("soft" compensation). This function is
* internally called when needed in swr_next_pts().
*
* @param[in,out] s allocated Swr context. If it is not initialized,
* or SWR_FLAG_RESAMPLE is not set, swr_init() is
* called with the flag set.
* @param[in] sample_delta delta in PTS per sample
* @param[in] compensation_distance number of samples to compensate for
* @return >= 0 on success, AVERROR error codes if:
* @li @c s is NULL,
* @li @c compensation_distance is less than 0,
* @li @c compensation_distance is 0 but sample_delta is not,
* @li compensation unsupported by resampler, or
* @li swr_init() fails when called.
*/
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);
/**
* Set a customized input channel mapping.
*
* @param[in,out] s allocated Swr context, not yet initialized
* @param[in] channel_map customized input channel mapping (array of channel
* indexes, -1 for a muted channel)
* @return >= 0 on success, or AVERROR error code in case of failure.
*/
int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
/**
* Generate a channel mixing matrix.
*
* This function is the one used internally by libswresample for building the
* default mixing matrix. It is made public just as a utility function for
* building custom matrices.
*
* @param in_layout input channel layout
* @param out_layout output channel layout
* @param center_mix_level mix level for the center channel
* @param surround_mix_level mix level for the surround channel(s)
* @param lfe_mix_level mix level for the low-frequency effects channel
* @param rematrix_maxval if 1.0, coefficients will be normalized to prevent
* overflow. if INT_MAX, coefficients will not be
* normalized.
* @param[out] matrix mixing coefficients; matrix[i + stride * o] is
* the weight of input channel i in output channel o.
* @param stride distance between adjacent input channels in the
* matrix array
* @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
* @param log_ctx parent logging context, can be NULL
* @return 0 on success, negative AVERROR code on failure
*/
int swr_build_matrix(uint64_t in_layout, uint64_t out_layout,
double center_mix_level, double surround_mix_level,
double lfe_mix_level, double rematrix_maxval,
double rematrix_volume, double *matrix,
int stride, enum AVMatrixEncoding matrix_encoding,
void *log_ctx);
/**
* Set a customized remix matrix.
*
* @param s allocated Swr context, not yet initialized
* @param matrix remix coefficients; matrix[i + stride * o] is
* the weight of input channel i in output channel o
* @param stride offset between lines of the matrix
* @return >= 0 on success, or AVERROR error code in case of failure.
*/
int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
/**
* @}
*
* @name Sample handling functions
* @{
*/
/**
* Drops the specified number of output samples.
*
* This function, along with swr_inject_silence(), is called by swr_next_pts()
* if needed for "hard" compensation.
*
* @param s allocated Swr context
* @param count number of samples to be dropped
*
* @return >= 0 on success, or a negative AVERROR code on failure
*/
int swr_drop_output(struct SwrContext *s, int count);
/**
* Injects the specified number of silence samples.
*
* This function, along with swr_drop_output(), is called by swr_next_pts()
* if needed for "hard" compensation.
*
* @param s allocated Swr context
* @param count number of samples to be dropped
*
* @return >= 0 on success, or a negative AVERROR code on failure
*/
int swr_inject_silence(struct SwrContext *s, int count);
/**
* Gets the delay the next input sample will experience relative to the next output sample.
*
* Swresample can buffer data if more input has been provided than available
* output space, also converting between sample rates needs a delay.
* This function returns the sum of all such delays.
* The exact delay is not necessarily an integer value in either input or
* output sample rate. Especially when downsampling by a large value, the
* output sample rate may be a poor choice to represent the delay, similarly
* for upsampling and the input sample rate.
*
* @param s swr context
* @param base timebase in which the returned delay will be:
* @li if it's set to 1 the returned delay is in seconds
* @li if it's set to 1000 the returned delay is in milliseconds
* @li if it's set to the input sample rate then the returned
* delay is in input samples
* @li if it's set to the output sample rate then the returned
* delay is in output samples
* @li if it's the least common multiple of in_sample_rate and
* out_sample_rate then an exact rounding-free delay will be
* returned
* @returns the delay in 1 / @c base units.
*/
int64_t swr_get_delay(struct SwrContext *s, int64_t base);
/**
* Find an upper bound on the number of samples that the next swr_convert
* call will output, if called with in_samples of input samples. This
* depends on the internal state, and anything changing the internal state
* (like further swr_convert() calls) will may change the number of samples
* swr_get_out_samples() returns for the same number of input samples.
*
* @param in_samples number of input samples.
* @note any call to swr_inject_silence(), swr_convert(), swr_next_pts()
* or swr_set_compensation() invalidates this limit
* @note it is recommended to pass the correct available buffer size
* to all functions like swr_convert() even if swr_get_out_samples()
* indicates that less would be used.
* @returns an upper bound on the number of samples that the next swr_convert
* will output or a negative value to indicate an error
*/
int swr_get_out_samples(struct SwrContext *s, int in_samples);
/**
* @}
*
* @name Configuration accessors
* @{
*/
/**
* Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
*
* This is useful to check if the build-time libswresample has the same version
* as the run-time one.
*
* @returns the unsigned int-typed version
*/
unsigned swresample_version(void);
/**
* Return the swr build-time configuration.
*
* @returns the build-time @c ./configure flags
*/
const char *swresample_configuration(void);
/**
* Return the swr license.
*
* @returns the license of libswresample, determined at build-time
*/
const char *swresample_license(void);
/**
* @}
*
* @name AVFrame based API
* @{
*/
/**
* Convert the samples in the input AVFrame and write them to the output AVFrame.
*
* Input and output AVFrames must have channel_layout, sample_rate and format set.
*
* If the output AVFrame does not have the data pointers allocated the nb_samples
* field will be set using av_frame_get_buffer()
* is called to allocate the frame.
*
* The output AVFrame can be NULL or have fewer allocated samples than required.
* In this case, any remaining samples not written to the output will be added
* to an internal FIFO buffer, to be returned at the next call to this function
* or to swr_convert().
*
* If converting sample rate, there may be data remaining in the internal
* resampling delay buffer. swr_get_delay() tells the number of
* remaining samples. To get this data as output, call this function or
* swr_convert() with NULL input.
*
* If the SwrContext configuration does not match the output and
* input AVFrame settings the conversion does not take place and depending on
* which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED
* or the result of a bitwise-OR of them is returned.
*
* @see swr_delay()
* @see swr_convert()
* @see swr_get_delay()
*
* @param swr audio resample context
* @param output output AVFrame
* @param input input AVFrame
* @return 0 on success, AVERROR on failure or nonmatching
* configuration.
*/
int swr_convert_frame(SwrContext *swr,
AVFrame *output, const AVFrame *input);
/**
* Configure or reconfigure the SwrContext using the information
* provided by the AVFrames.
*
* The original resampling context is reset even on failure.
* The function calls swr_close() internally if the context is open.
*
* @see swr_close();
*
* @param swr audio resample context
* @param output output AVFrame
* @param input input AVFrame
* @return 0 on success, AVERROR on failure.
*/
int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);
/**
* @}
* @}
*/
#endif /* SWRESAMPLE_SWRESAMPLE_H */

View file

@ -0,0 +1,45 @@
/*
* Version macros.
*
* This file is part of libswresample
*
* libswresample is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* libswresample is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with libswresample; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SWRESAMPLE_VERSION_H
#define SWRESAMPLE_VERSION_H
/**
* @file
* Libswresample version macros
*/
#include "libavutil/avutil.h"
#define LIBSWRESAMPLE_VERSION_MAJOR 3
#define LIBSWRESAMPLE_VERSION_MINOR 1
#define LIBSWRESAMPLE_VERSION_MICRO 100
#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \
LIBSWRESAMPLE_VERSION_MINOR, \
LIBSWRESAMPLE_VERSION_MICRO)
#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \
LIBSWRESAMPLE_VERSION_MINOR, \
LIBSWRESAMPLE_VERSION_MICRO)
#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT
#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)
#endif /* SWRESAMPLE_VERSION_H */

View file

@ -0,0 +1,336 @@
/*
* Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SWSCALE_SWSCALE_H
#define SWSCALE_SWSCALE_H
/**
* @file
* @ingroup libsws
* external API header
*/
#include <stdint.h>
#include "libavutil/avutil.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "version.h"
/**
* @defgroup libsws libswscale
* Color conversion and scaling library.
*
* @{
*
* Return the LIBSWSCALE_VERSION_INT constant.
*/
unsigned swscale_version(void);
/**
* Return the libswscale build-time configuration.
*/
const char *swscale_configuration(void);
/**
* Return the libswscale license.
*/
const char *swscale_license(void);
/* values for the flags, the stuff on the command line is different */
#define SWS_FAST_BILINEAR 1
#define SWS_BILINEAR 2
#define SWS_BICUBIC 4
#define SWS_X 8
#define SWS_POINT 0x10
#define SWS_AREA 0x20
#define SWS_BICUBLIN 0x40
#define SWS_GAUSS 0x80
#define SWS_SINC 0x100
#define SWS_LANCZOS 0x200
#define SWS_SPLINE 0x400
#define SWS_SRC_V_CHR_DROP_MASK 0x30000
#define SWS_SRC_V_CHR_DROP_SHIFT 16
#define SWS_PARAM_DEFAULT 123456
#define SWS_PRINT_INFO 0x1000
//the following 3 flags are not completely implemented
//internal chrominance subsampling info
#define SWS_FULL_CHR_H_INT 0x2000
//input subsampling info
#define SWS_FULL_CHR_H_INP 0x4000
#define SWS_DIRECT_BGR 0x8000
#define SWS_ACCURATE_RND 0x40000
#define SWS_BITEXACT 0x80000
#define SWS_ERROR_DIFFUSION 0x800000
#define SWS_MAX_REDUCE_CUTOFF 0.002
#define SWS_CS_ITU709 1
#define SWS_CS_FCC 4
#define SWS_CS_ITU601 5
#define SWS_CS_ITU624 5
#define SWS_CS_SMPTE170M 5
#define SWS_CS_SMPTE240M 7
#define SWS_CS_DEFAULT 5
#define SWS_CS_BT2020 9
/**
* Return a pointer to yuv<->rgb coefficients for the given colorspace
* suitable for sws_setColorspaceDetails().
*
* @param colorspace One of the SWS_CS_* macros. If invalid,
* SWS_CS_DEFAULT is used.
*/
const int *sws_getCoefficients(int colorspace);
// when used for filters they must have an odd number of elements
// coeffs cannot be shared between vectors
typedef struct SwsVector {
double *coeff; ///< pointer to the list of coefficients
int length; ///< number of coefficients in the vector
} SwsVector;
// vectors can be shared
typedef struct SwsFilter {
SwsVector *lumH;
SwsVector *lumV;
SwsVector *chrH;
SwsVector *chrV;
} SwsFilter;
struct SwsContext;
/**
* Return a positive value if pix_fmt is a supported input format, 0
* otherwise.
*/
int sws_isSupportedInput(enum AVPixelFormat pix_fmt);
/**
* Return a positive value if pix_fmt is a supported output format, 0
* otherwise.
*/
int sws_isSupportedOutput(enum AVPixelFormat pix_fmt);
/**
* @param[in] pix_fmt the pixel format
* @return a positive value if an endianness conversion for pix_fmt is
* supported, 0 otherwise.
*/
int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);
/**
* Allocate an empty SwsContext. This must be filled and passed to
* sws_init_context(). For filling see AVOptions, options.c and
* sws_setColorspaceDetails().
*/
struct SwsContext *sws_alloc_context(void);
/**
* Initialize the swscaler context sws_context.
*
* @return zero or positive value on success, a negative value on
* error
*/
av_warn_unused_result
int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);
/**
* Free the swscaler context swsContext.
* If swsContext is NULL, then does nothing.
*/
void sws_freeContext(struct SwsContext *swsContext);
/**
* Allocate and return an SwsContext. You need it to perform
* scaling/conversion operations using sws_scale().
*
* @param srcW the width of the source image
* @param srcH the height of the source image
* @param srcFormat the source image format
* @param dstW the width of the destination image
* @param dstH the height of the destination image
* @param dstFormat the destination image format
* @param flags specify which algorithm and options to use for rescaling
* @param param extra parameters to tune the used scaler
* For SWS_BICUBIC param[0] and [1] tune the shape of the basis
* function, param[0] tunes f(1) and param[1] f´(1)
* For SWS_GAUSS param[0] tunes the exponent and thus cutoff
* frequency
* For SWS_LANCZOS param[0] tunes the width of the window function
* @return a pointer to an allocated context, or NULL in case of error
* @note this function is to be removed after a saner alternative is
* written
*/
struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
/**
* Scale the image slice in srcSlice and put the resulting scaled
* slice in the image in dst. A slice is a sequence of consecutive
* rows in an image.
*
* Slices have to be provided in sequential order, either in
* top-bottom or bottom-top order. If slices are provided in
* non-sequential order the behavior of the function is undefined.
*
* @param c the scaling context previously created with
* sws_getContext()
* @param srcSlice the array containing the pointers to the planes of
* the source slice
* @param srcStride the array containing the strides for each plane of
* the source image
* @param srcSliceY the position in the source image of the slice to
* process, that is the number (counted starting from
* zero) in the image of the first row of the slice
* @param srcSliceH the height of the source slice, that is the number
* of rows in the slice
* @param dst the array containing the pointers to the planes of
* the destination image
* @param dstStride the array containing the strides for each plane of
* the destination image
* @return the height of the output slice
*/
int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
const int srcStride[], int srcSliceY, int srcSliceH,
uint8_t *const dst[], const int dstStride[]);
/**
* @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg)
* @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg)
* @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x]
* @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x]
* @param brightness 16.16 fixed point brightness correction
* @param contrast 16.16 fixed point contrast correction
* @param saturation 16.16 fixed point saturation correction
* @return -1 if not supported
*/
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
int srcRange, const int table[4], int dstRange,
int brightness, int contrast, int saturation);
/**
* @return -1 if not supported
*/
int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
int *srcRange, int **table, int *dstRange,
int *brightness, int *contrast, int *saturation);
/**
* Allocate and return an uninitialized vector with length coefficients.
*/
SwsVector *sws_allocVec(int length);
/**
* Return a normalized Gaussian curve used to filter stuff
* quality = 3 is high quality, lower is lower quality.
*/
SwsVector *sws_getGaussianVec(double variance, double quality);
/**
* Scale all the coefficients of a by the scalar value.
*/
void sws_scaleVec(SwsVector *a, double scalar);
/**
* Scale all the coefficients of a so that their sum equals height.
*/
void sws_normalizeVec(SwsVector *a, double height);
#if FF_API_SWS_VECTOR
attribute_deprecated SwsVector *sws_getConstVec(double c, int length);
attribute_deprecated SwsVector *sws_getIdentityVec(void);
attribute_deprecated void sws_convVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_addVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_subVec(SwsVector *a, SwsVector *b);
attribute_deprecated void sws_shiftVec(SwsVector *a, int shift);
attribute_deprecated SwsVector *sws_cloneVec(SwsVector *a);
attribute_deprecated void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
#endif
void sws_freeVec(SwsVector *a);
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
float lumaSharpen, float chromaSharpen,
float chromaHShift, float chromaVShift,
int verbose);
void sws_freeFilter(SwsFilter *filter);
/**
* Check if context can be reused, otherwise reallocate a new one.
*
* If context is NULL, just calls sws_getContext() to get a new
* context. Otherwise, checks if the parameters are the ones already
* saved in context. If that is the case, returns the current
* context. Otherwise, frees context and gets a new context with
* the new parameters.
*
* Be warned that srcFilter and dstFilter are not checked, they
* are assumed to remain the same.
*/
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags, SwsFilter *srcFilter,
SwsFilter *dstFilter, const double *param);
/**
* Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.
*
* The output frame will have the same packed format as the palette.
*
* @param src source frame buffer
* @param dst destination frame buffer
* @param num_pixels number of pixels to convert
* @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src
*/
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
/**
* Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.
*
* With the palette format "ABCD", the destination frame ends up with the format "ABC".
*
* @param src source frame buffer
* @param dst destination frame buffer
* @param num_pixels number of pixels to convert
* @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src
*/
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
/**
* Get the AVClass for swsContext. It can be used in combination with
* AV_OPT_SEARCH_FAKE_OBJ for examining options.
*
* @see av_opt_find().
*/
const AVClass *sws_get_class(void);
/**
* @}
*/
#endif /* SWSCALE_SWSCALE_H */

View file

@ -0,0 +1,53 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef SWSCALE_VERSION_H
#define SWSCALE_VERSION_H
/**
* @file
* swscale version macros
*/
#include "libavutil/version.h"
#define LIBSWSCALE_VERSION_MAJOR 5
#define LIBSWSCALE_VERSION_MINOR 1
#define LIBSWSCALE_VERSION_MICRO 100
#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
LIBSWSCALE_VERSION_MINOR, \
LIBSWSCALE_VERSION_MICRO)
#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \
LIBSWSCALE_VERSION_MINOR, \
LIBSWSCALE_VERSION_MICRO)
#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT
#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#ifndef FF_API_SWS_VECTOR
#define FF_API_SWS_VECTOR (LIBSWSCALE_VERSION_MAJOR < 6)
#endif
#endif /* SWSCALE_VERSION_H */