Background
AVPacket: stores compressed data (for video, an H.264 or similar bitstream; for audio, an AAC/MP3 bitstream). Roughly speaking, a packet carries one video NAL unit or several audio NAL units. An AVPacket holds the pre-decode data of one such unit; the structure does not embed the data itself but carries a pointer to the data buffer. If the bytes in the data field of the AVPacket passed to avcodec_send_packet begin with 00 00 00 01, the payload is NALU (Annex-B) formatted data.
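As a quick illustration, a helper like the following (is_annexb is an illustrative name, not an FFmpeg function) can be used to check for the Annex-B start code before handing a buffer to the decoder:

#include <stdint.h>
#include <stddef.h>

/* Returns 1 if the buffer starts with an Annex-B start code
 * (00 00 00 01 or 00 00 01), i.e. NALU-formatted data. */
static int is_annexb(const uint8_t *data, size_t size)
{
    if (size >= 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1)
        return 1;
    if (size >= 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
        return 1;
    return 0;
}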
Key structure members
AVBufferRef *buf;   // reference-counted buffer that owns the compressed data (the actual allocation lives here)
uint8_t *data;      // pointer to the compressed data (points into buf->data)
int size;           // length of the compressed data in bytes
int stream_index;   // index of the stream (video or audio) this packet belongs to
In practice (building an AVPacket that holds one NAL unit of length nLen)
AVPacket pkt1, *packet = &pkt1;
av_new_packet(packet, nLen);
memcpy(packet->data, data, nLen);
packet->size = nLen;
packet->stream_index = 0;
The packet can then be appended to a queue to wait for decoding, or passed to avcodec_decode_video2 (or the newer avcodec_send_packet/avcodec_receive_frame pair) to decode a frame. Once the packet is no longer needed, call av_free_packet or av_packet_unref to release its resources.
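Putting the pieces together, here is a minimal sketch, following the article's stack-allocated AVPacket style, of feeding one Annex-B NAL unit to an already opened H.264 decoder. The AVCodecContext *ctx, the raw data pointer and nLen are assumed to exist, and error handling is kept to a bare minimum:

#include <libavcodec/avcodec.h>
#include <string.h>

/* Build an AVPacket around one Annex-B NAL unit and decode it. */
static int decode_one_nal(AVCodecContext *ctx, const uint8_t *data, int nLen, AVFrame *frame)
{
    AVPacket pkt1, *packet = &pkt1;
    int ret;

    if (av_new_packet(packet, nLen) < 0)      /* allocates buf + data (with padding) */
        return -1;
    memcpy(packet->data, data, nLen);
    packet->stream_index = 0;

    ret = avcodec_send_packet(ctx, packet);   /* feed the compressed data */
    av_packet_unref(packet);                  /* drops the reference held in pkt->buf */
    if (ret < 0)
        return ret;

    /* AVERROR(EAGAIN) here simply means the decoder needs more input. */
    return avcodec_receive_frame(ctx, frame);
}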
Clarifications
1) Why not allocate memory for packet->data directly and copy the data into it? According to the comments in the structure definition, AVBufferRef only reference-counts the data and may be NULL, in which case the packet data is simply not reference-counted.
With that in mind, the code above could be rewritten as follows (not recommended):
av_init_packet(packet);                           // initialize the structure, in particular AVBufferRef *buf
packet->data = (uint8_t *)malloc(sizeof(uint8_t) * nLen);
memcpy(packet->data, data, nLen);
packet->size = nLen;
packet->stream_index = 0;
Note: av_init_packet(packet) initializes the structure, in particular AVBufferRef *buf, so that the decoder does not end up dereferencing an invalid pointer.
Not recommended because av_free_packet and av_packet_unref can no longer release the resources: both functions free the buf member of the AVPacket, not data, so the memory allocated for packet->data has to be freed by hand.
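If the data really must live in a caller-filled buffer, a sketch like the one below avoids the manual-free problem: allocate with av_malloc (including the trailing padding) and hand the buffer to FFmpeg's av_packet_from_data, which makes the packet reference-counted again so av_packet_unref releases it as usual. The helper name wrap_nal_in_packet is illustrative, not an FFmpeg API:

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <string.h>

static int wrap_nal_in_packet(AVPacket *packet, const uint8_t *data, int nLen)
{
    uint8_t *buf = av_malloc(nLen + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf, data, nLen);
    memset(buf + nLen, 0, AV_INPUT_BUFFER_PADDING_SIZE);   /* zero the padding */

    av_init_packet(packet);
    if (av_packet_from_data(packet, buf, nLen) < 0) {      /* on success, takes ownership of buf */
        av_free(buf);
        return AVERROR(ENOMEM);
    }
    packet->stream_index = 0;
    return 0;
}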
void av_free_packet(AVPacket *pkt)
{
    if (pkt) {
        if (pkt->buf)
            av_buffer_unref(&pkt->buf);
        pkt->data = NULL;
        pkt->size = 0;
        av_packet_free_side_data(pkt);
    }
}

void av_packet_unref(AVPacket *pkt)
{
    av_packet_free_side_data(pkt);
    av_buffer_unref(&pkt->buf);
    av_init_packet(pkt);
    pkt->data = NULL;
    pkt->size = 0;
}
2) Why does the data pointer inside AVBufferRef point to the same memory as the outer data pointer, while the two size fields differ?
Looking at av_new_packet:
int av_new_packet(AVPacket *pkt, int size)
{
    AVBufferRef *buf = NULL;
    int ret = packet_alloc(&buf, size);
    if (ret < 0)
        return ret;

    av_init_packet(pkt);
    pkt->buf  = buf;
    pkt->data = buf->data;
    pkt->size = size;

    return 0;
}
we can see that both data pointers point to the same block of memory, but the allocation is padded: AV_INPUT_BUFFER_PADDING_SIZE extra bytes are allocated as trailing padding, so buf->size is larger than pkt->size by exactly that amount.
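A quick way to see this is the small sketch below, which builds a packet (using 27 bytes purely as an example) and compares pkt.size with pkt.buf->size:

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVPacket pkt;
    if (av_new_packet(&pkt, 27) < 0)
        return 1;
    printf("pkt.size      = %d\n", pkt.size);             /* 27 */
    printf("pkt.buf->size = %d\n", (int)pkt.buf->size);   /* 27 + AV_INPUT_BUFFER_PADDING_SIZE */
    printf("same memory   = %d\n", pkt.data == pkt.buf->data);  /* 1: both point at the same buffer */
    av_packet_unref(&pkt);
    return 0;
}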
void av_init_packet(AVPacket *pkt)
{
    pkt->pts                  = AV_NOPTS_VALUE;
    pkt->dts                  = AV_NOPTS_VALUE;
    pkt->pos                  = -1;
    pkt->duration             = 0;
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->convergence_duration = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    pkt->flags                = 0;
    pkt->stream_index         = 0;
    pkt->buf                  = NULL;
    pkt->side_data            = NULL;
    pkt->side_data_elems      = 0;
}
// AV_INPUT_BUFFER_PADDING_SIZE bytes of zeroed padding are added at the end so that readers which access a few bytes past the end of the data do not touch invalid memory
static int packet_alloc(AVBufferRef **buf, int size)
{
    int ret;
    if (size < 0 || size >= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
        return AVERROR(EINVAL);

    ret = av_buffer_realloc(buf, size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (ret < 0)
        return ret;

    memset((*buf)->data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return 0;
}
av_buffer_realloc creates an AVBufferRef instance and allocates size bytes of memory for its data field:
int av_buffer_realloc(AVBufferRef **pbuf, int size)
{
    AVBufferRef *buf = *pbuf;
    uint8_t *tmp;

    if (!buf) {
        /* allocate a new buffer with av_realloc(), so it will be reallocatable
         * later */
        uint8_t *data = av_realloc(NULL, size);
        if (!data)
            return AVERROR(ENOMEM);

        buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
        if (!buf) {
            av_freep(&data);
            return AVERROR(ENOMEM);
        }

        buf->buffer->flags |= BUFFER_FLAG_REALLOCATABLE;
        *pbuf = buf;

        return 0;
    } else if (buf->size == size)
        return 0;

    if (!(buf->buffer->flags & BUFFER_FLAG_REALLOCATABLE) ||
        !av_buffer_is_writable(buf) || buf->data != buf->buffer->data) {
        /* cannot realloc, allocate a new reallocable buffer and copy data */
        AVBufferRef *new = NULL;

        av_buffer_realloc(&new, size);
        if (!new)
            return AVERROR(ENOMEM);

        memcpy(new->data, buf->data, FFMIN(size, buf->size));

        buffer_replace(pbuf, &new);
        return 0;
    }

    tmp = av_realloc(buf->buffer->data, size);
    if (!tmp)
        return AVERROR(ENOMEM);

    buf->buffer->data = buf->data = tmp;
    buf->buffer->size = buf->size = size;

    return 0;
}
// release the memory referenced through an AVBufferRef
void av_buffer_unref(AVBufferRef **buf)
{
    if (!buf || !*buf)
        return;

    buffer_replace(buf, NULL);
}

static void buffer_replace(AVBufferRef **dst, AVBufferRef **src)
{
    AVBuffer *b;

    b = (*dst)->buffer;

    if (src) {
        **dst = **src;
        av_freep(src);
    } else
        av_freep(dst);

    if (atomic_fetch_add_explicit(&b->refcount, -1, memory_order_acq_rel) == 1) {
        b->free(b->opaque, b->data);
        av_freep(&b);
    }
}
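The reference counting that buffer_replace relies on is easiest to see with a small standalone sketch of libavutil's buffer API (av_buffer_alloc, av_buffer_ref, av_buffer_get_ref_count): the underlying AVBuffer is freed only when the last reference is dropped.

#include <stdio.h>
#include <libavutil/buffer.h>

int main(void)
{
    AVBufferRef *a = av_buffer_alloc(64);            /* refcount == 1 */
    AVBufferRef *b = a ? av_buffer_ref(a) : NULL;    /* refcount == 2, same underlying AVBuffer */
    if (!a || !b)
        return 1;

    printf("refcount = %d\n", av_buffer_get_ref_count(a));  /* 2 */

    av_buffer_unref(&a);                             /* drops to 1, data still valid through b */
    printf("refcount = %d\n", av_buffer_get_ref_count(b));  /* 1 */

    av_buffer_unref(&b);                             /* drops to 0, buffer is actually freed */
    return 0;
}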
3) With the following data:
00 00 00 01 61 e1 40 01 58 2b fb 22 ff 29 7b 3f 6f 67 2f 29 fa 25 53 68 78 46 b1
calling avcodec_send_packet prints the following errors:
I:2018-01-06 15:06:05 ms:887:nal_unit_type: 1, nal_ref_idc: 3
I:2018-01-06 15:06:05 ms:888:non-existing PPS 0 referenced
I:2018-01-06 15:06:05 ms:888:decode_slice_header error
I:2018-01-06 15:06:05 ms:888:no frame!
With the following data, one frame is decoded correctly:
00 00 00 01 67 42 00 2a 96 35 40 f0 04 4f cb 37 01 01 01 40 00 01 c2 00 00 57 e4
01 00 00 00 01 68 ce 3c 80 00 00 00 01 06 e5 01 ef 80 00 00 03 00 00 00 01 65 b8
00 00 52 58 00 00 27 f5 d4 48 7e b4 41 07 24 60 95 2c 92 37 68 75 63 4c ad 3f b1
The NAL unit type is the low 5 bits of the byte that follows the start code. The failing buffer starts with 0x61, a non-IDR slice (type 1) that references PPS 0, which the decoder has never received, hence the "non-existing PPS 0 referenced" error. In the working buffer, 0x67 is the SPS (type 7), 0x68 the PPS (type 8), and 0x65 an IDR slice (type 5); once the parameter sets have been seen, the keyframe decodes and a picture is produced.
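For reference, a small sketch of how the NAL unit type can be read out of such a buffer (the nal_type helper is illustrative and assumes a 4-byte start code):

#include <stdio.h>
#include <stdint.h>

/* The NAL type is the low 5 bits of the byte after the 00 00 00 01 start code. */
static int nal_type(const uint8_t *nal_with_startcode)
{
    return nal_with_startcode[4] & 0x1F;
}

int main(void)
{
    const uint8_t sps[] = { 0x00, 0x00, 0x00, 0x01, 0x67 };
    const uint8_t idr[] = { 0x00, 0x00, 0x00, 0x01, 0x65 };
    printf("%d %d\n", nal_type(sps), nal_type(idr));   /* prints: 7 5 */
    return 0;
}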
Structure definitions
/**
* A reference to a data buffer.
*
* The size of this struct is not a part of the public ABI and it is not meant
* to be allocated directly.
*/
typedef struct AVBufferRef {
    AVBuffer *buffer;

    /**
     * The data buffer. It is considered writable if and only if
     * this is the only reference to the buffer, in which case
     * av_buffer_is_writable() returns 1.
     */
    uint8_t *data;
    /**
     * Size of data in bytes.
     */
    int      size;
} AVBufferRef;
/**
* This structure stores compressed data. It is typically exported by demuxers
* and then passed as input to decoders, or received as output from encoders and
* then passed to muxers.
*
* For video, it should typically contain one compressed frame. For audio it may
* contain several compressed frames.
*
* AVPacket is one of the few structs in FFmpeg, whose size is a part of public
* ABI. Thus it may be allocated on stack and no new fields can be added to it
* without libavcodec and libavformat major bump.
*
* The semantics of data ownership depends on the buf or destruct (deprecated)
* fields. If either is set, the packet data is dynamically allocated and is
* valid indefinitely until av_free_packet() is called (which in turn calls
* av_buffer_unref()/the destruct callback to free the data). If neither is set,
* the packet data is typically backed by some static buffer somewhere and is
* only valid for a limited time (e.g. until the next read call when demuxing).
*
* The side data is always allocated with av_malloc() and is freed in
* av_free_packet().
*/
typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;
    uint8_t *data;
    int     size;
    int     stream_index;
    /**
     * A combination of AV_PKT_FLAG values
     */
    int     flags;
    /**
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    struct {
        uint8_t *data;
        int      size;
        enum AVPacketSideDataType type;
    } *side_data;
    int side_data_elems;

    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int     duration;
#if FF_API_DESTRUCT_PACKET
    attribute_deprecated
    void  (*destruct)(struct AVPacket *);
    attribute_deprecated
    void   *priv;
#endif
    int64_t pos;    ///< byte position in stream, -1 if unknown

    /**
     * Time difference in AVStream->time_base units from the pts of this
     * packet to the point at which the output from the decoder has converged
     * independent from the availability of previous frames. That is, the
     * frames are virtually identical no matter if decoding started from
     * the very first frame or from this keyframe.
     * Is AV_NOPTS_VALUE if unknown.
     * This field is not the display duration of the current packet.
     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
     * set.
     *
     * The purpose of this field is to allow seeking in streams that have no
     * keyframes in the conventional sense. It corresponds to the
     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
     * essential for some types of subtitle streams to ensure that all
     * subtitles are correctly displayed after seeking.
     */
    int64_t convergence_duration;
} AVPacket;