Heap allocation:
```c
AVPacket *newpkt = av_packet_alloc();
if (!newpkt) {
    // handle the allocation error
}
```
This function first allocates the AVPacket itself and then gives its structure members their initial values (internally this is done by calling av_init_packet(...)).
av_packet_unref(newpkt) drops the packet's reference. As the attached source shows, it first releases the packet's side data and the reference to its data buffer (av_buffer_unref decrements the buffer's reference count and frees the data once the count reaches zero; if the packet holds no buffer, nothing is freed), then resets the packet with av_init_packet so it is ready for reuse. Every successful avcodec_receive_packet(...) hands back a packet holding one such reference, so in principle each call should be paired with an av_packet_unref(...); in practice avcodec_receive_packet itself calls av_packet_unref on the packet at its very start (see the source below), so there is no need to unref manually between calls.
av_packet_free(...) releases newpkt itself: it first calls av_packet_unref(...) to drop the data reference, then calls av_freep(...) to free the AVPacket structure.
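Putting the three calls together, a minimal sketch of the heap-based drain loop might look like this (encodec_ctx is assumed to be an already-opened encoder context, as in the stack snippet below):

```c
#include <libavcodec/avcodec.h>

// Drain every pending packet from an opened encoder context.
static int drain_encoder(AVCodecContext *encodec_ctx)
{
    AVPacket *newpkt = av_packet_alloc();   // allocates the struct and initializes its fields
    if (!newpkt)
        return AVERROR(ENOMEM);

    // 0 means a packet was returned; AVERROR(EAGAIN)/AVERROR_EOF end the loop
    while (avcodec_receive_packet(encodec_ctx, newpkt) == 0) {
        // consume newpkt->data / newpkt->size here; no av_packet_unref()
        // is needed between iterations, because avcodec_receive_packet()
        // unrefs the packet on entry (see the source in the appendix)
    }

    av_packet_free(&newpkt);                // unref the buffer, then free the AVPacket itself
    return 0;
}
```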
Stack allocation:
```c
AVPacket send_pkt;
av_init_packet(&send_pkt);
avcodec_receive_packet(encodec_ctx, &send_pkt);
```
Here av_init_packet() must be called. Otherwise, avcodec_receive_packet (source attached below) starts by calling av_packet_unref, and since the fields of send_pkt still hold random stack values, av_packet_unref treats those garbage pointers as live allocations and tries to release them, crashing the process with a segmentation fault. The same holds for the old avcodec_encode_video2(): with stack allocation it likewise requires av_init_packet, because avcodec_encode_video2 sees that send_pkt.data is non-NULL (garbage) and frees it, again segfaulting. (In my tests I never called av_packet_unref manually after avcodec_encode_video2, yet the process's memory footprint stayed flat, so avcodec_encode_video2 clearly unrefs and releases the packet memory internally.)
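For comparison, here is a minimal sketch of the stack-based pattern, under the same assumption of an opened encoder context encodec_ctx. Note that av_init_packet() does not touch the data and size members, so they are cleared explicitly:

```c
#include <libavcodec/avcodec.h>

static int drain_encoder_stack(AVCodecContext *encodec_ctx)
{
    AVPacket send_pkt;
    av_init_packet(&send_pkt);   // reset buf, side_data, etc.; without this the
                                 // fields hold random stack values and the unref
                                 // inside avcodec_receive_packet() would segfault
    send_pkt.data = NULL;        // av_init_packet() does not touch data/size,
    send_pkt.size = 0;           // so clear them explicitly

    while (avcodec_receive_packet(encodec_ctx, &send_pkt) == 0) {
        // consume send_pkt here
    }

    av_packet_unref(&send_pkt);  // defensive: drop any reference still held
                                 // before send_pkt goes out of scope
    return 0;
}
```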
From another article, which looks at AVPacket and AVFrame allocation and release from the perspective of the codec interfaces avcodec_send_frame and avcodec_receive_packet:
Stack allocation:
```c
AVPacket read_pkt;
av_read_frame(ic, &read_pkt);
```
Every packet returned by av_read_frame must be released manually with av_packet_unref, because av_read_frame itself never unrefs it for you. With stack allocation, on the other hand, there is no need to call av_init_packet before av_read_frame: inside av_read_frame, read_frame_internal already calls av_init_packet (both sources are attached below).
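The resulting demuxing loop is then just the following minimal sketch (ic is assumed to be an opened AVFormatContext, as in the snippet above):

```c
#include <libavformat/avformat.h>

static void read_all_packets(AVFormatContext *ic)
{
    AVPacket read_pkt;   // no av_init_packet() needed: read_frame_internal() does it

    while (av_read_frame(ic, &read_pkt) >= 0) {
        // consume read_pkt here
        av_packet_unref(&read_pkt);   // mandatory: av_read_frame() does not unref for us
    }
}
```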
Appendix:
libavcodec/encode.c:
```c
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    av_packet_unref(avpkt);

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->codec->receive_packet) {
        if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            return AVERROR_EOF;
        return avctx->codec->receive_packet(avctx, avpkt);
    }

    // Emulation via old API.

    if (!avctx->internal->buffer_pkt_valid) {
        int got_packet;
        int ret;
        if (!avctx->internal->draining)
            return AVERROR(EAGAIN);

        ret = do_encode(avctx, NULL, &got_packet);
        if (ret < 0)
            return ret;
        if (ret >= 0 && !got_packet)
            return AVERROR_EOF;
    }

    av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
    avctx->internal->buffer_pkt_valid = 0;
    return 0;
}
```
libavcodec/avpacket.c:
```c
void av_packet_unref(AVPacket *pkt)
{
    av_packet_free_side_data(pkt);
    av_buffer_unref(&pkt->buf);
    av_init_packet(pkt);
    pkt->data = NULL;
    pkt->size = 0;
}
```
```c
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret;
    AVPacket user_pkt = *avpkt;
    int needs_realloc = !user_pkt.data;

    *got_packet_ptr = 0;

    if (CONFIG_FRAME_THREAD_ENCODER &&
        avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);

    if ((avctx->flags & CODEC_FLAG_PASS1) && avctx->stats_out)
        avctx->stats_out[0] = '\0';

    if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
        av_free_packet(avpkt);
        av_init_packet(avpkt);
        avpkt->size = 0;
        return 0;
    }

    // validate the input
    if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
        return AVERROR(EINVAL);

    av_assert0(avctx->codec->encode2);

    // encode
    ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
    av_assert0(ret <= 0);

    if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
        needs_realloc = 0;
        if (user_pkt.data) {
            if (user_pkt.size >= avpkt->size) {
                memcpy(user_pkt.data, avpkt->data, avpkt->size);
            } else {
                av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                avpkt->size = user_pkt.size;
                ret = -1;
            }
            avpkt->buf      = user_pkt.buf;
            avpkt->data     = user_pkt.data;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
            avpkt->destruct = user_pkt.destruct;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        } else {
            if (av_dup_packet(avpkt) < 0) {
                ret = AVERROR(ENOMEM);
            }
        }
    }

    if (!ret) {
        if (!*got_packet_ptr)
            avpkt->size = 0;
        else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
            avpkt->pts = avpkt->dts = frame->pts;

        if (needs_realloc && avpkt->data) {
            ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (ret >= 0)
                avpkt->data = avpkt->buf->data;
        }

        avctx->frame_number++;
    }

    if (ret < 0 || !*got_packet_ptr)
        av_free_packet(avpkt);
    else
        av_packet_merge_side_data(avpkt);

    emms_c();
    return ret;
}
```
libavformat/utils.c:
```c
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        // if the generate-pts flag is not set, read the packet from the
        // packet buffer or straight from the IO layer
        ret = s->internal->packet_buffer
              ? read_from_packet_buffer(&s->internal->packet_buffer,
                                        &s->internal->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    for (;;) {
        AVPacketList *pktl = s->internal->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
                            // not B-frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = s->internal->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            st = s->streams[next_pkt->stream_index];
            if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = read_from_packet_buffer(&s->internal->packet_buffer,
                                              &s->internal->packet_buffer_end, pkt);
                goto return_packet;
            }
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
                            &s->internal->packet_buffer_end, 1);
        av_packet_unref(pkt);
        if (ret < 0)
            return ret;
    }

return_packet:

    st = s->streams[pkt->stream_index];
    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
```
```c
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret = 0, i, got_packet = 0;
    AVDictionary *metadata = NULL;

    // initialize the packet
    av_init_packet(pkt);

    // while no complete packet has been produced and the parse queue is empty:
    // read data from the IO layer and feed it to the matching parser until the
    // parser reports one complete frame of stream data
    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

        /* read next packet */
        // read a packet from the IO layer
        ret = ff_read_packet(s, &cur_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            // drain any data still buffered in the parsers
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[cur_pkt.stream_index];

        /* update context if required */
        if (st->internal->need_context_update) {
            if (avcodec_is_open(st->internal->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(st->internal->avctx);
                st->info->found_decoder = 0;
            }

            /* close parser, because it depends on the codec */
            if (st->parser && st->internal->avctx->codec_id != st->codecpar->codec_id) {
                av_parser_close(st->parser);
                st->parser = NULL;
            }

            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
            if (ret < 0)
                return ret;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* update deprecated public codec context */
            ret = avcodec_parameters_to_context(st->codec, st->codecpar);
            if (ret < 0)
                return ret;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            st->internal->need_context_update = 0;
        }

        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts),
                   av_ts2str(cur_pkt.dts),
                   cur_pkt.size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   cur_pkt.stream_index,
                   av_ts2str(cur_pkt.pts), av_ts2str(cur_pkt.dts),
                   cur_pkt.size, cur_pkt.duration, cur_pkt.flags);

        // if the stream needs parsing, no parser has been created yet, and
        // AVFMT_FLAG_NOPARSE is not set on the format context, initialize the parser
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        // if the stream does not need parsing, or no parser is available
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            // parse the packet; this invokes the matching video or audio parser
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
            // for audio, pick up sample rate, bit rate, channel count and channel layout
            st->codecpar->sample_rate = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate = st->internal->avctx->bit_rate;
            st->codecpar->channels = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id = st->internal->avctx->codec_id;
        } else {
            /* free packet */
            // release the packet
            av_packet_unref(&cur_pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)   // if this packet is a key packet
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_packet_unref(&cur_pkt);
            if (got_packet) {
                *pkt = cur_pkt;
            }
            got_packet = 0;
        }
    }

    // if no complete packet was produced and the parse queue is not empty,
    // read a packet from the parse queue
    if (!got_packet && s->internal->parse_queue)
        ret = read_from_packet_buffer(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    // on success, compute timestamps and related fields
    if (ret >= 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int discard_padding = 0;
        if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= st->first_discard_sample &&
                sample < st->last_discard_sample)
                discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
        }
        if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            st->skip_samples = st->start_skip_samples;
        if (st->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, st->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
            }
            st->skip_samples = 0;
        }

        if (st->inject_global_side_data) {
            for (i = 0; i < st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            st->inject_global_side_data = 0;
        }

#if FF_API_LAVF_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
        if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
            av_packet_merge_side_data(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }

    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    if (metadata) {
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    }

#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
#endif

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    return ret;
}
```