The code is a simple modification of the "Simplest FFmpeg Transcoder" by 雷霄骅 (Lei Xiaohua).
https://pan.baidu.com/s/1w-fF5Ojz8M1ajKsc4DKdrg
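Compared with the original transcoder, the key change for producing HLS output (an .m3u8 playlist plus .ts segments) is the av_opt_set() call in open_output_file() below: it sets the hls muxer's private option hls_time (target duration of each segment, in seconds) before avformat_write_header(). The hls muxer itself is selected automatically by avformat_alloc_output_context2() when the output filename ends in .m3u8. As a minimal sketch of the same idea (not taken from the code below), such private options can also be handed over in an AVDictionary when the header is written:

// Sketch: pass hls muxer private options via an AVDictionary instead of av_opt_set().
// Assumes ofmt_ctx was created by avformat_alloc_output_context2() for an ".m3u8" output name.
AVDictionary *mux_opts = NULL;
av_dict_set(&mux_opts, "hls_time", "15", 0);       // target segment duration, in seconds
av_dict_set(&mux_opts, "hls_list_size", "0", 0);   // 0 = keep every segment in the playlist
ret = avformat_write_header(ofmt_ctx, &mux_opts);  // options the muxer did not consume remain in mux_opts
av_dict_free(&mux_opts);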
/*
 * Simplest FFmpeg Transcoder
 *
 * 雷霄骅 Lei Xiaohua
 * leixiaohua1020@126.com
 * Communication University of China / Digital TV Technology
 * http://blog.csdn.net/leixiaohua1020
 *
 * This program converts a video from one format to another.
 * It is the simplest possible video transcoder.
 */

#define _CRT_SECURE_NO_WARNINGS
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/avcodec.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
};

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;

typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;

static FilteringContext *filter_ctx;

/* For decoding: open the input file and a decoder for every audio/video stream. */
static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;
    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        AVStream *stream;
        AVCodecContext *codec_ctx;
        stream = ifmt_ctx->streams[i];
        codec_ctx = stream->codec;
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, avcodec_find_decoder(codec_ctx->codec_id), NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
    }
    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;
    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }
        in_stream = ifmt_ctx->streams[i];
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;
        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            /* In this example, we transcode to the same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = dec_ctx->time_base;
            }
            else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                //AVRational time_base = { 1, enc_ctx->sample_rate };
                //enc_ctx->time_base = time_base;
                enc_ctx->time_base.num = 1;
                enc_ctx->time_base.den = enc_ctx->sample_rate;
            }
            /* Third parameter can be used to pass settings to encoder */
            if (AV_CODEC_ID_H264 == encoder->id) {
                enc_ctx->me_range = 16;
                enc_ctx->max_qdiff = 4;
                //enc_ctx->qmin = 10;
                //enc_ctx->qmax = 51;
                enc_ctx->qcompress = 1.0;
            }
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
        }
        else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else {
            /* if this stream must be remuxed */
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
                ifmt_ctx->streams[i]->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    //av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 15, AV_OPT_SEARCH_CHILDREN); // or:
    av_opt_set(ofmt_ctx->priv_data, "hls_time", "15", AV_OPT_SEARCH_CHILDREN); // target duration of each TS segment, in seconds

    av_dump_format(ofmt_ctx, 0, filename, 1);
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }
    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }
    return 0;
}

static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        _snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num,
            dec_ctx->sample_aspect_ratio.den);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
            args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        _snprintf(args, sizeof(args),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
            dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
            av_get_sample_fmt_name(dec_ctx->sample_fmt),
            dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
            args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
            (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
            (uint8_t*)&enc_ctx->channel_layout,
            sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
            (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    }
    else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
        &inputs, &outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}

static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = (FilteringContext *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
            || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */

        ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
            ofmt_ctx->streams[i]->codec, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
            AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
    if (!got_frame)
        got_frame = &got_frame_local;
    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
        filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;
    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
        ofmt_ctx->streams[stream_index]->codec->time_base,
        ofmt_ctx->streams[stream_index]->time_base,
        (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
        ofmt_ctx->streams[stream_index]->codec->time_base,
        ofmt_ctx->streams[stream_index]->time_base,
        (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    enc_pkt.duration = av_rescale_q(enc_pkt.duration,
        ofmt_ctx->streams[stream_index]->codec->time_base,
        ofmt_ctx->streams[stream_index]->time_base);
    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;
    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
        frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }
    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
            filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }
        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }
    return ret;
}

static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;
    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
        CODEC_CAP_DELAY))
        return 0;
    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

/**
 * The input file's container format is FLV, with H.264 video and AAC audio;
 * the output container format is AVI, with MPEG-2 video and MP3 audio.
 */
int main(int argc, char* argv[])
{
    int ret;
    AVPacket packet = { 0 };  /* zero-initialize so the cleanup at "end:" is safe even on early failure */
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    av_register_all();
    avfilter_register_all();

    const char* input_filename = argv[1];
    const char* output_filename = argv[2];

    if ((ret = open_input_file(input_filename)) < 0)
        goto end;
    if ((ret = open_output_file(output_filename)) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;
    /* read all packets */
    while (1)
    {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
            stream_index);
        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            packet.dts = av_rescale_q_rnd(packet.dts,
                ifmt_ctx->streams[stream_index]->time_base,
                ifmt_ctx->streams[stream_index]->codec->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                ifmt_ctx->streams[stream_index]->time_base,
                ifmt_ctx->streams[stream_index]->codec->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }
            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    ; //goto end;
            }
            else {
                av_frame_free(&frame);
            }
        }
        else {
            /* remux this frame without reencoding */
            packet.dts = av_rescale_q_rnd(packet.dts,
                ifmt_ctx->streams[stream_index]->time_base,
                ofmt_ctx->streams[stream_index]->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                ifmt_ctx->streams[stream_index]->time_base,
                ofmt_ctx->streams[stream_index]->time_base,
                (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }
    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }
        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }
    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred\n");
    return (ret ? 1 : 0);
}
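To try it, build against an FFmpeg release that still provides the stream->codec and avcodec_decode_video2()/avcodec_encode_video2() style APIs used above (roughly the 2.x/3.x era; _snprintf and the %I64x format also assume MSVC), then pass an .m3u8 output name, for example transcoder.exe input.flv index.m3u8 (the executable name here is just an example). The hls muxer then writes the playlist plus numbered .ts segments of roughly hls_time seconds each. To also control the segment file names, the muxer exposes an hls_segment_filename option, set the same way before avformat_write_header(); the pattern below is only an illustration, not something from the original post:

av_opt_set(ofmt_ctx->priv_data, "hls_segment_filename", "seg_%05d.ts", AV_OPT_SEARCH_CHILDREN);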
Original article: https://www.cnblogs.com/baigoogledu/p/9110038.html