使用 ffmpeg 库编码 H.264 视频时遇到的内存问题

时间:2015-03-30 08:03:19

标签: c ffmpeg h.264 libavcodec libavformat

我尝试使用ffmpeg的avfoundation库在OS X上进行屏幕捕获。我从屏幕捕获帧并使用H264将其编码到flv容器中。

这是程序的命令行输出:

Input #0, avfoundation, from 'Capture screen 0':
  Duration: N/A, start: 9.253649, bitrate: N/A
    Stream #0:0: Video: rawvideo (UYVY / 0x59565955), uyvy422, 1440x900, 14.58 tbr, 1000k tbn, 1000k tbc
raw video is inCodec
FLV (Flash Video)http://localhost:8090/test.flv
[libx264 @ 0x102038e00] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX
[libx264 @ 0x102038e00] profile High, level 4.0
[libx264 @ 0x102038e00] 264 - core 142 r2495 6a301b6 - H.264/MPEG-4 AVC codec - Copyleft 2003-2014 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=1 weightp=2 keyint=50 keyint_min=5 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=abr mbtree=1 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
[tcp @ 0x101a5fe70] Connection to tcp://localhost:8090 failed (Connection refused), trying next address
[tcp @ 0x101a5fe70] Connection to tcp://localhost:8090 failed: Connection refused
url_fopen failed: Operation now in progress
[flv @ 0x102038800] Using AVStream.codec.time_base as a timebase hint to the muxer is deprecated. Set AVStream.time_base instead.
encoded frame #0
encoded frame #1
......
encoded frame #49
encoded frame #50
testmee(8404,0x7fff7e05c300) malloc: *** error for object 0x102053e08: incorrect checksum for freed object - object was probably modified after being freed.
*** set a breakpoint in malloc_error_break to debug
(lldb) bt
* thread #10: tid = 0x43873, 0x00007fff95639286 libsystem_kernel.dylib`__pthread_kill + 10, stop reason = signal SIGABRT
  * frame #0: 0x00007fff95639286 libsystem_kernel.dylib`__pthread_kill + 10
    frame #1: 0x00007fff9623742f libsystem_pthread.dylib`pthread_kill + 90
    frame #2: 0x00007fff977ceb53 libsystem_c.dylib`abort + 129
    frame #3: 0x00007fff9ab59e06 libsystem_malloc.dylib`szone_error + 625
    frame #4: 0x00007fff9ab4f799 libsystem_malloc.dylib`small_malloc_from_free_list + 1105
    frame #5: 0x00007fff9ab4d3bc libsystem_malloc.dylib`szone_malloc_should_clear + 1449
    frame #6: 0x00007fff9ab4c877 libsystem_malloc.dylib`malloc_zone_malloc + 71
    frame #7: 0x00007fff9ab4b395 libsystem_malloc.dylib`malloc + 42
    frame #8: 0x00007fff94aa63d2 IOSurface`IOSurfaceClientLookupFromMachPort + 40
    frame #9: 0x00007fff94aa6b38 IOSurface`IOSurfaceLookupFromMachPort + 12
    frame #10: 0x00007fff92bfa6b2 CoreGraphics`_CGYDisplayStreamFrameAvailable + 342
    frame #11: 0x00007fff92f6759c CoreGraphics`CGYDisplayStreamNotification_server + 336
    frame #12: 0x00007fff92bfada6 CoreGraphics`display_stream_runloop_callout + 46
    frame #13: 0x00007fff956eba07 CoreFoundation`__CFMachPortPerform + 247
    frame #14: 0x00007fff956eb8f9 CoreFoundation`__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE1_PERFORM_FUNCTION__ + 41
    frame #15: 0x00007fff956eb86b CoreFoundation`__CFRunLoopDoSource1 + 475
    frame #16: 0x00007fff956dd3e7 CoreFoundation`__CFRunLoopRun + 2375
    frame #17: 0x00007fff956dc858 CoreFoundation`CFRunLoopRunSpecific + 296
    frame #18: 0x00007fff95792ef1 CoreFoundation`CFRunLoopRun + 97
    frame #19: 0x0000000105f79ff1 CMIOUnits`___lldb_unnamed_function2148$$CMIOUnits + 875
    frame #20: 0x0000000105f6f2c2 CMIOUnits`___lldb_unnamed_function2127$$CMIOUnits + 14
    frame #21: 0x00007fff97051765 CoreMedia`figThreadMain + 417
    frame #22: 0x00007fff96235268 libsystem_pthread.dylib`_pthread_body + 131
    frame #23: 0x00007fff962351e5 libsystem_pthread.dylib`_pthread_start + 176
    frame #24: 0x00007fff9623341d libsystem_pthread.dylib`thread_start + 13

我已附上我在下面使用的代码。

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <libavutil/opt.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
/* compile using
gcc -g -o stream test.c -lavformat -lavutil -lavcodec -lavdevice -lswscale
*/

// void show_av_device() {

//    inFmt->get_device_list(inFmtCtx, device_list);
//    printf("Device Info=============\n");
//    //avformat_open_input(&inFmtCtx,"video=Capture screen 0",inFmt,&inOptions);
//    printf("===============================\n");
// }

/* Report a failed libav call: decode the error code into a readable
 * message, print both the offending expression and the message to
 * stderr, then terminate the process with status 2. */
void AVFAIL (int code, const char *what) {
    char errbuf[500];

    av_strerror(code, errbuf, sizeof(errbuf));
    fprintf(stderr, "failed: %s\nerror: %s\n", what, errbuf);
    exit(2);
}

/* AVCHECK(f): evaluate a libav call returning an int and abort via
 * AVFAIL() when the result is negative (libav error convention).
 * AVCHECKPTR(p,f): same idea for calls returning a pointer — assign the
 * result to p and abort when it is NULL. #f stringizes the expression
 * so the failure message names the exact call that failed. */
#define AVCHECK(f) do { int e = (f); if (e < 0) AVFAIL(e, #f); } while (0)
#define AVCHECKPTR(p,f) do { p = (f); if (!p) AVFAIL(AVERROR_UNKNOWN, #f); } while (0)

/* One-time global libav initialisation: register all (de)muxers,
 * capture devices (required for the avfoundation input), networking
 * (required for the http:// output URL) and codecs. Must be called
 * before any other libav usage in main(). */
void registerLibs() {
    av_register_all();
    avdevice_register_all();
    avformat_network_init();
    avcodec_register_all();
}

int main(int argc, char *argv[]) {

    //conversion variables
    struct SwsContext *swsCtx = NULL;
    //input stream variables
    AVFormatContext   *inFmtCtx = NULL;
    AVCodecContext    *inCodecCtx = NULL;
    AVCodec           *inCodec = NULL;
    AVInputFormat     *inFmt = NULL;
    AVFrame           *inFrame = NULL;
    AVDictionary      *inOptions = NULL;
    const char *streamURL = "http://localhost:8090/test.flv";
    const char *name = "avfoundation";

//    AVFrame           *inFrameYUV = NULL;
    AVPacket          inPacket;


    //output stream variables
    AVCodecContext    *outCodecCtx = NULL;
    AVCodec           *outCodec;
    AVFormatContext   *outFmtCtx = NULL;
    AVOutputFormat    *outFmt = NULL;
    AVFrame           *outFrameYUV = NULL;
    AVStream          *stream = NULL;

    int               i, videostream, ret;
    int               numBytes, frameFinished;

    registerLibs();
    inFmtCtx = avformat_alloc_context(); //alloc input context
    av_dict_set(&inOptions, "pixel_format", "uyvy422", 0);
    av_dict_set(&inOptions, "probesize", "7000000", 0);

    inFmt = av_find_input_format(name);
    ret = avformat_open_input(&inFmtCtx, "Capture screen 0:", inFmt, &inOptions);
    if (ret < 0) {
        printf("Could not load the context for the input device\n");
        return -1;
    }
    if (avformat_find_stream_info(inFmtCtx, NULL) < 0) {
        printf("Could not find stream info for screen\n");
        return -1;
    }
    av_dump_format(inFmtCtx, 0, "Capture screen 0", 0);
    // inFmtCtx->streams is an array of pointers of size inFmtCtx->nb_stream

    videostream = av_find_best_stream(inFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &inCodec, 0);
    if (videostream == -1) {
        printf("no video stream found\n");
        return -1;
    } else {
        printf("%s is inCodec\n", inCodec->long_name);
    }
    inCodecCtx = inFmtCtx->streams[videostream]->codec;
    // open codec
    if (avcodec_open2(inCodecCtx, inCodec, NULL) > 0) {
        printf("Couldn't open codec");
        return -1;  // couldn't open codec
    }


        //setup output params
    outFmt = av_guess_format(NULL, streamURL, NULL);
    if(outFmt == NULL) {
        printf("output format was not guessed properly");
        return -1;
    }

    if((outFmtCtx = avformat_alloc_context()) < 0) {
        printf("output context not allocated. ERROR");
        return -1;
    }

    printf("%s", outFmt->long_name);

    outFmtCtx->oformat = outFmt;

    snprintf(outFmtCtx->filename, sizeof(outFmtCtx->filename), streamURL);
    printf("%s\n", outFmtCtx->filename);

    outCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if(!outCodec) {
        printf("could not find encoder for H264 \n" );
        return -1;
    }

    stream = avformat_new_stream(outFmtCtx, outCodec);
    outCodecCtx = stream->codec;
    avcodec_get_context_defaults3(outCodecCtx, outCodec);

    outCodecCtx->codec_id = AV_CODEC_ID_H264;
    outCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    outCodecCtx->flags = CODEC_FLAG_GLOBAL_HEADER;
    outCodecCtx->width = inCodecCtx->width;
    outCodecCtx->height = inCodecCtx->height;
    outCodecCtx->time_base.den = 25;
    outCodecCtx->time_base.num = 1;
    outCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    outCodecCtx->gop_size = 50;
    outCodecCtx->bit_rate = 400000;

    //setup output encoders etc
    if(stream) {
        ret = avcodec_open2(outCodecCtx, outCodec, NULL);
        if (ret < 0) {
            printf("Could not open output encoder");
            return -1;
        }
    }

    if (avio_open(&outFmtCtx->pb, outFmtCtx->filename, AVIO_FLAG_WRITE ) < 0) {
        perror("url_fopen failed");
    }

    avio_open_dyn_buf(&outFmtCtx->pb);
    ret = avformat_write_header(outFmtCtx, NULL);
    if (ret != 0) {
        printf("was not able to write header to output format");
        return -1;
    }
    unsigned char *pb_buffer;
    int len = avio_close_dyn_buf(outFmtCtx->pb, (unsigned char **)(&pb_buffer));
    avio_write(outFmtCtx->pb, (unsigned char *)pb_buffer, len);


    numBytes = avpicture_get_size(PIX_FMT_UYVY422, inCodecCtx->width, inCodecCtx->height);
    // Allocate video frame
    inFrame = av_frame_alloc();

    swsCtx = sws_getContext(inCodecCtx->width, inCodecCtx->height, inCodecCtx->pix_fmt, inCodecCtx->width,
                            inCodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
    int frame_count = 0;
    while(av_read_frame(inFmtCtx, &inPacket) >= 0) {
        if(inPacket.stream_index == videostream) {
            avcodec_decode_video2(inCodecCtx, inFrame, &frameFinished, &inPacket);
            // 1 Frame might need more than 1 packet to be filled
            if(frameFinished) {
                outFrameYUV = av_frame_alloc();

                uint8_t *buffer = (uint8_t *)av_malloc(numBytes);

                int ret = avpicture_fill((AVPicture *)outFrameYUV, buffer, PIX_FMT_YUV420P,
                                         inCodecCtx->width, inCodecCtx->height);
                if(ret < 0){
                    printf("%d is return val for fill\n", ret);
                    return -1;
                }
                //convert image to YUV
                sws_scale(swsCtx, (uint8_t const * const* )inFrame->data,
                          inFrame->linesize, 0, inCodecCtx->height,
                          outFrameYUV->data, outFrameYUV->linesize);
                //outFrameYUV now holds the YUV scaled frame/picture
                outFrameYUV->format = outCodecCtx->pix_fmt;
                outFrameYUV->width = outCodecCtx->width;
                outFrameYUV->height = outCodecCtx->height;


                AVPacket pkt;
                int got_output;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                outFrameYUV->pts = frame_count;

                ret = avcodec_encode_video2(outCodecCtx, &pkt, outFrameYUV, &got_output);
                if (ret < 0) {
                    fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
                    return -1;
                }

                if(got_output) {
                    if(stream->codec->coded_frame->key_frame) {
                        pkt.flags |= AV_PKT_FLAG_KEY;
                    }
                    pkt.stream_index = stream->index;
                    if(pkt.pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(pkt.pts, stream->codec->time_base, stream->time_base);
                    if(pkt.dts != AV_NOPTS_VALUE)
                        pkt.dts = av_rescale_q(pkt.dts, stream->codec->time_base, stream->time_base);
                    if(avio_open_dyn_buf(&outFmtCtx->pb)!= 0) {
                        printf("ERROR: Unable to open dynamic buffer\n");
                    }
                    ret = av_interleaved_write_frame(outFmtCtx, &pkt);
                    unsigned char *pb_buffer;
                    int len = avio_close_dyn_buf(outFmtCtx->pb, (unsigned char **)&pb_buffer);
                    avio_write(outFmtCtx->pb, (unsigned char *)pb_buffer, len);

                } else {
                    ret = 0;
                }
                if(ret != 0) {
                    fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
                    exit(1);
                }

                fprintf(stderr, "encoded frame #%d\n", frame_count);
                frame_count++;

                av_free_packet(&pkt);
                av_frame_free(&outFrameYUV);
                av_free(buffer);

            }
        }
        av_free_packet(&inPacket);
    }
    av_write_trailer(outFmtCtx);

    //close video stream
    if(stream) {
        avcodec_close(outCodecCtx);
    }
    for (i = 0; i < outFmtCtx->nb_streams; i++) {
        av_freep(&outFmtCtx->streams[i]->codec);
        av_freep(&outFmtCtx->streams[i]);
    }
    if (!(outFmt->flags & AVFMT_NOFILE))
    /* Close the output file. */
        avio_close(outFmtCtx->pb);
    /* free the output format context */
    avformat_free_context(outFmtCtx);

    // Free the YUV frame populated by the decoder
    av_free(inFrame);

    // Close the video codec (decoder)
    avcodec_close(inCodecCtx);

    // Close the input video file
    avformat_close_input(&inFmtCtx);

    return 1;

}

我不确定我在这里做错了什么。但是,我所观察到的是,对于每个被编码的帧,我的内存使用量增加了大约6MB。之后的回溯通常会导致以下两个罪魁祸首之一:

  1. avfoundation.m 中的 avf_read_frame 函数
  2. avpacket.h 中的 av_dup_packet 函数

另外,我能否获得一些关于如何正确使用 avio_open_dyn_buf 系列函数、以便通过 HTTP 进行流式传输的建议?我还在下面附上了我使用的 ffmpeg 库版本:

        ffmpeg version N-70876-g294bb6c Copyright (c) 2000-2015 the FFmpeg developers
      built with Apple LLVM version 6.0 (clang-600.0.56) (based on LLVM 3.5svn)
      configuration: --prefix=/usr/local --enable-gpl --enable-postproc --enable-pthreads --enable-libmp3lame --enable-libtheora --enable-libx264 --enable-libvorbis --disable-mmx --disable-ssse3 --disable-armv5te --disable-armv6 --disable-neon --enable-shared --disable-static --disable-stripping
      libavutil      54. 20.100 / 54. 20.100
      libavcodec     56. 29.100 / 56. 29.100
      libavformat    56. 26.101 / 56. 26.101
      libavdevice    56.  4.100 / 56.  4.100
      libavfilter     5. 13.101 /  5. 13.101
      libswscale      3.  1.101 /  3.  1.101
      libswresample   1.  1.100 /  1.  1.100
      libpostproc    53.  3.100 / 53.  3.100
    Hyper fast Audio and Video encoder
    

    Valgrind分析附在此处,因为我超出了堆栈溢出的字符限制。 http://pastebin.com/MPeRhjhN

1 个答案:

答案 0 :(得分:1)

您的代码包含以下内容:

uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

但没有匹配的 av_free()。这很可能就是你每帧泄漏约 6MB 内存的地方。注意 av_free(outFrameYUV) 只释放结构体本身,不会释放结构体内部引用的像素缓冲区。更正确的做法是使用 av_frame_unref(),而不是这些零散的 av_free()。

我也看到你在循环中调用它:

av_free(inFrame);

但是该帧(frame)的分配是在循环之外完成的,所以在第一次循环释放它之后,这个指针就已经悬空了。你应该在循环内部配对地分配和释放帧;要清空帧的引用,请使用 av_frame_unref() 而不是 av_free()。

我建议您使用asan或valgrind运行您的程序以检测更多此类问题,它会跟踪在何时以及如果不正确的情况下访问的内存,它是免费的。

相关问题