ffmpeg录制屏幕并将视频文件作为.mpg保存到磁盘

时间:2015-01-05 04:05:26

标签: windows ffmpeg gdi video-encoding screen-grab

我想记录我的电脑屏幕(在我的Windows机器上使用gdigrab)并将保存的视频文件存储在我的磁盘上作为mp4或mpg文件。我找到了一个示例代码来抓取屏幕并显示它在SDL窗口中:http://xwk.iteye.com/blog/2125720(代码位于页面底部并有英文版本),ffmpeg多路复用示例https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html似乎能够帮助将音频和视频编码为所需的输出视频文件。

我试图将这两者结合起来:使用一个格式上下文来抓取屏幕(我的代码中的 AVFormatContext *pFormatCtx),并使用另一个单独的格式上下文来写出所需的视频文件(AVFormatContext *outFormatContextEncoded)。如我的代码所示,从输入流(屏幕抓取流)读取数据包后,我直接把这些数据包写入输出文件。我保留了SDL代码,这样就可以看到正在录制的内容。下面是我修改后的 write_video_frame() 函数。

代码构建正常,但vlc无法播放输出视频。当我运行命令

ffmpeg -i filename.mpg

我得到了这个输出

[mpeg @ 003fed20] probed stream 0 failed
[mpeg @ 003fed20] Stream #0: not enough frames to estimate rate; consider increasing probesize
[mpeg @ 003fed20] Could not find codec parameters for stream 0 (Video: none): unknown codec
Consider increasing the value for the 'analyzeduration' and 'probesize' options
karamage.mpg: could not find codec parameters
Input #0, mpeg, from 'karamage.mpg':
  Duration: 19:30:09.25, start: 37545.438756, bitrate: 2 kb/s
    Stream #0:0[0x1e0]: Video: none, 90k tbr, 90k tbn
At least one output file must be specified

我在这里做错了吗?我是ffmpeg的新手,对此有任何指导都非常感谢。谢谢你的时间。

/*
 * Screen capture (gdigrab) -> SDL preview, plus an attempt to mux the
 * capture into an MPEG file via a second output format context.
 *
 * NOTE(review): the output path never re-encodes the captured frames --
 * the raw gdigrab packets are forwarded straight to the MPEG muxer (see
 * the write_video_frame() call in the read loop), which is why ffprobe
 * reports "Video: none ... unknown codec" on the resulting file.
 */
int main(int argc, char* argv[])
{

    AVFormatContext *pFormatCtx;

    int             i, videoindex;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;

    av_register_all();
    avformat_network_init();

    //Locally defined structure (from the ffmpeg muxing example): wraps the
    //output AVStream together with its codec context and timing state.
    OutputStream outVideoStream = { 0 };

    const char *filename;
    AVOutputFormat *outFormatEncoded;
    AVFormatContext *outFormatContextEncoded;
    AVCodec *videoCodec;

    filename="karamage.mpg";

    int ret1;

    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;


    AVDictionary *opt = NULL;



    //Allocate the input (capture) format context.
    pFormatCtx = avformat_alloc_context();

    //
    //Use this when opening a local file instead of the capture device.
    //char filepath[]="src01_480x272_22.h265";
    //avformat_open_input(&pFormatCtx,filepath,NULL,NULL)

    //Register capture devices (needed before av_find_input_format("gdigrab")).
    avdevice_register_all();

    //Use gdigrab
    AVDictionary* options = NULL;
    //Optional capture options; all commented out, so the full desktop is
    //grabbed at the device's default frame rate.
    //grabbing frame rate
    //av_dict_set(&options,"framerate","5",0);
    //The distance from the left edge of the screen or desktop
    //av_dict_set(&options,"offset_x","20",0);
    //The distance from the top edge of the screen or desktop
    //av_dict_set(&options,"offset_y","40",0);
    //Video frame size. The default is to capture the full screen
    //av_dict_set(&options,"video_size","640x480",0);
    AVInputFormat *ifmt=av_find_input_format("gdigrab");
    if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
        printf("Couldn't open input stream.\n");
        return -1;
    }

    if(avformat_find_stream_info(pFormatCtx,NULL)<0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    //Find the (first) video stream in the capture input.
    videoindex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) 
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
            videoindex=i;
            break;
        }
    if(videoindex==-1)
    {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    //Open a decoder for the captured stream so frames can be shown in SDL.
    pCodecCtx=pFormatCtx->streams[videoindex]->codec;
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    {
        printf("Could not open codec.\n");
        return -1;
    }


    //pFrame receives decoded capture frames; pFrameYUV is the YUV420P
    //conversion target used for the SDL overlay.
    //(avcodec_alloc_frame() is the deprecated predecessor of av_frame_alloc().)
    AVFrame *pFrame,*pFrameYUV;
    pFrame=avcodec_alloc_frame();
    pFrameYUV=avcodec_alloc_frame();

    //PIX_FMT_YUV420P: planar YUV 4:2:0, the format SDL's YV12 overlay wants.
    uint8_t *out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));


    //Point pFrameYUV's data/linesize at the freshly allocated buffer.
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

    //<<<<<<<<<<<-------PREP WORK TO WRITE ENCODED VIDEO FILES-----

    //Deduce the output container from the file extension (.mpg -> MPEG-PS).
    avformat_alloc_output_context2(&outFormatContextEncoded, NULL, NULL, filename);
    if (!outFormatContextEncoded) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&outFormatContextEncoded, NULL, "mpeg", filename);
    }
    if (!outFormatContextEncoded)
        return 1;

    outFormatEncoded=outFormatContextEncoded->oformat;


     //Create the output video stream (audio is never added here despite the
     //have_audio/encode_audio flags above).

    if (outFormatEncoded->video_codec != AV_CODEC_ID_NONE) {

        //add_stream() (defined elsewhere) sets the encoder parameters.
        add_stream(&outVideoStream, outFormatContextEncoded, &videoCodec, outFormatEncoded->video_codec);
        have_video = 1;
        encode_video = 1;
    }


     // Now that all the parameters are set, we can open the audio and
     // video codecs and allocate the necessary encode buffers. 
    if (have_video)
        open_video(outFormatContextEncoded, videoCodec, &outVideoStream, opt);

     av_dump_format(outFormatContextEncoded, 0, filename, 1);


      /* open the output file, if needed */
    if (!(outFormatEncoded->flags & AVFMT_NOFILE)) {
        ret1 = avio_open(&outFormatContextEncoded->pb, filename, AVIO_FLAG_WRITE);
        if (ret1 < 0) {
            //fprintf(stderr, "Could not open '%s': %s\n", filename,
            //        av_err2str(ret));
            fprintf(stderr, "Could not open your dumb file.\n");
            return 1;
        }
    }


    /* Write the stream header, if any. */
    ret1 = avformat_write_header(outFormatContextEncoded, &opt);
    if (ret1 < 0) {
        //fprintf(stderr, "Error occurred when opening output file: %s\n",
         //       av_err2str(ret));
        fprintf(stderr, "Error occurred when opening output file\n");
        return 1;
    }


    //<<<<<<<<<<<-------PREP WORK TO WRITE ENCODED VIDEO FILES-----

    //SDL----------------------------
    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
        printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
        return -1;
    } 
    int screen_w=640,screen_h=360;
    const SDL_VideoInfo *vi = SDL_GetVideoInfo();
    //Preview window is half of the desktop's width and height.
    screen_w = vi->current_w/2;
    screen_h = vi->current_h/2;
    SDL_Surface *screen; 
    screen = SDL_SetVideoMode(screen_w, screen_h, 0,0);

    if(!screen) {  
        printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError());  
        return -1;
    }
    SDL_Overlay *bmp; 
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,SDL_YV12_OVERLAY, screen); 
    SDL_Rect rect;
    //SDL End------------------------
    int ret, got_picture;

    AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));

    //Initialize the reusable read packet.
     av_init_packet(packet);


    //Output Information-----------------------------
    printf("File Information---------------------\n");
    av_dump_format(pFormatCtx,0,NULL,0);
    printf("-------------------------------------------------\n");

    //Scaler: capture pixel format -> YUV420P at the same dimensions.
    struct SwsContext *img_convert_ctx;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
    //------------------------------
    //
    while(av_read_frame(pFormatCtx, packet)>=0)
    {

        if(packet->stream_index==videoindex)
        {
            //Decode the capture packet into pFrame (for the SDL preview).
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if(ret < 0)
            {
                printf("Decode Error.\n");
                return -1;
            }
            if(got_picture)
            {

                //NOTE(review): this forwards the RAW capture packet to the
                //output muxer without encoding it -- the packet was produced
                //by gdigrab, not by the outVideoStream encoder. This is the
                //likely cause of the unplayable output file.
                write_video_frame(outFormatContextEncoded, &outVideoStream,packet);


                //FREE AREA--END
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);


                SDL_LockYUVOverlay(bmp);
                //SDL_YV12_OVERLAY stores planes as Y,V,U, so the U and V
                //plane pointers/pitches are deliberately swapped here.
                bmp->pixels[0]=pFrameYUV->data[0];
                bmp->pixels[2]=pFrameYUV->data[1];
                bmp->pixels[1]=pFrameYUV->data[2];     
                bmp->pitches[0]=pFrameYUV->linesize[0];
                bmp->pitches[2]=pFrameYUV->linesize[1];   
                bmp->pitches[1]=pFrameYUV->linesize[2];
                SDL_UnlockYUVOverlay(bmp); 
                rect.x = 0;    
                rect.y = 0;    
                rect.w = screen_w;    
                rect.h = screen_h;  
                SDL_DisplayYUVOverlay(bmp, &rect); 
                //Crude pacing: 40 ms per frame (~25 fps preview).
                SDL_Delay(40);
            }
        }
        av_free_packet(packet);
    }//THE LOOP TO PULL PACKETS FROM THE FORMAT CONTEXT ENDS HERE.

    //AFTER THE WHILE LOOP WE DO SOME CLEANING

    //av_read_pause(context);


     av_write_trailer(outFormatContextEncoded);
     close_stream(outFormatContextEncoded, &outVideoStream);
     if (!(outFormatContextEncoded->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(outFormatContextEncoded->pb);

    /* free the stream */
    avformat_free_context(outFormatContextEncoded);



    //NOTE(review): pFrame is never freed and SDL_Quit() is not called here.
    sws_freeContext(img_convert_ctx);



    SDL_Quit();

    av_free(out_buffer);
    av_free(pFrameYUV);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}



/*
 * Send one (already encoded) video packet to the muxer.
 *
 * oc    - output (muxing) format context
 * ost   - locally defined wrapper around the output AVStream
 * pkt11 - packet read from the capture stream, forwarded as-is
 *
 * Returns 1 when writing for this call is finished; exits the process on a
 * write error.
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost,AVPacket * pkt11)
{
    int ret;
    AVCodecContext *c;
    /* FIX: `frame` was declared but never assigned in the original (the
     * get_video_frame() call was commented out), yet it was read in the
     * AVFMT_RAWPICTURE branch -- undefined behavior. Initialize to NULL so
     * that branch bails out cleanly instead. */
    AVFrame *frame = NULL;

    c = ost->st->codec;

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {

        /* a hack to avoid data copy with some raw video muxers */
        AVPacket pkt;
        av_init_packet(&pkt);

        if (!frame)
            return 1;   /* no raw frame available -- nothing to write */

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)frame;
        pkt.size          = sizeof(AVPicture);

        pkt.pts = pkt.dts = frame->pts;
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* Forward the captured packet; write_frame() (from the muxing
         * example) rescales timestamps into the stream time base. */
        ret = write_frame(oc, &c->time_base, ost->st, pkt11);
    }

    if (ret < 0) {
        /* FIX: original format string contained "%s" with no matching
         * argument (undefined behavior); report the error code instead. */
        fprintf(stderr, "Error while writing video frame: error code %d\n", ret);
        exit(1);
    }

    return 1;
}

1 个答案:

答案 0 :(得分:0)

原来我遇到的就是这里讨论过的问题:调用 avcodec_encode_video2 函数时出现段错误(Segmentation fault while avcodec_encode_video2)。

修改后的完整代码如下。它可以录制并保存到磁盘,但仍有一些问题:录制的视频有点扭曲、不清晰,我仍在调查可能的原因。我也去掉了SDL部分,因为我不再需要它了。任何改进建议都非常欢迎!

代码:

extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"


#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
//SDL
#include "SDL.h"
#include "SDL_thread.h"
}

//Output YUV420P
#define OUTPUT_YUV420P 0
//'1' Use Dshow
//'0' Use GDIgrab
#define USE_DSHOW 0

int main(int argc, char* argv[])
{

    //1.WE HAVE THE FORMAT CONTEXT
    //THIS IS FROM THE DESKTOP GRAB STREAM.
    AVFormatContext *pFormatCtx;
    int             i, videoindex;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;

    av_register_all();
    avformat_network_init();

    //ASSIGN STH TO THE FORMAT CONTEXT.
    pFormatCtx = avformat_alloc_context();

    //Register Device
    avdevice_register_all();
    //Windows
#ifdef _WIN32
#if USE_DSHOW
    //Use dshow
    //
    //Need to Install screen-capture-recorder
    //screen-capture-recorder
    //Website: http://sourceforge.net/projects/screencapturer/
    //
    AVInputFormat *ifmt=av_find_input_format("dshow");
    //if(avformat_open_input(&pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){
    if(avformat_open_input(&pFormatCtx,"video=UScreenCapture",ifmt,NULL)!=0){
        printf("Couldn't open input stream.\n");
        return -1;
    }
#else
    //Use gdigrab
    AVDictionary* options = NULL;
    //Set some options
    //grabbing frame rate
    //av_dict_set(&options,"framerate","5",0);
    //The distance from the left edge of the screen or desktop
    //av_dict_set(&options,"offset_x","20",0);
    //The distance from the top edge of the screen or desktop
    //av_dict_set(&options,"offset_y","40",0);
    //Video frame size. The default is to capture the full screen
    //av_dict_set(&options,"video_size","640x480",0);
    AVInputFormat *ifmt=av_find_input_format("gdigrab");
    if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
        printf("Couldn't open input stream.\n");
        return -1;
    }

#endif
#endif//FOR THE WIN32 THING.

    if(avformat_find_stream_info(pFormatCtx,NULL)<0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type
                ==AVMEDIA_TYPE_VIDEO)
        {
            videoindex=i;
            break;
        }
    if(videoindex==-1)
    {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    pCodecCtx=pFormatCtx->streams[videoindex]->codec;
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    {
        printf("Could not open codec.\n");
        return -1;
    }

    //THIS IS WHERE YOU CONTROL THE FORMAT(THROUGH FRAMES).
    AVFrame *pFrame;//,*pFrameYUV;

    pFrame=av_frame_alloc();

    int ret, got_picture;

    AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));

    //TRY TO INIT THE PACKET HERE
     av_init_packet(packet);


    //Output Information-----------------------------
    printf("File Information---------------------\n");
    av_dump_format(pFormatCtx,0,NULL,0);
    printf("-------------------------------------------------\n");


//<<--FOR WRITING MPG FILES
    //<<--START:PREPARE TO WRITE YOUR MPG FILE.

    const char * filename="test.mpg";
    int codec_id=   AV_CODEC_ID_MPEG1VIDEO;


    AVCodec *codec11;
    AVCodecContext *outContext= NULL;
    int got_output;
    FILE *f;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec11 = avcodec_find_encoder((AVCodecID)codec_id);
    if (!codec11) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    outContext = avcodec_alloc_context3(codec11);
    if (!outContext) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* put sample parameters */
    outContext->bit_rate = 400000;
    /* resolution must be a multiple of two */
    outContext->width = 352;
    outContext->height = 288;


    /* frames per second */
    outContext->time_base.num=1;
    outContext->time_base.den=25;

    /* emit one intra frame every ten frames
     * check frame pict_type before passing frame
     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of encoder
     * will always be I frame irrespective to gop_size
     */
    outContext->gop_size = 10;
    outContext->max_b_frames = 1;
    outContext->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(outContext->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(outContext, codec11, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }


    AVFrame *outframe = av_frame_alloc();
    int nbytes = avpicture_get_size(outContext->pix_fmt,
                                   outContext->width,
                                   outContext->height);

    uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

   //ASSOCIATE THE FRAME TO THE ALLOCATED BUFFER.
    avpicture_fill((AVPicture*)outframe, outbuffer,
                   AV_PIX_FMT_YUV420P,
                   outContext->width, outContext->height);

    SwsContext* swsCtx_ ;
    swsCtx_= sws_getContext(pCodecCtx->width,
                            pCodecCtx->height,
                            pCodecCtx->pix_fmt,
                            outContext->width, outContext->height,
                            outContext->pix_fmt,
                            SWS_BICUBIC, NULL, NULL, NULL);


    //HERE WE START PULLING PACKETS FROM THE SPECIFIED FORMAT CONTEXT.
    while(av_read_frame(pFormatCtx, packet)>=0)
    {
        if(packet->stream_index==videoindex)
        {
            ret= avcodec_decode_video2(pCodecCtx,
                                         pFrame,
                                         &got_picture,packet );
            if(ret < 0)
            {
                printf("Decode Error.\n");
                return -1;
            }
            if(got_picture)
            {

            sws_scale(swsCtx_, pFrame->data, pFrame->linesize,
                  0, pCodecCtx->height, outframe->data,
                  outframe->linesize);


            av_init_packet(&pkt);
            pkt.data = NULL;    // packet data will be allocated by the encoder
            pkt.size = 0;


            ret = avcodec_encode_video2(outContext, &pkt, outframe, &got_output);
            if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(1);
              }

            if (got_output) {
                printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
               }

            }
        }

        av_free_packet(packet);
    }//THE LOOP TO PULL PACKETS FROM THE FORMAT CONTEXT ENDS HERE.

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(outContext);
    av_free(outContext);
    //av_freep(&frame->data[0]);
    //av_frame_free(&frame);

    //THIS WAS ADDED LATER
    av_free(outbuffer);

    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}