Encoding .png images with h264 to a file on disk

Time: 2021-02-21 09:58:36

Tags: ffmpeg c++ h.264 libav

Can someone help me figure out why, even though I am sending valid YUV images to the codec, the file on disk is only 24 kB and cannot be read by VLC or similar players? I have added the .h and .cpp files below. Everything seems fine up to "avcodec_receive_packet". The call to "avcodec_send_frame" returns 0, so that must be OK, but "avcodec_receive_packet" returns -11. If I flush the encoder (currently commented out), "avcodec_receive_packet" returns 0, and if I store that data on disk I can see the encoded data. Also, the input image to the encoder is correct (currently commented out) and has been checked. My goal is intra-only encoding, so I should get the encoded frame data back, but even though I send it 24 images, I do not get anything back.
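For reference, here is a minimal sketch of the send/receive pattern from the libavcodec documentation (the helper name encode_and_write and the ctx/fmtCtx/stream/frame parameters are placeholders, not part of the code below). AVERROR(EAGAIN) (-11) from avcodec_receive_packet only means the encoder has buffered the frame and needs more input, or a flush, before it can emit a packet, so the caller keeps feeding frames, drains whatever packets are available, and flushes once with a NULL frame at the end.

extern "C"
{
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
}

// Sketch only: assumes ctx, fmtCtx and stream are already configured, the codec is open
// and avformat_write_header() has been called. Passing frame == NULL switches the
// encoder to flush mode and drains whatever it still has buffered.
static int encode_and_write(AVCodecContext* ctx, AVFormatContext* fmtCtx,
                            AVStream* stream, AVFrame* frame)
{
    int ret = avcodec_send_frame(ctx, frame);
    if (ret < 0)
        return ret;

    AVPacket* pkt = av_packet_alloc();
    while ((ret = avcodec_receive_packet(ctx, pkt)) >= 0)
    {
        // Rescale timestamps from the codec time base to the stream time base.
        av_packet_rescale_ts(pkt, ctx->time_base, stream->time_base);
        pkt->stream_index = stream->index;
        av_interleaved_write_frame(fmtCtx, pkt);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);

    // EAGAIN (encoder wants more input) and EOF (fully flushed) are expected outcomes.
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}

With this pattern a single call may legitimately produce zero packets; they show up on later calls or during the final flush call (frame == NULL), and only after that flush plus av_write_trailer() does the muxer finish a playable file.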

.h file

#ifndef MOVIECODEC_H
#define MOVIECODEC_H

#include <opencv2/opencv.hpp>

extern "C"
{
    #include "Codec/include/libavcodec/avcodec.h"
    #include "Codec/include/libavdevice/avdevice.h"
    #include "Codec/include/libavformat/avformat.h"
    #include "Codec/include/libavutil/avutil.h"
    #include "Codec/include/libavutil/imgutils.h"
    #include "Codec/include/libswscale/swscale.h"
}


class MovieCodec
{
public:

    MovieCodec(const char *filename);

    ~MovieCodec();

    void encodeImage( const cv::Mat& image );

    void close();
    
private :

    void add_stream();

    void openVideoCodec();

    void write_video_frame(const cv::Mat& image);

    void createFrame( const cv::Mat& image );

private:

    static int s_frameCount;

    double m_timeVideo = 0;

    std::string m_filename;

    FILE* m_file;

    AVCodec* m_encoder = NULL;

    AVOutputFormat* m_outputFormat = NULL;

    AVFormatContext* m_formatCtx = NULL;

    AVCodecContext* m_codecCtx = NULL;

    AVStream* m_streamOut = NULL;

    AVFrame* m_frame = NULL;

    AVPacket* m_packet = NULL;

};

.cpp file

#ifndef MOVIECODEC_CPP
#define MOVIECODEC_CPP

#include "moviecodec.h"


#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 24
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */


static int sws_flags = SWS_BICUBIC;
int MovieCodec::s_frameCount = 0;

MovieCodec::MovieCodec( const char* filename ) :
    m_filename( filename ),
    m_encoder( avcodec_find_encoder( AV_CODEC_ID_H264 ))
{
    av_log_set_level(AV_LOG_VERBOSE);

    int ret(0);

    m_file = fopen( m_filename.c_str(), "wb");

    // allocate the output media context
    ret = avformat_alloc_output_context2( &m_formatCtx, m_outputFormat, NULL, m_filename.c_str());

    if (!m_formatCtx)
        return;

    m_outputFormat = m_formatCtx->oformat;

    // Add the video stream using H264 codec
    add_stream();

    // Open video codec and allocate the necessary encode buffers
    if (m_streamOut)
        openVideoCodec();

    av_dump_format( m_formatCtx, 0, m_filename.c_str(), 1);

    // Open the output media file, if needed
    if (!( m_outputFormat->flags & AVFMT_NOFILE))
    {
        ret = avio_open( &m_formatCtx->pb, m_filename.c_str(), AVIO_FLAG_WRITE);

        if (ret < 0)
        {
            char error[255];
            ret = av_strerror( ret, error, 255);
            fprintf(stderr, "Could not open '%s': %s\n", m_filename.c_str(), error);
            return ;
        }
    }
    else
    {
        return;
    }

    m_formatCtx->flush_packets = 1;

    ret = avformat_write_header( m_formatCtx, NULL );

    if (ret < 0)
    {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error occurred when opening output file: %s\n", error);
        return;
    }


    if ( m_frame )
           m_frame->pts = 0;
}



MovieCodec::~MovieCodec()
{
    close();
}



void MovieCodec::encodeImage(const cv::Mat &image)
{
    // Compute video time from the last added video frame
    // (note: m_frame is only allocated in createFrame(), so it is still NULL here on the first call)
    m_timeVideo = (double)m_frame->pts * av_q2d(m_streamOut->time_base);

    // Stop media if enough time
    if (!m_streamOut /*|| m_timeVideo >= STREAM_DURATION*/)
       return;

    // Add a video frame
    write_video_frame( image );

    // Increase frame pts according to time base
    m_frame->pts += av_rescale_q(1, m_codecCtx->time_base, m_streamOut->time_base);
}


void MovieCodec::close()
{
    int ret( 0 );

    // Write media trailer
//    if( m_formatCtx )
//        ret = av_write_trailer( m_formatCtx );

    /* flush the encoder */
    ret = avcodec_send_frame(m_codecCtx, NULL);
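    // NOTE: sending NULL only switches the encoder into flush mode; the buffered packets
    // still have to be drained with avcodec_receive_packet() and written out, and
    // av_write_trailer() called, before the file on disk is complete.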

    /* Close each codec. */
    if ( m_streamOut )
    {
        av_free( m_frame->data[0]);
        av_free( m_frame );
    }

    if (!( m_outputFormat->flags & AVFMT_NOFILE))
        /* Close the output file. */
        ret = avio_close( m_formatCtx->pb);


    /* free the stream */
    avformat_free_context( m_formatCtx );

    if ( m_file )
        fclose( m_file );
}


void MovieCodec::createFrame( const cv::Mat& image )
{
    /**
     * \note allocate frame
     */
    m_frame = av_frame_alloc();
    m_frame->format = STREAM_PIX_FMT;
    m_frame->width = image.cols;
    m_frame->height = image.rows;
    m_frame->pict_type = AV_PICTURE_TYPE_I;
    int ret = av_image_alloc(m_frame->data, m_frame->linesize, m_frame->width,  m_frame->height, STREAM_PIX_FMT, 1);

    if (ret < 0)
    {
        return;
    }

    struct SwsContext* sws_ctx = sws_getContext(image.cols, image.rows, AV_PIX_FMT_RGB24,
                                                image.cols, image.rows, STREAM_PIX_FMT,
                                                sws_flags, NULL, NULL, NULL);

    // NOTE: cv::Mat images loaded with cv::imread are stored BGR, so AV_PIX_FMT_BGR24
    // may be the intended source format rather than RGB24.
    const uint8_t* rgbData[1] = { (const uint8_t*)image.data };
    int rgbLineSize[1] = { 3 * image.cols };

    sws_scale(sws_ctx, rgbData, rgbLineSize, 0, image.rows, m_frame->data, m_frame->linesize);
    sws_freeContext(sws_ctx);
}


/* Add an output stream. */
void MovieCodec::add_stream()
{
    AVCodecID codecId = AV_CODEC_ID_H264;

    if (!( m_encoder ))
    {
        fprintf(stderr, "Could not find encoder for '%s'\n",
            avcodec_get_name(codecId));
        return;
    }

    // Get the stream for codec
    m_streamOut = avformat_new_stream(m_formatCtx, m_encoder);

    if (!m_streamOut) {
        fprintf(stderr, "Could not allocate stream\n");
        return;
    }

    m_streamOut->id = m_formatCtx->nb_streams - 1;

    m_codecCtx = avcodec_alloc_context3( m_encoder);

    m_streamOut->codecpar->codec_id = codecId;
    m_streamOut->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    m_streamOut->codecpar->bit_rate = 400000;
    m_streamOut->codecpar->width = 800;
    m_streamOut->codecpar->height = 640;
    m_streamOut->codecpar->format = STREAM_PIX_FMT;
    m_streamOut->codecpar->codec_tag = 0x31637661;
    m_streamOut->codecpar->video_delay = 0;
    m_streamOut->time_base = { 1, STREAM_FRAME_RATE };


    avcodec_parameters_to_context( m_codecCtx, m_streamOut->codecpar);
    
    m_codecCtx->gop_size = 0; /* 0 = intra-only: every frame is a keyframe */
    m_codecCtx->max_b_frames = 0;
    m_codecCtx->time_base = { 1, STREAM_FRAME_RATE };
    m_codecCtx->framerate = { STREAM_FRAME_RATE, 1 };
    m_codecCtx->pix_fmt = STREAM_PIX_FMT;



    if (m_streamOut->codecpar->codec_id == AV_CODEC_ID_H264)
    {
      // "preset", "profile" and "tune" are libx264 private options; they have to be set
      // on the encoder's priv_data (or looked up with AV_OPT_SEARCH_CHILDREN), otherwise
      // av_opt_set on the context itself returns "option not found" and has no effect.
      av_opt_set( m_codecCtx->priv_data, "preset", "ultrafast", 0 );
      av_opt_set( m_codecCtx->priv_data, "profile", "baseline", 0 );
      av_opt_set( m_codecCtx->priv_data, "tune", "zerolatency", 0 );
    }

//    /* Some formats want stream headers to be separate. */
//    if (m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
//            m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


}


void MovieCodec::openVideoCodec()
{
    int ret;

    /* open the codec */
    ret = avcodec_open2(m_codecCtx, m_encoder, NULL);

    if (ret < 0)
    {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Could not open video codec: %s\n", error);
    }
}



void MovieCodec::write_video_frame( const cv::Mat& image )
{
    int ret;

    createFrame( image );


    if (m_formatCtx->oformat->flags & 0x0020 /* AVFMT_RAWPICTURE (removed in newer FFmpeg) */ )
    {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = m_streamOut->index;
        pkt.data = m_frame->data[0];
        pkt.size = sizeof(AVPicture);

//        ret = av_interleaved_write_frame(m_formatCtx, &pkt);
        ret = av_write_frame( m_formatCtx, &pkt );
    }
    else
    {
        AVPacket pkt;
        av_init_packet(&pkt);

        /* encode the image */

//cv::Mat yuv420p( m_frame->height + m_frame->height/2, m_frame->width, CV_8UC1, m_frame->data[0]);
//cv::Mat cvmIm;
//cv::cvtColor(yuv420p,cvmIm,CV_YUV420p2BGR);
//cv::imwrite("c:\\tmp\\YUVoriginal.png", cvmIm);

        ret = avcodec_send_frame(m_codecCtx, m_frame);

        if (ret < 0)
        {
            char error[255];
            av_strerror(ret, error, 255);
            fprintf(stderr, "Error encoding video frame: %s\n", error);
            return;
        }

        /* If size is zero, it means the image was buffered. */
//        ret = avcodec_receive_packet(m_codecCtx, &pkt);

        do
        {
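            // AVERROR(EAGAIN) (-11) here is not an error: the encoder has buffered the
            // frame and needs more input (or a flush) before it can emit a packet.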
            ret = avcodec_receive_packet(m_codecCtx, &pkt);

            if (ret == 0)
            {
                ret = av_write_frame( m_formatCtx, &pkt );
                av_packet_unref(&pkt);

                break;
            }
//            else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
//            {
//                return;
//            }
//            else if (ret == AVERROR(EAGAIN))
//            {
//                /* flush the encoder */
//                ret = avcodec_send_frame(m_codecCtx, NULL);
//
//                if (0 > ret)
//                    return;
//            }
        } while (ret == 0);

        if( !ret && pkt.size)
        {
            pkt.stream_index = m_streamOut->index;

            /* Write the compressed frame to the media file. */
//            ret = av_interleaved_write_frame(m_formatCtx, &pkt);
            ret = av_write_frame( m_formatCtx, &pkt );
        }
        else
        {
            ret = 0;
        }
    }

    if (ret != 0)
    {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Error while writing video frame: %s\n", error);
        return;
    }

    s_frameCount++;
}

0 Answers:

No answers