FFmpeg avcodec_decode_video2将RTSP H264 HD视频数据包解码为错误的视频图像

时间:2018-05-28 15:45:04

标签: ffmpeg h.264

我使用FFmpegversion 4.0来创建简单的C ++程序,这是一个从IP摄像头接收RTSP H264视频数据并在程序窗口中显示它的线程。

此主题的代码如下:

DWORD WINAPI GrabbProcess(LPVOID lpParam)
// Grabbing thread
{
  DWORD i;
  int ret = 0, nPacket=0;
  FILE *pktFile;
  // Open video file
  pFormatCtx = avformat_alloc_context();
  if(avformat_open_input(&pFormatCtx, nameVideoStream, NULL, NULL)!=0)
      fGrabb=-1; // Couldn't open file
  else
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
      fGrabb=-2; // Couldn't find stream information
  else
  {
      // Find the first video stream
      videoStream=-1;
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
        {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
          fGrabb=-3; // Didn't find a video stream
      else
      {
          // Get a pointer to the codec context for the video stream
          pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
          // Find the decoder for the video stream
          pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
          if(pCodec==NULL)
              fGrabb=-4; // Codec not found
          else
          {
              // Copy context
              pCodecCtx = avcodec_alloc_context3(pCodec);
              if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0)
                  fGrabb=-5; // Error copying codec context
              else
              {
                  // Open codec
                  if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
                      fGrabb=-6; // Could not open codec
                  else
                  // Allocate video frame for input
                  pFrame=av_frame_alloc();
                  // Determine required buffer size and allocate buffer
                  numBytes=avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width,
                      pCodecCtx->height);
                  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
                  // Assign appropriate parts of buffer to image planes in pFrame
                  // Note that pFrame is an AVFrame, but AVFrame is a superset
                  // of AVPicture
                  avpicture_fill((AVPicture *)pFrame, buffer, pCodecCtx->pix_fmt,
                      pCodecCtx->width, pCodecCtx->height);

                  // Allocate video frame for display
                  pFrameRGB=av_frame_alloc();
                  // Determine required buffer size and allocate buffer
                  numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
                      pCodecCtx->height);
                  bufferRGB=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
                  // Assign appropriate parts of buffer to image planes in pFrameRGB
                  // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
                  // of AVPicture
                  avpicture_fill((AVPicture *)pFrameRGB, bufferRGB, AV_PIX_FMT_RGB24,
                      pCodecCtx->width, pCodecCtx->height);
                  // initialize SWS context for software scaling to FMT_RGB24
                  sws_ctx_to_RGB = sws_getContext(pCodecCtx->width,
                      pCodecCtx->height,
                      pCodecCtx->pix_fmt,
                      pCodecCtx->width,
                      pCodecCtx->height,
                      AV_PIX_FMT_RGB24,
                      SWS_BILINEAR,
                      NULL,
                      NULL,
                      NULL);

                  // Allocate video frame (grayscale YUV420P) for processing
                  pFrameYUV=av_frame_alloc();
                  // Determine required buffer size and allocate buffer
                  numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width,
                      pCodecCtx->height);
                  bufferYUV=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
                  // Assign appropriate parts of buffer to image planes in pFrameYUV
                  // Note that pFrameYUV is an AVFrame, but AVFrame is a superset
                  // of AVPicture
                  avpicture_fill((AVPicture *)pFrameYUV, bufferYUV, AV_PIX_FMT_YUV420P,
                      pCodecCtx->width, pCodecCtx->height);
                  // initialize SWS context for software scaling to FMT_YUV420P
                  sws_ctx_to_YUV = sws_getContext(pCodecCtx->width,
                      pCodecCtx->height,
                      pCodecCtx->pix_fmt,
                      pCodecCtx->width,
                      pCodecCtx->height,
                      AV_PIX_FMT_YUV420P,
                      SWS_BILINEAR,
                      NULL,
                      NULL,
                      NULL);
                RealBsqHdr.biWidth = pCodecCtx->width;
                RealBsqHdr.biHeight = -pCodecCtx->height;
              }
          }
      }
  }
  while ((fGrabb==1)||(fGrabb==100))
  {
      // Grabb a frame
      if (av_read_frame(pFormatCtx, &packet) >= 0)
      {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            // Decode video frame
            int len = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            nPacket++;
            // Did we get a video frame?
            if(frameFinished)
            {
                // Convert the image from its native format to YUV
                sws_scale(sws_ctx_to_YUV, (uint8_t const * const *)pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height,
                    pFrameYUV->data, pFrameYUV->linesize);
                // Convert the image from its native format to RGB
                sws_scale(sws_ctx_to_RGB, (uint8_t const * const *)pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height,
                    pFrameRGB->data, pFrameRGB->linesize);
                HDC hdc=GetDC(hWndM);
                SetDIBitsToDevice(hdc, 0, 0, pCodecCtx->width, pCodecCtx->height,
                    0, 0, 0, pCodecCtx->height,pFrameRGB->data[0], (LPBITMAPINFO)&RealBsqHdr, DIB_RGB_COLORS);
                ReleaseDC(hWndM,hdc);
                av_frame_unref(pFrame);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
   }
   // Free the org frame
  av_frame_free(&pFrame);
  // Free the RGB frame
  av_frame_free(&pFrameRGB);
  // Free the YUV frame
  av_frame_free(&pFrameYUV);

  // Close the codec
  avcodec_close(pCodecCtx);
  avcodec_close(pCodecCtxOrig);

  // Close the video file
  avformat_close_input(&pFormatCtx);
  avformat_free_context(pFormatCtx);

  if (fGrabb==1)
      sprintf(tmpstr,"Grabbing Completed %d frames", nCntTotal);
  else if (fGrabb==2)
      sprintf(tmpstr,"User break on %d frames", nCntTotal);
  else if (fGrabb==3)
      sprintf(tmpstr,"Can't Grabb at frame %d", nCntTotal);
  else if (fGrabb==-1)
      sprintf(tmpstr,"Couldn't open file");
  else if (fGrabb==-2)
      sprintf(tmpstr,"Couldn't find stream information");
  else if (fGrabb==-3)
      sprintf(tmpstr,"Didn't find a video stream");
  else if (fGrabb==-4)
      sprintf(tmpstr,"Codec not found");
  else if (fGrabb==-5)
      sprintf(tmpstr,"Error copying codec context");
  else if (fGrabb==-6)
      sprintf(tmpstr,"Could not open codec");
  i=(UINT) fGrabb;
  fGrabb=0;
  SetWindowText(hWndM,tmpstr);
  ExitThread(i);
  return 0;
}
// End Grabbing thread  

当节目收到分辨率为RTSP H264的{​​{1}}视频数据时,解码的视频图片就可以了。当收到分辨率为704x576的{​​{1}}高清视频数据时,看起来第一个视频图片被解码为OK,然后视频图片被解码但总是出现一些错误。

请帮我解决这个问题!

以下是问题简述:
我有一个IP摄像头模型RTSP H264(中国的产品) Camera可以分辨率704x576(RTSP URI“rtsp://192.168.1.18:554 / user = admin_password = tlJwpbo6_channel = 1_stream = 1.sdp?real_stream”)或1280x720(RTSP URI“rtsp:/”)提供H264视频流/192.168.1.18:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp?real_stream“)。
使用1280x720实用程序,我可以访问并以良好的图像质量显示它们 为了测试从这台相机获取,我在VC-2005中有一个简单的(上面提到的)程序。在“Grabbing thread”程序中,使用HI3518E_50H10L_S39库版本4.0打开相机RTSP流,检索流信息,查找第一个视频流......并准备一些变量。
循环的中心是循环:抓取一个帧(函数FFplay) - 如果是视频(函数FFmpeg)解码它 - 转换为RGB格式(函数av_read_frame) - 显示到程序窗口(GDI函数avcodec_decode_video2)。
当proram使用分辨率为704x576的相机RTSP流运行时,我有很好的视频图像。这是一个例子:
704x576 sample
当程序使用分辨率为1280x720的相机RTSP流运行时,第一个视频图像是好的:
First good at res.1280x720
但后来不好:
not good at res.1280x720
它似乎是我对sws_scale的FFmpeg函数调用由于某些原因无法完全解码某些数据包。

0 个答案:

没有答案