我正在尝试使用ffmpeg将音频数据包写入文件。源设备在一段时间后发送数据包。例如
First packet has a time stamp 00:00:00
Second packet has a time stamp 00:00:00.5000000
Third packet has a time stamp 00:00:01
And so on...
也就是说,每秒有两个数据包。
我想对这些数据包进行编码并写入文件。
我参考的是 FFmpeg 官方示例 Muxing.c。
编码和写入过程中没有报错,但输出文件的音频时长只有 2 秒,而且播放速度非常快。
视频帧根据设置正确。
我认为这个问题与pts,dts和数据包持续时间的计算有关。
我应该如何计算pts,dts和持续时间的正确值。或者这个问题与其他事情有关吗?
代码:
// Wraps a raw PCM chunk (unmanaged memory pointed to by `chunk`) into the
// staging frame (outputStream->tmp_frame) and forwards it to the encoder path.
//
// @param chunk      Pointer to the interleaved sample data for one frame.
// @param length     Size of the chunk in bytes (currently unchecked; the
//                   buffer is assumed to hold tmp_frame->nb_samples samples).
// @param timestamp  Capture time of the chunk.
//                   NOTE(review): this value is never used — the pts written
//                   later is derived purely from the sample counter, which is
//                   the most likely cause of the "short/fast audio" symptom
//                   when the device delivers chunks at wall-clock intervals.
// @throws System::IO::IOException if the frame cannot be filled.
void AudioWriter::WriteAudioChunk(IntPtr chunk, int length, TimeSpan timestamp)
{
    // Bytes required for one full frame in the encoder's sample format.
    int buffer_size = av_samples_get_buffer_size(NULL, outputStream->tmp_frame->channels, outputStream->tmp_frame->nb_samples, outputStream->AudioStream->codec->sample_fmt, 0);
    uint8_t *audioData = reinterpret_cast<uint8_t*>(static_cast<void*>(chunk));
    // Point tmp_frame's data planes at the caller's buffer (no copy).
    int ret = avcodec_fill_audio_frame(outputStream->tmp_frame, outputStream->Channels, outputStream->AudioStream->codec->sample_fmt, audioData, buffer_size, 1);
    // BUG FIX: avcodec_fill_audio_frame returns a negative AVERROR on failure
    // and 0 or a positive value on success. The previous check `if (!ret)`
    // threw on a successful return of 0 and let real failures through.
    if (ret < 0)
        throw gcnew System::IO::IOException("Failed to fill the audio frame with the chunk data.");
    write_audio_frame(outputStream->FormatContext, outputStream, audioData);
}
// Resamples the staged frame (ost->tmp_frame) into the encoder's frame
// (ost->AudioFrame), encodes it, and writes the resulting packet to the
// output format context. Mirrors the flow of FFmpeg's muxing.c example.
// NOTE(review): the `audioData` parameter is never read here — the samples
// come from ost->tmp_frame, which the caller filled beforehand.
static int write_audio_frame(AVFormatContext *oc, AudioWriterData^ ost, uint8_t *audioData)
{
AVCodecContext *c;
AVPacket pkt = { 0 };
int ret;
int got_packet;
int dst_nb_samples;
// Stack-allocated packet: no payload yet; the encoder allocates it.
av_init_packet(&pkt);
c = ost->AudioStream->codec;
AVFrame *frame = ost->tmp_frame;
if (frame)
{
// Source and destination rates are both c->sample_rate, so this only
// differs from frame->nb_samples when the resampler has buffered delay.
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples, c->sample_rate, c->sample_rate, AV_ROUND_UP);
if (dst_nb_samples != frame->nb_samples)
throw gcnew Exception("dst_nb_samples != frame->nb_samples");
// AudioFrame may still be referenced by the encoder; make it writable
// before swr_convert writes into it.
ret = av_frame_make_writable(ost->AudioFrame);
if (ret < 0)
throw gcnew Exception("Unable to make writable.");
// Convert tmp_frame's samples into the encoder's sample format.
ret = swr_convert(ost->swr_ctx, ost->AudioFrame->data, dst_nb_samples, (const uint8_t **)frame->data, frame->nb_samples);
if (ret < 0)
throw gcnew Exception("Unable to convert to destination format.");
frame = ost->AudioFrame;
// pts bookkeeping: samples_count (in units of 1/sample_rate) rescaled
// to the codec time base.
// NOTE(review): pts advances only by the number of samples encoded, and
// the real capture timestamp is discarded upstream. If the device sends
// e.g. one 0.5 s chunk per 0.5 s of wall time but each chunk holds fewer
// samples than 0.5 s worth, the output is shorter than real time and
// plays "too fast" — consistent with the symptom described above.
AVRational timebase = { 1, c->sample_rate };
frame->pts = av_rescale_q(ost->samples_count, timebase, c->time_base);
ost->samples_count += dst_nb_samples;
}
// avcodec_encode_audio2 is the pre-send/receive encode API (deprecated in
// newer FFmpeg, but matching the muxing.c vintage this code is based on).
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0)
throw gcnew Exception("Error encoding audio frame.");
if (got_packet)
{
// Rescale pkt timestamps from the codec time base to the stream time
// base and interleave it into the file.
ret = write_frame(oc, &c->time_base, ost->AudioStream, &pkt);
if (ret < 0)
throw gcnew Exception("Audio is not written.");
}
else
// NOTE(review): encoders may legitimately buffer a frame and emit no
// packet yet; muxing.c does not treat that as an error. Throwing here
// is presumably intentional for this fixed-frame-size setup — verify.
throw gcnew Exception("Audio packet encode failed.");
// 0 while frames keep arriving / packets are produced (muxing.c convention).
return (ost->AudioFrame || got_packet) ? 0 : 1;
}
// Tags the packet with its stream, converts its timing from the encoder
// time base (*time_base) into the stream's own time base, and submits it
// to the muxer. Returns av_interleaved_write_frame's result (0 on success,
// negative AVERROR on failure).
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    pkt->stream_index = st->index;
    // The muxer expects pts/dts/duration in st->time_base units.
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    // av_interleaved_write_frame takes ownership of the packet's payload.
    return av_interleaved_write_frame(fmt_ctx, pkt);
}