Android MediaCodec seek backwards

Time: 2016-07-18 08:15:01

Tags: android mediacodec seek mediaextractor grafika

I am trying to implement precise seeking in a video using MediaCodec. By following grafika's MoviePlayer, I managed to implement forward seeking. However, I still have a problem with backward seeking. The relevant bit of code is here:


public void seekBackward(long position) {
    final int TIMEOUT_USEC = 10000;
    int inputChunk = 0;
    long firstInputTimeNsec = -1;
    boolean outputDone = false;
    boolean inputDone = false;

    mExtractor.seekTo(position, MediaExtractor.SEEK_TO_PREVIOUS_SYNC);
    Log.d("TEST_MEDIA", "sampleTime: " + mExtractor.getSampleTime()/1000 +
            " -- position: " + position/1000 + " ----- BACKWARD");

    while (mExtractor.getSampleTime() < position && position >= 0) {
        if (VERBOSE) Log.d(TAG, "loop");
        if (mIsStopRequested) {
            Log.d(TAG, "Stop requested");
            return;
        }

        // Feed more data to the decoder.
        if (!inputDone) {
            int inputBufIndex = mDecoder.dequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0) {
                if (firstInputTimeNsec == -1) {
                    firstInputTimeNsec = System.nanoTime();
                }
                ByteBuffer inputBuf = mDecoderInputBuffers[inputBufIndex];
                // Read the sample data into the ByteBuffer.  This neither respects nor
                // updates inputBuf's position, limit, etc.
                int chunkSize = mExtractor.readSampleData(inputBuf, 0);
                if (chunkSize < 0) {
                    // End of stream -- send empty frame with EOS flag set.
                    mDecoder.queueInputBuffer(inputBufIndex, 0, 0, 0L,
                            MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                    inputDone = true;
                    if (VERBOSE) Log.d(TAG, "sent input EOS");
                } else {
                    if (mExtractor.getSampleTrackIndex() != mTrackIndex) {
                        Log.w(TAG, "WEIRD: got sample from track " +
                                mExtractor.getSampleTrackIndex() + ", expected " + mTrackIndex);
                    }
                    long presentationTimeUs = mExtractor.getSampleTime();
                    mDecoder.queueInputBuffer(inputBufIndex, 0, chunkSize,
                            presentationTimeUs, 0 /*flags*/);
                    if (VERBOSE) {
                        Log.d(TAG, "submitted frame " + inputChunk + " to dec, size=" + chunkSize);
                    }
                    inputChunk++;
                    mExtractor.advance();
                }
            } else {
                if (VERBOSE) Log.d(TAG, "input buffer not available");
            }
        }

        if (!outputDone) {
            int decoderStatus = mDecoder.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
            if (decoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
                // no output available yet
                if (VERBOSE) Log.d(TAG, "no output from decoder available");
            } else if (decoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                // not important for us, since we're using Surface
                if (VERBOSE) Log.d(TAG, "decoder output buffers changed");
            } else if (decoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                MediaFormat newFormat = mDecoder.getOutputFormat();
                if (VERBOSE) Log.d(TAG, "decoder output format changed: " + newFormat);
            } else if (decoderStatus < 0) {
                throw new RuntimeException(
                        "unexpected result from decoder.dequeueOutputBuffer: " + decoderStatus);
            } else { // decoderStatus >= 0
                if (firstInputTimeNsec != 0) {
                    // Log the delay from the first buffer of input to the first buffer
                    // of output.
                    long nowNsec = System.nanoTime();
                    Log.d(TAG, "startup lag " + ((nowNsec - firstInputTimeNsec) / 1000000.0) + " ms");
                    firstInputTimeNsec = 0;
                }
                boolean doLoop = false;
                if (VERBOSE) Log.d(TAG, "surface decoder given buffer " + decoderStatus +
                        " (size=" + mBufferInfo.size + ")");
                if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                    if (VERBOSE) Log.d(TAG, "output EOS");
                    if (mLoop) {
                        doLoop = true;
                    } else {
                        outputDone = true;
                    }
                }

                boolean doRender = (mBufferInfo.size != 0);

                // As soon as we call releaseOutputBuffer, the buffer will be forwarded
                // to SurfaceTexture to convert to a texture.  We can't control when it
                // appears on-screen, but we can manage the pace at which we release
                // the buffers.
                if (doRender && mFrameCallback != null) {
                    mFrameCallback.preRender(mBufferInfo.presentationTimeUs);
                }
                mDecoder.releaseOutputBuffer(decoderStatus, doRender);
                doRender = false;
                if (doRender && mFrameCallback != null) {
                    mFrameCallback.postRender();
                }
                if (doLoop) {
                    Log.d(TAG, "Reached EOS, looping");
                    mExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
                    inputDone = false;
                    mDecoder.flush();    // reset decoder state
                    mFrameCallback.loopReset();
                }
            }
        }
    }
}

Basically, it is the same as MoviePlayer's doExtract method. I just added a small modification to seek back to the previous key frame and then decode forward to the position I want. I also followed fadden's suggestion here, but with little success.

On a side note, as far as I understand ExoPlayer is built on top of MediaCodec, so why can it play video recorded on iOS while MoviePlayer's pure MediaCodec implementation cannot?

1 Answer:

Answer 0 (score: 3):

OK, here is how I solved the problem. Basically, I had misunderstood fadden's comment about the render flag. The issue is not the decoding itself but displaying only the last buffer, the one closest to the seek position. This is how I did it:

// Render only once the extractor's read position is within 10 ms (10,000 µs)
// of the requested seek position; drop every earlier frame.
if (Math.abs(position - mExtractor.getSampleTime()) < 10000) {
   mDecoder.releaseOutputBuffer(decoderStatus, true);
} else {
   mDecoder.releaseOutputBuffer(decoderStatus, false);
}

This is a really ugly way to solve the problem. The elegant approach would be to save the last output buffer and display it outside the while loop, but I don't really know how to access the output buffer so that I can save it to a temporary buffer.
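One way to get close to "saving the last output buffer" when decoding to a Surface, without copying any pixel data, is to simply defer the release: hold on to the index of the most recent output buffer, drop the previously held one whenever a newer frame is dequeued, and release the held index with the render flag set once the loop is done. The snippet below is only a sketch of that idea; it reuses the mDecoder/decoderStatus names from the code above and assumes the decoder tolerates one extra outstanding output buffer, which is codec-dependent.

    int heldIndex = -1;   // index of the output buffer we are currently holding back

    // ...inside the decode loop, where releaseOutputBuffer() is called today:
    if (decoderStatus >= 0) {
        if (heldIndex >= 0) {
            // A newer frame arrived; drop the previously held one without rendering it.
            mDecoder.releaseOutputBuffer(heldIndex, false);
        }
        heldIndex = decoderStatus;   // keep the newest frame instead of releasing it
    }

    // ...after the while loop: the held buffer is the frame closest to the seek target.
    if (heldIndex >= 0) {
        mDecoder.releaseOutputBuffer(heldIndex, true);   // render exactly one frame
        heldIndex = -1;
    }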

EDIT

Here is a less heavy-handed way. Basically, we just need to count how many frames lie between the key frame and the seeking position, and then display only the 1 or 2 frames closest to that position. Like this:

    mExtractor.seekTo(position, MediaExtractor.SEEK_TO_PREVIOUS_SYNC);
    int stopPosition = getStopPosition(mExtractor.getSampleTime(), position);
    int count = 0;

    while (mExtractor.getSampleTime() < position && mExtractor.getSampleTime() != -1 && position >= 0) {
    ....

        if(stopPosition - count < 2) { //just to make sure we will get something (1 frame sooner), see getStopPosition comment
           mDecoder.releaseOutputBuffer(decoderStatus, true);
        }else{
           mDecoder.releaseOutputBuffer(decoderStatus, false);
        }
        count++;
     ...
    }

/**
 * Calculate how many frames lie between the key frame and the seeking position,
 * so that we know how many times the while loop will execute; we then just need
 * to stop the loop 2 or 3 frames sooner to ensure we actually get something.
 */
private int getStopPosition(long start, long end) {
    long delta = end - start;                       // microseconds between the sync frame and the target
    float framePerMicroSecond = mFPS / 1000000f;    // float literal so the division is not truncated to 0

    return (int) (delta * framePerMicroSecond);
}
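
As a side note, mFPS has to come from somewhere. If the container advertises a frame rate, it can be read from the track's MediaFormat; the snippet below is only a sketch and assumes MediaFormat.KEY_FRAME_RATE is present as an integer, which not every file guarantees.

    // Sketch: derive mFPS from the track format, with a fallback when the key is missing.
    MediaFormat format = mExtractor.getTrackFormat(mTrackIndex);
    int fps = 30;   // arbitrary fallback; pick something sensible for your content
    if (format.containsKey(MediaFormat.KEY_FRAME_RATE)) {
        fps = format.getInteger(MediaFormat.KEY_FRAME_RATE);
    }
    mFPS = fps;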