Decoding Speex audio and playing the decoded PCM data with Audio Queues

Asked: 2014-04-17 08:11:11

Tags: ios audioqueueservices speex

I am receiving streaming audio data in the Speex format. To decode it, I first tried to compile ffmpeg with Speex support, but I ran into build errors, which can be seen at this link. So I took another option and used this framework to decode the Speex audio. I am playing the decoded data, i.e. the PCM data, using Audio Queues. Below is the code that decodes the Speex data and plays the PCM data.

SpeexPlayer.h

#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import "RtmpMediaStream.h"
#import "RtmpAVUtils.h"
#import <Speex/speex.h>
#import <Speex/speex_stereo.h>
#import <Speex/speex_callbacks.h>

typedef enum kAudioState {
    AUDIO_STATE_PAUSE,
    AUDIO_STATE_STOP,
    AUDIO_STATE_PLAYING,
    AUDIO_STATE_READY
} kAudioState;
typedef SInt16 AudioSampleType;
#define kNumAQBufs 3
#define kAudioBufferSeconds 3
@interface SpeexPlayer : RtmpAVUtils<RtmpMediaStreamAudioDataDelegate>{

    SpeexBits bits;
    void *dec_state;
    BOOL started;
    int frame_size;
    kAudioState state;
    AudioQueueRef audioQueue;
    AudioQueueBufferRef audioQueueBuffer;

}
- (id)initWithStream:(RtmpMediaStream *)stream;
- (void)audioQueueOutputCallback:(AudioQueueRef)inAQ inBuffer:(AudioQueueBufferRef)inBuffer;
- (void)audioQueueIsRunningCallback;
@end

SpeexPlayer.m

#import "SpeexPlayer.h"
#import "RtmpConstants.h"
#import "RtmpMediaStream.h"
#import "RtmpPacket.h"
int sample_rate = 11025;
int _lastTimestamp = 0;
BOOL isStarted = NO;
AudioTimeStamp tempTimeStamp;

void audioQueueOutputCallback(void *inClientData, AudioQueueRef inAQ,
                              AudioQueueBufferRef inBuffer) {
    SpeexPlayer *audioController = (__bridge SpeexPlayer*)inClientData;
    [audioController audioQueueOutputCallback:inAQ inBuffer:inBuffer];
}

void audioQueueIsRunningCallback(void *inClientData, AudioQueueRef inAQ,
                                 AudioQueuePropertyID inID) {

    SpeexPlayer *audioController = (__bridge SpeexPlayer*)inClientData;
    [audioController audioQueueIsRunningCallback];
}
@implementation SpeexPlayer

- (id)initWithStream:(RtmpMediaStream *)stream {
    if (self = [super initWithStream:stream]) {
        frame_size = 0;
        int enh = 1;
        started = NO;
        // Wideband Speex; frame_size comes back in samples per decoded frame.
        dec_state = speex_decoder_init(&speex_wb_mode);
        speex_decoder_ctl(dec_state, SPEEX_GET_FRAME_SIZE, &frame_size);
        speex_decoder_ctl(dec_state, SPEEX_SET_ENH, &enh); // enable the perceptual enhancer
        speex_decoder_ctl(dec_state, SPEEX_SET_SAMPLING_RATE, &sample_rate);
        speex_bits_init(&bits);
        AVAudioSession *audioSession = [AVAudioSession sharedInstance];
        [audioSession setCategory:AVAudioSessionCategoryPlayback error:nil];
        self.stream.audioDelegate = self;
    }
    return  self;
}
- (void)rtmpMediaStream:(RtmpMediaStream *)stream receivedAudioPacket:(RtmpPacket *)p {
    if (state != AUDIO_STATE_READY && state != AUDIO_STATE_PLAYING) {
        [self createAudioQueue];
        [self startAudio];
    }
    static AVPacket packet;
    isStarted = YES;
    if ([self encodePacket:p intoAVPacket:&packet]) {
        [self encodeRTMP:p intoQueueBuffer:audioQueueBuffer];
        AudioTimeStamp bufferStartTime;
        AudioQueueGetCurrentTime(audioQueue, NULL, &bufferStartTime, NULL);
        // Read packet.dts before releasing the packet, not after.
        bufferStartTime.mSampleTime += ((Float64)packet.dts) / 1000;
        bufferStartTime.mFlags = kAudioTimeStampSampleTimeValid;
        av_free_packet(&packet);
        tempTimeStamp.mSampleTime = bufferStartTime.mSampleTime - 5.0f;
        tempTimeStamp.mFlags = kAudioTimeStampSampleTimeValid;
        //NSLog(@"timestamp = %@",bufferStartTime);
        //[self enqueueBuffer:audioQueueBuffer withTimestamp:bufferStartTime];
        [self enqueueBuffer:audioQueueBuffer];
    }
}
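
// Side note (my assumption, untested): AudioTimeStamp.mSampleTime is counted
// in sample frames, so if packet.dts is in milliseconds the offset above
// probably needs the queue's sample rate as well, something like:
//
//   bufferStartTime.mSampleTime += ((Float64)packet.dts / 1000.0) * 8000.0; // 8000 = queue sample rate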


- (BOOL)createAudioQueue {
    state = AUDIO_STATE_READY;
    AudioStreamBasicDescription audioStreamBasicDesc;
    audioStreamBasicDesc.mFormatID = kAudioFormatLinearPCM;
    audioStreamBasicDesc.mSampleRate = 8000;
    audioStreamBasicDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
    kLinearPCMFormatFlagIsPacked | kAudioFormatFlagsCanonical;
    audioStreamBasicDesc.mBytesPerPacket = 2;
    audioStreamBasicDesc.mFramesPerPacket = 1;
    audioStreamBasicDesc.mBytesPerFrame = sizeof(AudioSampleType);
    audioStreamBasicDesc.mChannelsPerFrame = 1;
    audioStreamBasicDesc.mBitsPerChannel = 8 * sizeof(AudioSampleType);
    audioStreamBasicDesc.mReserved = 0;
    OSStatus status = AudioQueueNewOutput(&audioStreamBasicDesc, audioQueueOutputCallback, (__bridge void*)self,
                                          NULL, NULL, 0, &audioQueue);
    if (status != noErr) {
        NSLog(@"Could not create new output.");
        return NO;
    }

    status = AudioQueueAddPropertyListener(audioQueue, kAudioQueueProperty_IsRunning,
                                           audioQueueIsRunningCallback, (__bridge void*)self);
    if (status != noErr) {
        NSLog(@"Could not add propery listener. (kAudioQueueProperty_IsRunning)");
        return NO;
    }
    status = AudioQueueAllocateBufferWithPacketDescriptions(audioQueue,
                                                                audioStreamBasicDesc.mSampleRate * kAudioBufferSeconds / 8,
                                                                audioStreamBasicDesc.mSampleRate * kAudioBufferSeconds / audioStreamBasicDesc.mFramesPerPacket + 1,
                                                                &audioQueueBuffer);
    //AudioQueueAllocateBufferWithPacketDescriptions(audioQueue, 400, 500, &audioQueueBuffer);
    //status = AudioQueueAllocateBuffer(audioQueue, audioStreamBasicDesc.mSampleRate * kAudioBufferSeconds / 8, &audioQueueBuffer);

    if (status != noErr) {
        NSLog(@"Could not allocate buffer.");
        return NO;
    }
    return YES;
}
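
// Note to self (untested assumption): speex_wb_mode decodes 320-sample frames
// of 16 kHz audio, and as far as I can tell SPEEX_SET_SAMPLING_RATE does not
// resample the output, so an mSampleRate of 8000 above would play the stream
// at half speed. Sketch of what I may need instead:
//
//   audioStreamBasicDesc.mSampleRate = 16000; // match the wideband decoder?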
- (void)startAudio {
    if (started) {
        AudioQueueStart(audioQueue, NULL);
    } else {
        [self startQueue];
    }
    state = AUDIO_STATE_PLAYING;
}

- (void)stopAudio{
    if (started) {
        AudioQueueStop(audioQueue, YES);
        state = AUDIO_STATE_STOP;
        if (!self.stream.videoDelegate) {
            [self.stream pause];
        }
    }
}

- (void)play {
    if (!self.stream.videoDelegate) {
        [self.stream play];
    }
}

- (void)pause {
    if (started) {
        state = AUDIO_STATE_PAUSE;
        AudioQueuePause(audioQueue);
        AudioQueueReset(audioQueue);
        if (!self.stream.videoDelegate) {
            [self.stream pause];
        }
    }
}
- (OSStatus)startQueue {
    OSStatus status = noErr;
    if (!started) {
        status = AudioQueueStart(audioQueue, NULL);
        if (status == noErr) {
            started = YES;
        }
        else {
            NSLog(@"Could not start audio queue.");
        }
    }
    return status;
}

- (void)removeAudioQueue {
    [self stopAudio];
    started = NO;
    AudioQueueFreeBuffer(audioQueue, audioQueueBuffer);
    AudioQueueDispose(audioQueue, YES);
}


- (AudioQueueBufferRef)encodeRTMP:(RtmpPacket *)packet intoQueueBuffer:(AudioQueueBufferRef)buffer {
    buffer->mAudioDataByteSize = 0;
    buffer->mPacketDescriptionCount = 0;
    NSData *data = packet.m_payload;
    // Skip the 1-byte RTMP audio tag header in front of the Speex payload.
    NSData *subdata = [data subdataWithRange:NSMakeRange(1, data.length - 1)];
    speex_bits_read_from(&bits, (char *)subdata.bytes, (int)subdata.length);
    // frame_size is in samples and the decoder writes 16-bit samples,
    // so the decoded byte count is frame_size * sizeof(short), not frame_size.
    size_t decodedBytes = frame_size * sizeof(short);
    short *outTemp = (short *)calloc(frame_size, sizeof(short));
    speex_decode_int(dec_state, &bits, outTemp);
    if (buffer->mAudioDataBytesCapacity - buffer->mAudioDataByteSize >= decodedBytes) {
        memcpy((uint8_t *)buffer->mAudioData + buffer->mAudioDataByteSize, outTemp, decodedBytes);
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mStartOffset = buffer->mAudioDataByteSize;
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mDataByteSize = (UInt32)decodedBytes;
        // 0 for constant-frames-per-packet formats such as linear PCM.
        buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mVariableFramesInPacket = 0;
        buffer->mAudioDataByteSize += decodedBytes;
        buffer->mPacketDescriptionCount++;
    }
    free(outTemp);
    return buffer;
}

- (OSStatus)enqueueBuffer:(AudioQueueBufferRef)buffer {
           // withTimestamp:(AudioTimeStamp)timestamp {
    OSStatus status = noErr;

    // One shared lock; a fresh NSLock allocated per call would never be contended.
    static NSLock *decodeLock;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{ decodeLock = [[NSLock alloc] init]; });
    [decodeLock lock];

    if (buffer->mPacketDescriptionCount > 0) {
        //status = AudioQueueEnqueueBufferWithParameters(audioQueue, buffer, 0, NULL, 0, 0, 0, NULL, &timestamp, &timestamp);
        status = AudioQueueEnqueueBuffer(audioQueue, buffer, 0, NULL);
        if (status != noErr) {
            NSLog(@"Could not enqueue buffer.");
        }
    }
    else {
        AudioQueueStop(audioQueue, NO);
    }

    [decodeLock unlock];
    return status;
}
- (void)audioQueueOutputCallback:(AudioQueueRef)inAQ inBuffer:(AudioQueueBufferRef)inBuffer {
    if (state == AUDIO_STATE_PLAYING) {
        //[self enqueueBuffer:audioQueueBuffer withTimestamp:tempTimeStamp];
        [self enqueueBuffer:inBuffer];
    }
}

- (void)audioQueueIsRunningCallback {
    UInt32 isRunning;
    UInt32 size = sizeof(isRunning);
    OSStatus status = AudioQueueGetProperty(audioQueue, kAudioQueueProperty_IsRunning, &isRunning, &size);
    if (status == noErr && !isRunning && state == AUDIO_STATE_PLAYING) {
        state = AUDIO_STATE_STOP;
    }
}

@end

I enqueue the audio queue buffer whenever an audio stream packet is received, both in the method - (void)rtmpMediaStream:(RtmpMediaStream *)stream receivedAudioPacket:(RtmpPacket *)p and in the callback - (void)audioQueueOutputCallback:(AudioQueueRef)inAQ inBuffer:(AudioQueueBufferRef)inBuffer. When the AudioQueue plays the incoming stream, I cannot hear correct audio. All I can hear is a robotic sound, and in every test case the pitch of the audio is also much deeper than it should be. I cannot figure out where I went wrong, or whether the problem is in the decoding part or in playing the audio with Audio Queues. Can anyone suggest a solution?
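
To narrow it down, here is a minimal stand-alone check of the decoder configuration that I would compile against the same Speex framework (a sketch; the values in the comment are my assumption about speex_wb_mode, not something measured on the device):

#include <Speex/speex.h>
#include <stdio.h>

int main(void) {
    void *dec = speex_decoder_init(&speex_wb_mode);
    spx_int32_t frameSize = 0, rate = 0;
    speex_decoder_ctl(dec, SPEEX_GET_FRAME_SIZE, &frameSize);
    speex_decoder_ctl(dec, SPEEX_GET_SAMPLING_RATE, &rate);
    // speex_wb_mode should report 320 samples per frame at 16000 Hz; pushing
    // 16 kHz samples through an 8000 Hz AudioQueue would halve the pitch,
    // which matches the deep, slowed-down sound described above.
    printf("frame = %d samples (%lu bytes), rate = %d Hz\n",
           frameSize, (unsigned long)(frameSize * sizeof(short)), rate);
    speex_decoder_destroy(dec);
    return 0;
}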

0 Answers