Recording and playing audio simultaneously in iOS

Date: 2012-02-26 01:00:26

Tags: iphone ios avfoundation avaudioplayer avaudiorecorder

I am trying to play back what is being recorded while the recording is still in progress. Currently I use AVAudioRecorder for recording and AVAudioPlayer for playback.

When I try to play the content while still recording, nothing is played. The pseudocode for what I am doing is below.

If I do the same thing after stopping the recording, everything works fine.

AVAudioRecorder *recorder;  // initialized properly elsewhere
[recorder record];

NSError *error = nil;
NSURL *recordingPathUrl;    // contains the recording path
AVAudioPlayer *audioPlayer = [[AVAudioPlayer alloc] initWithContentsOfURL:recordingPathUrl
                                                                    error:&error];
[audioPlayer prepareToPlay];
[audioPlayer play];

Could anyone please let me know your thoughts or ideas?
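For what it's worth, playing and recording at the same time also requires the shared audio session to use the play-and-record category. A minimal sketch, assuming the session is configured before [recorder record] (this setup is an assumption; it is not shown in the snippet above):

NSError *sessionError = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];
// Without the play-and-record category, output stays silent while recording
[session setCategory:AVAudioSessionCategoryPlayAndRecord error:&sessionError];
[session setActive:YES error:&sessionError];

Even with the right category, AVAudioPlayer generally cannot play a file that is still being written, which is why the answers below switch to Audio Units.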

3 Answers:

Answer 0 (score: 5)

This can be done. Use this link and download the example project: https://code.google.com/p/ios-coreaudio-example/downloads/detail?name=Aruts.zip&can=2&q=

That project plays the microphone input through the speaker but does not record it. I have implemented the recording functionality; the complete code is explained below.

In the .h file:

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif

#ifndef min
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif


@interface IosAudioController : NSObject {
    AudioComponentInstance audioUnit;
    AudioBuffer tempBuffer; // this will hold the latest data from the microphone
    ExtAudioFileRef mAudioFileRef;
}

@property (readonly) ExtAudioFileRef mAudioFileRef;
@property (readonly) AudioComponentInstance audioUnit;
@property (readonly) AudioBuffer tempBuffer;

- (void) start;
- (void) stop;
- (void) processAudio: (AudioBufferList*) bufferList;

@end

// setup a global iosAudio variable, accessible everywhere
extern IosAudioController* iosAudio;
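A minimal usage sketch for this class, assuming the controller is created once at app launch (the exact call site is an assumption):

// e.g. in application:didFinishLaunchingWithOptions:
iosAudio = [[IosAudioController alloc] init];
[iosAudio start];   // microphone input now echoes to the speaker
                    // and is written to Documents/output.caf

// ... later, when finished:
[iosAudio stop];    // stops the audio unit and closes the recording file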

In the .m file:

#import "IosAudioController.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#define kOutputBus 0
#define kInputBus 1

IosAudioController* iosAudio;

void checkStatus(int status){
    if (status) {
        printf("Status not 0! %d\n", status);
//      exit(1);
    }
}




static void printAudioUnitRenderActionFlags(AudioUnitRenderActionFlags * ioActionFlags)
{
    if (*ioActionFlags == 0) {

        printf("AudioUnitRenderActionFlags(%lu) ", *ioActionFlags);
        return;
    }
    printf("AudioUnitRenderActionFlags(%lu): ", *ioActionFlags);
    if (*ioActionFlags & kAudioUnitRenderAction_PreRender)              printf("kAudioUnitRenderAction_PreRender ");
    if (*ioActionFlags & kAudioUnitRenderAction_PostRender)             printf("kAudioUnitRenderAction_PostRender ");
    if (*ioActionFlags & kAudioUnitRenderAction_OutputIsSilence)        printf("kAudioUnitRenderAction_OutputIsSilence ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Preflight)       printf("kAudioOfflineUnitRenderAction_Preflight ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Render)          printf("kAudioOfflineUnitRenderAction_Render ");
    if (*ioActionFlags & kAudioOfflineUnitRenderAction_Complete)        printf("kAudioOfflineUnitRenderAction_Complete ");
    if (*ioActionFlags & kAudioUnitRenderAction_PostRenderError)        printf("kAudioUnitRenderAction_PostRenderError ");
    if (*ioActionFlags & kAudioUnitRenderAction_DoNotCheckRenderArgs)   printf("kAudioUnitRenderAction_DoNotCheckRenderArgs ");
}


/**
 This callback is called when new audio data from the microphone is
 available.
 */
static OSStatus recordingCallback(void *inRefCon, 
                                  AudioUnitRenderActionFlags *ioActionFlags, 
                                  const AudioTimeStamp *inTimeStamp, 
                                  UInt32 inBusNumber, 
                                  UInt32 inNumberFrames, 
                                  AudioBufferList *ioData) {

    double timeInSeconds = inTimeStamp->mSampleTime / 44100.00;

     printf("\n%fs inBusNumber: %lu inNumberFrames: %lu ", timeInSeconds, inBusNumber, inNumberFrames);

    printAudioUnitRenderActionFlags(ioActionFlags);

    // Because of the way our audio format (setup below) is chosen:
    // we only need 1 buffer, since it is mono
    // Samples are 16 bits = 2 bytes.
    // 1 frame includes only 1 sample

    AudioBuffer buffer;

    buffer.mNumberChannels = 1;
    buffer.mDataByteSize = inNumberFrames * 2;
    buffer.mData = malloc( inNumberFrames * 2 );

    // Put the buffer in an AudioBufferList
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = buffer;

    // Then:
    // Obtain recorded samples

    OSStatus status;

    status = AudioUnitRender([iosAudio audioUnit], 
                             ioActionFlags, 
                             inTimeStamp, 
                             inBusNumber, 
                             inNumberFrames, 
                             &bufferList);
    checkStatus(status);

    // Now, we have the samples we just read sitting in buffers in bufferList
    // Process the new data
    [iosAudio processAudio:&bufferList];


    // Also append the freshly captured samples to the output file
    ExtAudioFileWriteAsync([iosAudio mAudioFileRef], inNumberFrames, &bufferList);

    // release the malloc'ed data in the buffer we created earlier
    free(bufferList.mBuffers[0].mData);

    return noErr;
}




/**
 This callback is called when the audioUnit needs new data to play through the
 speakers. If you don't have any, just don't write anything in the buffers
 */
static OSStatus playbackCallback(void *inRefCon, 
                                 AudioUnitRenderActionFlags *ioActionFlags, 
                                 const AudioTimeStamp *inTimeStamp, 
                                 UInt32 inBusNumber, 
                                 UInt32 inNumberFrames, 
                                 AudioBufferList *ioData) {    
    // Notes: ioData contains buffers (may be more than one!)
    // Fill them up as much as you can. Remember to set the size value in each buffer to match how
    // much data is in the buffer.

    for (int i=0; i < ioData->mNumberBuffers; i++) { // in practice we will only ever have 1 buffer, since audio format is mono
        AudioBuffer buffer = ioData->mBuffers[i];

//      NSLog(@"  Buffer %d has %d channels and wants %d bytes of data.", i, buffer.mNumberChannels, buffer.mDataByteSize);

        // copy temporary buffer data to output buffer
        UInt32 size = min(buffer.mDataByteSize, [iosAudio tempBuffer].mDataByteSize); // don't copy more data than we have, or than fits
        memcpy(buffer.mData, [iosAudio tempBuffer].mData, size);
        buffer.mDataByteSize = size; // indicate how much data we wrote in the buffer

        // uncomment to hear random noise
        /*
        UInt16 *frameBuffer = buffer.mData;
        for (int j = 0; j < inNumberFrames; j++) {
            frameBuffer[j] = rand();
        }
        */

    }

    return noErr;
}

@implementation IosAudioController

@synthesize audioUnit, tempBuffer,mAudioFileRef;

/**
 Initialize the audioUnit and allocate our own temporary buffer.
 The temporary buffer will hold the latest data coming in from the microphone,
 and will be copied to the output when this is requested.
 */
- (id) init {
    self = [super init];

    OSStatus status;

    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSLog(@"%f",session.preferredIOBufferDuration);


    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    checkStatus(status);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // Describe format
    AudioStreamBasicDescription audioFormat;
    audioFormat.mSampleRate         = 44100.00;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    // Apply format
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    checkStatus(status);


    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Global, 
                                  kInputBus, 
                                  &callbackStruct, 
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Set output callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Global, 
                                  kOutputBus,
                                  &callbackStruct, 
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));
    checkStatus(status);

    // set preferred buffer size
    Float32 audioBufferSize = (0.023220);
    UInt32 size = sizeof(audioBufferSize);
    status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                     size, &audioBufferSize);

    // Allocate our own buffer (1 channel, 16 bits per sample, thus 2 bytes per frame).
    // In practice the buffers contain 512 frames; if that changes, processAudio fixes it up.
    tempBuffer.mNumberChannels = 1;
    tempBuffer.mDataByteSize = 512 * 2;
    tempBuffer.mData = malloc( 512 * 2 );





    NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *destinationFilePath = [[NSString alloc] initWithFormat:@"%@/output.caf", documentsDirectory];
    NSLog(@">>> %@\n", destinationFilePath);

    CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);

    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileCAFType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &mAudioFileRef);
    CFRelease(destinationURL);

    NSAssert(setupErr == noErr, @"Couldn't create file for writing");


    setupErr = ExtAudioFileSetProperty(mAudioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
    NSAssert(setupErr == noErr, @"Couldn't set the client data format on the audio file");


    setupErr =  ExtAudioFileWriteAsync(mAudioFileRef, 0, NULL);
    NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");

    // Initialise
    status = AudioUnitInitialize(audioUnit);
    checkStatus(status);

  //   [NSTimer scheduledTimerWithTimeInterval:5 target:self selector:@selector(stopRecording:) userInfo:nil repeats:NO];

    return self;
}

/**
 Start the audioUnit. This means data will be provided from
 the microphone, and requested for feeding to the speakers, by
 use of the provided callbacks.
 */
- (void) start {
    OSStatus status = AudioOutputUnitStart(audioUnit);
    checkStatus(status);
}

/**
 Stop the audioUnit
 */
- (void) stop {
    OSStatus status = AudioOutputUnitStop(audioUnit);
    checkStatus(status);
    [self stopRecording:nil];
}

/**
 Change this function to decide what is done with incoming
 audio data from the microphone.
 Right now we copy it to our own temporary buffer.
 */
- (void) processAudio: (AudioBufferList*) bufferList{
    AudioBuffer sourceBuffer = bufferList->mBuffers[0];

    // fix tempBuffer size if it's the wrong size
    if (tempBuffer.mDataByteSize != sourceBuffer.mDataByteSize) {
        free(tempBuffer.mData);
        tempBuffer.mDataByteSize = sourceBuffer.mDataByteSize;
        tempBuffer.mData = malloc(sourceBuffer.mDataByteSize);
    }

    // copy incoming audio data to temporary buffer
    memcpy(tempBuffer.mData, bufferList->mBuffers[0].mData, bufferList->mBuffers[0].mDataByteSize);
}


- (void)stopRecording:(NSTimer*)theTimer
{
    printf("\nstopRecording\n");
    OSStatus status = ExtAudioFileDispose(mAudioFileRef);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}

/**
 Clean up.
 */
- (void) dealloc {
    AudioUnitUninitialize(audioUnit);
    free(tempBuffer.mData);
    [super dealloc];
}

This will definitely help you.

Another good approach is to download audiograph from https://github.com/tkzic/audiograph and look at that app's Echo feature. It repeats your voice back as you speak, but it does not record the audio, so add recording functionality to it as described below:

In MixerHostAudio.h:

@property (readwrite) ExtAudioFileRef   mRecordFile;
-(void)Record;
-(void)StopRecord;



In MixerHostAudio.m:

// Add these two functions to this class

-(void)Record{
    NSString *completeFileNameAndPath = [[NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject] stringByAppendingString:@"/Record.wav"];
    //create the url that the recording object needs to reference the file
    CFURLRef audioFileURL = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8 *)[completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]] , strlen([completeFileNameAndPath cStringUsingEncoding:[NSString defaultCStringEncoding]]), false);
   AudioStreamBasicDescription dstFormat, clientFormat;
    memset(&dstFormat, 0, sizeof(dstFormat));
    memset(&clientFormat, 0, sizeof(clientFormat));

    AudioFileTypeID fileTypeId = kAudioFileWAVEType;
    UInt32 size = sizeof(dstFormat);
    dstFormat.mFormatID = kAudioFormatLinearPCM;

    // setup the output file format
    dstFormat.mSampleRate = 44100.0; // set sample rate

    // create a 16-bit, 44.1 kHz stereo format
    dstFormat.mChannelsPerFrame = 2;
    dstFormat.mBitsPerChannel = 16;
    dstFormat.mBytesPerPacket = dstFormat.mBytesPerFrame = 4;
    dstFormat.mFramesPerPacket = 1;
    dstFormat.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger; // little-endian

    // get the client format directly from the mixer unit
    UInt32 asbdSize = sizeof (AudioStreamBasicDescription);
    AudioUnitGetProperty(mixerUnit,
                         kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input,
                         0, // input bus
                         &clientFormat,
                         &asbdSize);

    ExtAudioFileCreateWithURL(audioFileURL, fileTypeId, &dstFormat, NULL, kAudioFileFlags_EraseFile, &mRecordFile);

    printf("recording\n");
    ExtAudioFileSetProperty(mRecordFile, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
    // call this once; the first call allocates the write buffers
    ExtAudioFileWriteAsync(mRecordFile, 0, NULL);


}



-(void)StopRecord{
    ExtAudioFileDispose(mRecordFile);
}



// In the micLineInCallback function, add this line at the end, just before return noErr;:

  ExtAudioFileWriteAsync([THIS mRecordFile], inNumberFrames, ioData);

Call these functions from the playOrStop: IBAction method in MixerHostViewController.m, as sketched below.
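A hedged sketch of that wiring; audioObject, isPlaying, startAUGraph and stopAUGraph are names from the MixerHost/audiograph sample, and the exact body here is an assumption:

- (IBAction) playOrStop: (id) sender {
    if (audioObject.isPlaying) {
        [audioObject StopRecord];   // close the file before tearing down the graph
        [audioObject stopAUGraph];
    } else {
        [audioObject Record];       // create and prime the file first
        [audioObject startAUGraph];
    }
    // update the button title etc. as the sample already does
}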

Answer 1 (score: 0)

If you want to monitor the audio input in real time, you need to use Audio Units.

Answer 2 (score: 0)

The RemoteIO Audio Unit can record and play simultaneously. There are many examples of recording with RemoteIO (aurioTouch) and of playing with RemoteIO. Just enable both the unit input and the unit output, and handle both buffer callbacks. See the example here.
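For reference, a condensed sketch of that setup, assuming a RemoteIO unit has already been instantiated as in the listing from Answer 0 (error checking omitted):

UInt32 one = 1;
// On a RemoteIO unit, bus 1 is the microphone input and bus 0 is the speaker output
AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Input,  1, &one, sizeof(one));  // enable recording
AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Output, 0, &one, sizeof(one));  // enable playback
// Then install an input callback on bus 1 and a render callback on bus 0,
// exactly as the IosAudioController init above does.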