Playing raw PCM audio data coming from an NSStream

Date: 2015-02-05 09:47:38

Tags: ios iphone audio pcm nsstream

I am trying to play PCM data coming from an NSInputStream. Can anyone show me the right approach or code for this?

I read the audio in the stream's has-bytes-available event with the following code:

uint8_t bytes[self.audioStreamReadMaxLength];
UInt32 length = [audioStream readData:bytes maxLength:self.audioStreamReadMaxLength];
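
For context, that read happens inside the NSStreamDelegate event callback. A minimal sketch of that handler, written against NSInputStream's standard read:maxLength: API:

- (void)stream:(NSStream *)aStream handleEvent:(NSStreamEvent)eventCode
{
    if (eventCode == NSStreamEventHasBytesAvailable) {
        uint8_t bytes[self.audioStreamReadMaxLength];
        NSInteger length = [(NSInputStream *)aStream read:bytes maxLength:self.audioStreamReadMaxLength];
        if (length > 0) {
            // TODO: hand these PCM bytes to the playback code
        }
    }
}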

Now how can I play this byte audio data on the iPhone?

2 Answers:

Answer 0 (score: 7):

I dealt with a similar problem and eventually solved it.

Here is the basis of what I did. I am using a library for the sockets (GCDAsyncSocket, from the CocoaAsyncSocket project).

The class below is responsible for capturing the audio and making it available to connected clients.


A companion class then retrieves the audio from the server and plays it; a sketch of that client appears after the server code below.

#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>

@interface AudioServer : NSObject <GCDAsyncSocketDelegate>

@property (nonatomic, strong) GCDAsyncSocket *serverSocket;

@property (nonatomic, strong) NSMutableArray *connectedClients;

@property (nonatomic) AudioComponentInstance audioUnit;

-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;

@end
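
Starting the capture/serving side is then just a matter of creating the server and calling start (a hypothetical call site):

AudioServer *audioServer = [[AudioServer alloc] init];
[audioServer start];
// ... and when finished:
[audioServer stop];

The implementation of the server follows.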

#define kOutputBus 0
#define kInputBus 1

#import "AudioServer.h"
#import "SM_Utils.h"

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {

    // inRefCon carries our AudioServer instance; inNumberFrames tells us how much
    // data is available, and how much space to provide in the AudioBufferList below.

    AudioServer *server = (__bridge AudioServer*)inRefCon;

    AudioBufferList bufferList;

    SInt16 samples[inNumberFrames]; // exactly one 16-bit sample per frame (mono)
    memset(samples, 0, sizeof(samples));

    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = samples;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

    // Then:
    // Obtain recorded samples

    OSStatus status;

    status = AudioUnitRender(server.audioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             inBusNumber,
                             inNumberFrames,
                             &bufferList);
    if (status != noErr) {
        return status; // don't send anything to clients if the render failed
    }

    NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
    [server writeDataToClients:dataToSend];

    return noErr;
}

@implementation AudioServer

-(id) init
{
    self = [super init];
    return self;
}

-(void) start
{

    [UIApplication sharedApplication].idleTimerDisabled = YES;
    // Create a new instance of AURemoteIO

    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    AudioComponentInstanceNew(comp, &_audioUnit);

    //  Enable input and output on AURemoteIO
    //  Input is enabled on the input scope of the input element
    //  Output is enabled on the output scope of the output element

    UInt32 one = 1;
    AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));

    AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));

    // Explicitly set the input and output client formats
    // sample rate = 44100, num channels = 1, format = 16-bit signed integer (see getAudioDescription below)

    AudioStreamBasicDescription audioFormat = [self getAudioDescription];
    AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
    AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));

    // Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
    // of samples it will be asked to produce on any single given call to AudioUnitRender
    UInt32 maxFramesPerSlice = 4096;
    AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));

    // Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
    UInt32 propSize = sizeof(UInt32);
    AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);


    AURenderCallbackStruct renderCallback;
    renderCallback.inputProc = recordingCallback;
    renderCallback.inputProcRefCon = (__bridge void *)(self);

    AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));


    // Initialize the AURemoteIO instance
    AudioUnitInitialize(_audioUnit);

    AudioOutputUnitStart(_audioUnit);

    _connectedClients = [[NSMutableArray alloc] init];
    _serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];

    [self startAcceptingConnections];
}

- (AudioStreamBasicDescription)getAudioDescription {
    AudioStreamBasicDescription audioDescription = {0};
    audioDescription.mFormatID          = kAudioFormatLinearPCM;
    audioDescription.mFormatFlags       = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
    audioDescription.mChannelsPerFrame  = 1;
    audioDescription.mBytesPerPacket    = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
    audioDescription.mFramesPerPacket   = 1;
    audioDescription.mBytesPerFrame     = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
    audioDescription.mBitsPerChannel    = 8 * sizeof(SInt16);
    audioDescription.mSampleRate        = 44100.0;
    return audioDescription;
}

-(void) startAcceptingConnections
{
    NSError *error = nil;
    if(_serverSocket)
        [_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}


-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
    if(_connectedClients)
        [_connectedClients removeObject:sock];
}

- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {

    NSLog(@"Accepted New Socket from %@:%hu", [newSocket connectedHost], [newSocket connectedPort]);

    @synchronized(_connectedClients)
    {
        dispatch_async(dispatch_get_main_queue(), ^{
            if(_connectedClients)
                [_connectedClients addObject:newSocket];
        });
    }

    NSError *error = nil;
    if(_serverSocket)
        [_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}

-(void) writeDataToClients:(NSData *)data
{
    if(_connectedClients)
    {
        for (GCDAsyncSocket *socket in _connectedClients) {
            if([socket isConnected])
            {
                [socket writeData:data withTimeout:-1 tag:0];
            }
            else{
                if([_connectedClients containsObject:socket])
                    [_connectedClients removeObject:socket];
            }
        }
    }
}

-(void) stop
{
    if(_serverSocket)
    {
        _serverSocket = nil;
    }
    [UIApplication sharedApplication].idleTimerDisabled = NO;
    AudioOutputUnitStop(_audioUnit);
}

-(void) dealloc
{
    if(_serverSocket)
    {
        _serverSocket = nil;
    }
    [UIApplication sharedApplication].idleTimerDisabled = NO;
    AudioOutputUnitStop(_audioUnit);
}

@end
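
The client side connects to this server, buffers whatever PCM it receives, and feeds it to a RemoteIO output unit using the same 16-bit mono 44.1 kHz format the server sends. Below is a minimal sketch of such a class; the name AudioClient, the connectToHost:port: method, and the NSMutableData buffering are illustrative assumptions, not code from the original answer.

#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>

@interface AudioClient : NSObject <GCDAsyncSocketDelegate>

@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) NSMutableData *audioData; // received PCM waiting to be played
@property (nonatomic) AudioComponentInstance audioUnit;

-(void) connectToHost:(NSString *)host port:(uint16_t)port;

@end

@implementation AudioClient

// The output unit pulls PCM out of the buffered socket data; any
// frames we cannot fill are left as silence.
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData) {

    AudioClient *client = (__bridge AudioClient *)inRefCon;
    UInt32 bytesNeeded = ioData->mBuffers[0].mDataByteSize;
    memset(ioData->mBuffers[0].mData, 0, bytesNeeded);

    @synchronized(client.audioData) {
        UInt32 bytesToCopy = MIN(bytesNeeded, (UInt32)client.audioData.length);
        if (bytesToCopy > 0) {
            memcpy(ioData->mBuffers[0].mData, client.audioData.bytes, bytesToCopy);
            // drop the consumed bytes from the front of the buffer
            [client.audioData replaceBytesInRange:NSMakeRange(0, bytesToCopy) withBytes:NULL length:0];
        }
    }
    return noErr;
}

-(void) connectToHost:(NSString *)host port:(uint16_t)port
{
    _audioData = [[NSMutableData alloc] init];
    _socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
    [_socket connectToHost:host onPort:port error:nil];

    // Plain RemoteIO output unit
    AudioComponentDescription desc = {0};
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    AudioComponentInstanceNew(comp, &_audioUnit);

    // Must match the format the server sends: 16-bit signed mono at 44.1 kHz
    AudioStreamBasicDescription audioFormat = {0};
    audioFormat.mSampleRate       = 44100.0;
    audioFormat.mFormatID         = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
    audioFormat.mChannelsPerFrame = 1;
    audioFormat.mFramesPerPacket  = 1;
    audioFormat.mBitsPerChannel   = 16;
    audioFormat.mBytesPerFrame    = 2;
    audioFormat.mBytesPerPacket   = 2;
    AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));

    AURenderCallbackStruct callback;
    callback.inputProc = playbackCallback;
    callback.inputProcRefCon = (__bridge void *)self;
    AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callback, sizeof(callback));

    AudioUnitInitialize(_audioUnit);
    AudioOutputUnitStart(_audioUnit);
}

- (void)socket:(GCDAsyncSocket *)sock didConnectToHost:(NSString *)host port:(uint16_t)port
{
    [sock readDataWithTimeout:-1 tag:0];
}

- (void)socket:(GCDAsyncSocket *)sock didReadData:(NSData *)data withTag:(long)tag
{
    // queue whatever the server sent and keep reading
    @synchronized(_audioData) {
        [_audioData appendData:data];
    }
    [sock readDataWithTimeout:-1 tag:0];
}

@end

Note that @synchronized inside a render callback is only acceptable for a sketch; in production, use a lock-free ring buffer (for example TPCircularBuffer) so the audio thread never blocks.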

Some of the code is specific to my own requirements, but most of it is reusable; I hope this helps.

Answer 1 (score: 0):

Apple has an example that does the same thing:

void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei *outSampleRate)
{
    OSStatus                        err = noErr;
    SInt64                          theFileLengthInFrames = 0;
    AudioStreamBasicDescription     theFileFormat;
    UInt32                          thePropertySize = sizeof(theFileFormat);
    ExtAudioFileRef                 extRef = NULL;
    void*                           theData = NULL;
    AudioStreamBasicDescription     theOutputFormat;

    // Open a file with ExtAudioFileOpen()
    err = ExtAudioFileOpenURL(inFileURL, &extRef);
    if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", (long)err); goto Exit; }

    // Get the audio data format
    err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
    if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", (long)err); goto Exit; }
    if (theFileFormat.mChannelsPerFrame > 2) { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit; }

    // Set the client format to 16 bit signed integer (native-endian) data
    // Maintain the channel count and sample rate of the original source format
    theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
    theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;

    theOutputFormat.mFormatID = kAudioFormatLinearPCM;
    theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
    theOutputFormat.mFramesPerPacket = 1;
    theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
    theOutputFormat.mBitsPerChannel = 16;
    theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;

    // Set the desired client (output) data format
    err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
    if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", (long)err); goto Exit; }

    // Get the total frame count
    thePropertySize = sizeof(theFileLengthInFrames);
    err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
    if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", (long)err); goto Exit; }

    // Read all the data into memory
    UInt32 theFramesToRead = (UInt32)theFileLengthInFrames;
    UInt32 dataSize = theFramesToRead * theOutputFormat.mBytesPerFrame;
    theData = malloc(dataSize);
    if (theData)
    {
        AudioBufferList     theDataBuffer;
        theDataBuffer.mNumberBuffers = 1;
        theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
        theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
        theDataBuffer.mBuffers[0].mData = theData;

        // Read the data into an AudioBufferList
        err = ExtAudioFileRead(extRef, &theFramesToRead, &theDataBuffer);
        if(err == noErr)
        {
            // success
            *outDataSize = (ALsizei)dataSize;
            *outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
            *outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
        }
        else
        {
            // failure
            free(theData);
            theData = NULL; // make sure to return NULL
            printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", (long)err);
            goto Exit;
        }
    }

Exit:
    // Dispose the ExtAudioFileRef, it is no longer needed
    if (extRef) ExtAudioFileDispose(extRef);
    return theData;
}

You can find the Sample Code Here; hope this helps.
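
For reference, a typical call site looks roughly like this (a sketch: it assumes an OpenAL context is already current, and sound.caf is a placeholder file name; alBufferData copies the samples, so the malloc'd block is freed afterwards):

#import <OpenAL/al.h>

ALsizei size, freq;
ALenum format;
NSURL *url = [[NSBundle mainBundle] URLForResource:@"sound" withExtension:@"caf"];
void *data = MyGetOpenALAudioData((__bridge CFURLRef)url, &size, &format, &freq);
if (data) {
    ALuint buffer;
    alGenBuffers(1, &buffer);
    alBufferData(buffer, format, data, size, freq);
    free(data);
    // the buffer can now be attached to a source with alSourcei(source, AL_BUFFER, buffer)
}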
