
Fwd: "fmt?" error from AudioUnitInitialize on iOS simulator, but not device


  • Subject: Fwd: "fmt?" error from AudioUnitInitialize on iOS simulator, but not device
  • From: Jacob Bandes-Storch <email@hidden>
  • Date: Fri, 04 Jan 2013 22:37:08 -0800


Hi all. I am trying to make a simple (for starters) iOS application that passes audio data from the microphone into the FFT functions of Accelerate.framework. The code I have so far is listed below.
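
(For context, the Accelerate side I'm aiming for is roughly the sketch below. It is not part of the listing further down; it assumes the captured samples have already been converted to floats, and `samples`/`n` are placeholder names.)

#include <Accelerate/Accelerate.h>
#include <math.h>
#include <stdlib.h>

// Sketch only: forward real FFT of n mono float samples (n must be a power of two).
static void forwardFFT(const float *samples, vDSP_Length n)
{
    vDSP_Length log2n = (vDSP_Length)log2((double)n);
    FFTSetup setup = vDSP_create_fftsetup(log2n, kFFTRadix2);

    // Split-complex workspace for the packed real-to-complex transform.
    float *realp = calloc(n / 2, sizeof(float));
    float *imagp = calloc(n / 2, sizeof(float));
    DSPSplitComplex split = { .realp = realp, .imagp = imagp };

    // Pack even/odd samples into split-complex form, then transform in place.
    vDSP_ctoz((const DSPComplex *)samples, 2, &split, 1, n / 2);
    vDSP_fft_zrip(setup, &split, 1, log2n, kFFTDirection_Forward);

    // split.realp / split.imagp now hold the packed spectrum; magnitude math would go here.

    free(realp);
    free(imagp);
    vDSP_destroy_fftsetup(setup);
}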

On the simulator, where the audio session gives inputNumberOfChannels == 2, the call to AudioUnitInitialize(unit) returns the "fmt?" error. On an iPhone 5 device, where inputNumberOfChannels == 1, it gives no error.
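
(One diagnostic I may add, sketched roughly: once the unit from the listing below exists, ask the input element what format it reports on its hardware-facing side and log it. I'm assuming the input scope of the input element is the right place to look.)

// Sketch: log what the input element (bus 1) reports on its hardware-facing side.
AudioStreamBasicDescription hwFormat = {0};
UInt32 hwFormatSize = sizeof(hwFormat);
OSStatus fmtErr = AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Input, kInputElement,
                                       &hwFormat, &hwFormatSize);
if (fmtErr == noErr) {
    NSLog(@"Input element hardware format: %u channels, %u bits/channel, %u bytes/frame, %.0f Hz",
          (unsigned)hwFormat.mChannelsPerFrame, (unsigned)hwFormat.mBitsPerChannel,
          (unsigned)hwFormat.mBytesPerFrame, hwFormat.mSampleRate);
}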

The aurioTouch2 sample code uses the perplexing stream format description of mBytesPerFrame = 4, mChannelsPerFrame = 2, and mBitsPerChannel = 32 (which defies the expected relationship 8 * mBytesPerFrame = mBitsPerChannel * mChannelsPerFrame). And yet it works on both simulator and device. Furthermore, if I use these settings, I no longer get the "fmt?" error. How can this possibly be right? Why wouldn't what I have below work on both device and simulator?
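
(For reference, my best reconstruction of that aurioTouch2-style description follows; the fields not quoted above, such as the flags and packet values, are my assumptions rather than a verbatim copy from the sample.)

// Approximate reconstruction of the aurioTouch2-style format (flags/packet fields assumed):
AudioStreamBasicDescription aurioLikeDesc = {
    .mSampleRate       = session.sampleRate,   // session as declared in the listing below
    .mFormatID         = kAudioFormatLinearPCM,
    .mFormatFlags      = kAudioFormatFlagsAudioUnitCanonical,  // non-interleaved 8.24 fixed point
    .mBytesPerPacket   = 4,
    .mFramesPerPacket  = 1,
    .mBytesPerFrame    = 4,   // 4 bytes per frame, despite 2 channels of 32 bits each
    .mChannelsPerFrame = 2,
    .mBitsPerChannel   = 32,
};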

(Note: I haven't implemented my input callback function yet, but I doubt that affects the error I'm seeing. The rough shape I have in mind is sketched below.)
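
(This is only a sketch of the callback I intend to write, not working code; the static buffer and the mono assumption are placeholders.)

// Sketch of the input callback referenced as myInputCallback in the listing below.
static OSStatus myInputCallback(void                       *inRefCon,
                                AudioUnitRenderActionFlags *ioActionFlags,
                                const AudioTimeStamp       *inTimeStamp,
                                UInt32                      inBusNumber,
                                UInt32                      inNumberFrames,
                                AudioBufferList            *ioData)
{
    AudioUnit unit = (AudioUnit)inRefCon;

    // One non-interleaved mono buffer, assumed large enough for inNumberFrames canonical samples.
    static AudioUnitSampleType sampleBuffer[4096];
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize   = inNumberFrames * sizeof(AudioUnitSampleType);
    bufferList.mBuffers[0].mData           = sampleBuffer;

    // Pull the captured frames out of the input element, then hand them to the FFT code.
    OSStatus err = AudioUnitRender(unit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
    if (err == noErr) {
        // ...process bufferList.mBuffers[0].mData here...
    }
    return err;
}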

Thanks in advance!
Jacob Bandes-Storch

enum : AudioUnitElement {
    kOutputElement = 0,
    kInputElement = 1
};

const UInt32 disableFlag = 0;
const UInt32 enableFlag = 1;

OSStatus err = noErr;
NSError *error = nil;

// Configure & activate audio session
AVAudioSession *session = [AVAudioSession sharedInstance];
if (![session setCategory:AVAudioSessionCategoryRecord error:&error]) NSLog(@"Error configuring session category: %@", error);
if (![session setMode:AVAudioSessionModeMeasurement error:&error]) NSLog(@"Error configuring session mode: %@", error);
if (![session setActive:YES error:&error]) NSLog(@"Error activating audio session: %@", error);

NSLog(@"Session activated. sample rate %f", session.sampleRate);
NSLog(@"Number of channels %d", session.inputNumberOfChannels);
NSInteger numChannels = session.inputNumberOfChannels;

// Set up Remote I/O audio unit for audio capture
AudioComponent component = AudioComponentFindNext(NULL, &(const AudioComponentDescription){
    .componentType         = kAudioUnitType_Output,
    .componentSubType      = kAudioUnitSubType_RemoteIO,
    .componentManufacturer = kAudioUnitManufacturer_Apple,
    .componentFlags        = 0,
    .componentFlagsMask    = 0
});

AudioComponentInstance unit;

// Create audio component
err = AudioComponentInstanceNew(component, &unit);
if (err != noErr) NSLog(@"Error instantiating audio unit: %@", E2S(err));

// Enable input
err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputElement, &enableFlag, sizeof(enableFlag));
if (err != noErr) NSLog(@"Error enabling input for audio unit: %@", E2S(err));

// Disable output
err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputElement, &disableFlag, sizeof(disableFlag));
if (err != noErr) NSLog(@"Error disabling output for audio unit: %@", E2S(err));

// "The canonical audio sample type for audio units and other audio processing in iPhone OS is noninterleaved linear PCM with 8.24-bit fixed-point samples."
assert(kAudioFormatFlagsAudioUnitCanonical & kAudioFormatFlagIsNonInterleaved);

AudioStreamBasicDescription streamDesc = {
    .mSampleRate       = session.sampleRate,
    .mFormatID         = kAudioFormatLinearPCM,
    .mFormatFlags      = kAudioFormatFlagsAudioUnitCanonical /* matches AudioUnitSampleType */,
    .mBytesPerPacket   = sizeof(AudioUnitSampleType) * numChannels,
    .mFramesPerPacket  = 1,
    .mBytesPerFrame    = sizeof(AudioUnitSampleType) * numChannels,
    .mChannelsPerFrame = numChannels,
    .mBitsPerChannel   = 8 * sizeof(AudioUnitSampleType),
    .mReserved         = 0,
};

err = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputElement, &streamDesc, sizeof(streamDesc));
if (err != noErr) NSLog(@"Error configuring input stream format for audio unit: %@", E2S(err));

AURenderCallbackStruct callbacks = {
    .inputProc       = myInputCallback,
    .inputProcRefCon = unit
};
err = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Input, kOutputElement, &callbacks, sizeof(callbacks));
if (err != noErr) NSLog(@"Error configuring input callbacks for audio unit: %@", E2S(err));

err = AudioUnitInitialize(unit);
if (err != noErr) NSLog(@"Error initializing audio unit: %@", E2S(err));

err = AudioOutputUnitStart(unit);
if (err != noErr) NSLog(@"Error starting audio unit: %@", E2S(err));



