A little background on the workflow I'm trying to establish, along with what I've attempted so far to accomplish it. I want to pass audio from the input device through to the output device while downsampling it to 8000 Hz, single-channel (mono) audio in WAV format.
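(For reference, that means the converter has to turn 44100 Hz x 2 channels x 2 bytes = 176400 bytes/sec of input into 8000 Hz x 1 channel x 2 bytes = 16000 bytes/sec of output, roughly an 11:1 reduction.)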
All of the Core Audio C calls are wrapped up in Objective-C methods to make life easier. The basic init method that gets things kicked off does the following:
AudioStreamBasicDescription recordFormat;
memset(&recordFormat, 0, sizeof(recordFormat));
Here is the ASBD of the format I'm trying to end up with:
AudioStreamBasicDescription outFormat;
memset(&outFormat, 0, sizeof(outFormat));
outFormat.mFormatID = kAudioFormatLinearPCM;
outFormat.mSampleRate = 8000;
outFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
outFormat.mBitsPerChannel = 16;
outFormat.mChannelsPerFrame = 1;
outFormat.mFramesPerPacket = 1;
outFormat.mBytesPerPacket = 2;
outFormat.mBytesPerFrame = 2;
outFormat.mReserved = 0;
//the input/capture format; we need an audio converter to downsample from this to outFormat
recordFormat.mSampleRate = 44100;
recordFormat.mFormatID = kAudioFormatLinearPCM;
recordFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
recordFormat.mBitsPerChannel = 16;
recordFormat.mChannelsPerFrame = 2;
recordFormat.mFramesPerPacket = 1;
recordFormat.mBytesPerPacket = 4;
recordFormat.mBytesPerFrame = 4;
recordFormat.mReserved = 0;
CheckError(AudioConverterNew(&recordFormat, &outFormat, &audioConverter),
           "AudioConverterNew failed");
//3.
UInt32 propSize = sizeof(recordFormat);
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                  0,
                                  NULL,
                                  &propSize,
                                  &recordFormat), "AudioFormatGetProperty failed");
CheckError(AudioQueueNewInput(&recordFormat, MyAQInputCallback, (__bridge void *)self, NULL, NULL, 0, &recordQueue), "AudioQueueNewInput failed");
//5. This step might also be unnecessary
// Fills in the ASBD a little more
UInt32 size = sizeof(recordFormat);
CheckError(AudioQueueGetProperty(recordQueue,
                                 kAudioConverterCurrentOutputStreamDescription,
                                 &recordFormat,
                                 &size), "Couldn't get queue's format");
//6.
int bufferByteSize = [self computeRecordBufferSize:&recordFormat inAudioQueue:recordQueue withSeconds:.5];
//NSLog(@"%d",__LINE__);
//7. Create and Enqueue buffers
for (int bufferIndex = 0; bufferIndex < kNumberRecordBuffers; ++bufferIndex) {
    AudioQueueBufferRef buffer;
    CheckError(AudioQueueAllocateBuffer(recordQueue,
                                        bufferByteSize,
                                        &buffer), "AudioQueueAllocateBuffer failed");
    CheckError(AudioQueueEnqueueBuffer(recordQueue, buffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
}
CheckError(AudioQueueNewOutput(&recordFormat,
                               MyAQOutputCallback,
                               (__bridge void *)self, NULL, NULL, 0,
                               &playerQueue), "AudioQueueNewOutput failed");
UInt32 playBufferByteSize;
CalculateBytesForPlaythrough(recordQueue, recordFormat, 0.1, &playBufferByteSize, &(numPacketsToRead));
bool isFormatVBR = (recordFormat.mBytesPerPacket == 0
                    || recordFormat.mFramesPerPacket == 0);
if (isFormatVBR) {
    NSLog(@"Not supporting VBR");
    packetDescs = (AudioStreamPacketDescription *)malloc(sizeof(AudioStreamPacketDescription) * numPacketsToRead);
} else {
    packetDescs = NULL;
}
//start the queues up!
CheckError(AudioQueueStart(playerQueue, NULL), "AudioQueueStart failed");
CheckError(AudioQueueStart(recordQueue, NULL), "AudioQueueStart failed");
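In case it matters, computeRecordBufferSize is just a thin Objective-C wrapper around the usual record-buffer-size calculation from the Core Audio sample code; the sketch below is roughly what it does (paraphrased, this helper isn't where things go wrong):

- (int)computeRecordBufferSize:(const AudioStreamBasicDescription *)format
                  inAudioQueue:(AudioQueueRef)queue
                   withSeconds:(float)seconds
{
    int frames = (int)(seconds * format->mSampleRate);
    if (format->mBytesPerFrame > 0) {
        // constant bit rate (our case): bytes = frames * bytes per frame
        return frames * format->mBytesPerFrame;
    }
    // variable bit rate: fall back to the queue's maximum packet size
    UInt32 maxPacketSize = format->mBytesPerPacket;
    if (maxPacketSize == 0) {
        UInt32 propSize = sizeof(maxPacketSize);
        CheckError(AudioQueueGetProperty(queue,
                                         kAudioQueueProperty_MaximumOutputPacketSize,
                                         &maxPacketSize,
                                         &propSize), "couldn't get queue's max output packet size");
    }
    int packets = (format->mFramesPerPacket > 0) ? (frames / format->mFramesPerPacket) : frames;
    if (packets == 0) packets = 1;
    return packets * maxPacketSize;
}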
The C callbacks that hand things over to Objective-C are pretty cut and dried:
OSStatus MyAudioConverterCallback(AudioConverterRef inAudioConverter,
                                  UInt32 *ioDataPacketCount,
                                  AudioBufferList *ioData,
                                  AudioStreamPacketDescription **outDataPacketDescription,
                                  void *inUserData)
{
    CSServerSessionManager *myData = (__bridge CSServerSessionManager *)inUserData;
    [myData processConverter:inAudioConverter withPacketCount:ioDataPacketCount bufferList:ioData packetDescription:outDataPacketDescription];
    return 0;
}

static void MyAQInputCallback(void *inUserData,
                              AudioQueueRef inQueue,
                              AudioQueueBufferRef inBuffer,
                              const AudioTimeStamp *inStartTime,
                              UInt32 inNumPackets,
                              const AudioStreamPacketDescription *inPacketDesc)
{
    CSServerSessionManager *myData = (__bridge CSServerSessionManager *)inUserData;
    if (!myData.sessionActive) return;
    [myData handleAudioInQueue:inQueue withBuffer:inBuffer atTime:inStartTime withPackets:inNumPackets andDescription:inPacketDesc];
}
I KNOW I'm doing something wrong here, but I just don't know what. AudioConverterFillComplexBuffer could not be more convoluted and difficult to understand!
- (void)processConverter:(AudioConverterRef)inAudioConverter withPacketCount:(UInt32 *)ioDataPacketCount bufferList:(AudioBufferList *)ioData packetDescription:(AudioStreamPacketDescription **)outDataPacketDescription
{
    NSLog(@"iopacketcount: %u byteSize: %u", *ioDataPacketCount, currentAudioDataByteSize);
    // initialize in case of failure
    ioData->mBuffers[0].mData = NULL;
    ioData->mBuffers[0].mDataByteSize = 0;

    ioData->mBuffers[0].mData = currentAudioData;
    ioData->mBuffers[0].mDataByteSize = currentAudioDataByteSize;
}
(currentAudioData is just a void *currentAudioData declared in the header.)
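From what I can piece together from the docs, the input proc is supposed to report back how many packets it is actually supplying (and hand back zero packets once it runs out), otherwise the converter just keeps pulling the same bytes. So I suspect processConverter needs to look more like this untested sketch, where the method now returns an OSStatus that MyAudioConverterCallback would pass back instead of the hard-coded 0, and 4 is recordFormat.mBytesPerPacket for my 16-bit stereo input:

- (OSStatus)processConverter:(AudioConverterRef)inAudioConverter
             withPacketCount:(UInt32 *)ioDataPacketCount
                  bufferList:(AudioBufferList *)ioData
           packetDescription:(AudioStreamPacketDescription **)outDataPacketDescription
{
    UInt32 packetsAvailable = currentAudioDataByteSize / 4; // 4 = mBytesPerPacket of the 44.1 kHz stereo input

    if (packetsAvailable == 0) {
        // nothing left from the last queue buffer: handing back zero packets
        // tells the converter we're out of input for this FillComplexBuffer call
        *ioDataPacketCount = 0;
        return noErr;
    }

    // never promise more packets than we actually have on hand
    if (*ioDataPacketCount > packetsAvailable) {
        *ioDataPacketCount = packetsAvailable;
    }

    ioData->mBuffers[0].mNumberChannels = 2;
    ioData->mBuffers[0].mData = currentAudioData;
    ioData->mBuffers[0].mDataByteSize = *ioDataPacketCount * 4;

    // mark the data as consumed so the next callback doesn't hand out the same samples again
    currentAudioDataByteSize = 0;

    return noErr;
}

(That still assumes the converter never asks for fewer packets than I have on hand; otherwise I'd need to track an offset into currentAudioData instead of just zeroing the byte count.)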
I know I'm doing something wrong here too, I just have no idea what :(
- (void)handleAudioInQueue:(AudioQueueRef)inQueue
                withBuffer:(AudioQueueBufferRef)inBuffer
                    atTime:(const AudioTimeStamp *)inStartTime
               withPackets:(UInt32)inNumPackets
            andDescription:(const AudioStreamPacketDescription *)inPacketDesc
{
    //LOG_SELF_INFO;
    if (inNumPackets > 0) {
        outputBufferSize = 23 * 1024;
        packetsPerBuffer = outputBufferSize / 2; // 2 = bytes per packet of the 8 kHz output format

        // scratch buffer for the converter to write into, sized to the input buffer's capacity
        UInt8 *convertOutputBuffer = (UInt8 *)malloc(sizeof(UInt8) * inBuffer->mAudioDataBytesCapacity);

        AudioBufferList convertedData;
        convertedData.mNumberBuffers = 1;
        convertedData.mBuffers[0].mNumberChannels = 1;
        convertedData.mBuffers[0].mDataByteSize = inBuffer->mAudioDataBytesCapacity;
        convertedData.mBuffers[0].mData = convertOutputBuffer;

        // stash a copy of the incoming audio for the converter callback to pull from
        if (currentAudioData != NULL) {
            free(currentAudioData);
            currentAudioData = NULL;
        }
        currentAudioData = calloc(1, inBuffer->mAudioDataBytesCapacity);
        memcpy(currentAudioData, inBuffer->mAudioData, inBuffer->mAudioDataByteSize);
        currentAudioDataByteSize = inBuffer->mAudioDataByteSize;

        UInt32 ioOutputDataPackets = packetsPerBuffer;
        CheckError(AudioConverterFillComplexBuffer(audioConverter,
                                                   MyAudioConverterCallback,
                                                   (__bridge void *)self,
                                                   &inNumPackets,
                                                   &convertedData,
                                                   NULL), "fill complex buffer error");

        // Enqueue on the output queue!
        AudioQueueBufferRef outputBuffer;
        CheckError(AudioQueueAllocateBuffer(playerQueue, convertedData.mBuffers[0].mDataByteSize, &outputBuffer), "Input callback failed to allocate new output buffer");

        // copy the converted data into the output queue buffer
        memcpy(outputBuffer->mAudioData, convertedData.mBuffers[0].mData, convertedData.mBuffers[0].mDataByteSize);
        outputBuffer->mAudioDataByteSize = convertedData.mBuffers[0].mDataByteSize;

        // if we don't create a release pool here, things leak like crazy
        @autoreleasepool {
            // wrap the bytes in NSData so we can process and send them
            NSData *currentData = [NSData dataWithBytes:convertedData.mBuffers[0].mData length:convertedData.mBuffers[0].mDataByteSize];
            [outputHandle writeData:currentData]; // outputHandle is an NSFileHandle created in the init method
            //NSLog(@"data: %@",[NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize]);
        }

        // Assuming LPCM so no packet descriptions
        CheckError(AudioQueueEnqueueBuffer(playerQueue, outputBuffer, 0, NULL), "Enqueuing the buffer in input callback failed");
        recordPacket += inNumPackets;
    }
    if (self.sessionActive) {
        CheckError(AudioQueueEnqueueBuffer(inQueue, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    }
}
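For what it's worth, my current suspicion is that the call site is also wrong: I think I'm supposed to pass the output buffer's capacity in packets (the ioOutputDataPackets value I compute but never actually use) rather than &inNumPackets, and then use the converted packet count that comes back to size what I enqueue and write. Something like this untested sketch, where 2 is outFormat.mBytesPerPacket:

// capacity of the scratch buffer expressed in *output* packets
UInt32 ioOutputDataPackets = convertedData.mBuffers[0].mDataByteSize / 2;

CheckError(AudioConverterFillComplexBuffer(audioConverter,
                                           MyAudioConverterCallback,
                                           (__bridge void *)self,
                                           &ioOutputDataPackets, // in: capacity, out: packets actually converted
                                           &convertedData,
                                           NULL), "fill complex buffer error");

// size everything downstream by what the converter actually produced,
// not by the capacity of the input buffer
UInt32 convertedBytes = ioOutputDataPackets * 2;
convertedData.mBuffers[0].mDataByteSize = convertedBytes;

AudioQueueBufferRef outputBuffer;
CheckError(AudioQueueAllocateBuffer(playerQueue, convertedBytes, &outputBuffer), "Input callback failed to allocate new output buffer");
memcpy(outputBuffer->mAudioData, convertedData.mBuffers[0].mData, convertedBytes);
outputBuffer->mAudioDataByteSize = convertedBytes;

free(convertOutputBuffer); // once the NSData write is done; right now this scratch buffer never gets freed at all

Is that the right general shape, or am I still misunderstanding how FillComplexBuffer is meant to be driven from an Audio Queue callback?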
Sorry for the massive amount of code; I just want to be as thorough as possible when explaining the issue. I know I'm feeding the buffers incorrectly in AudioConverterFillComplexBuffer, I just can't wrap my head around how to use it with Audio Queues. I've literally trawled through years of the mailing list archives to no avail. I also tried incorporating the EZAudio project to use Audio Units instead, but it only played through in mono for some reason, the volume was reduced, and I couldn't figure out how to get downsampling or downmixing working there either.
So, long story short: with the converter in place, nothing plays through the output queue (I'm not as concerned about that until I can get downsampling/downmixing working properly anyway), and the output file I create starts off well enough but then begins repeating and skipping samples.
PLEASE HELP!!