
Re: Export soundfile from/with RemoteIO


  • Subject: Re: Export soundfile from/with RemoteIO
  • From: Support <email@hidden>
  • Date: Wed, 01 Jun 2011 14:11:40 +1000

Hi

This has been asked a few times, and it can be a little tricky; it took me a day or two to get it working.

Here is what I'm doing with RemoteIO and an AUGraph: when I go to do an offline bounce (or render), I simply re-initialise the AUGraph with a generic output node in place of the RemoteIO node, then loop through the playback object and render each buffer out to the file.

Maybe someone will suggest a better solution.

regards
peter johnson
one red dog media



// Pass in a string for the wav file
OSStatus AudioDriver::offlineBounce(const std::string& filePath)
{
    AudioFileID audioFileID;
    ExtAudioFileRef audiofile;
    AudioStreamBasicDescription audioFormat = { 0 };
    OSStatus status = noErr;

    initGraph(true);

    // Set up the WAV output file
    CFURLRef fileURL = CFURLCreateFromFileSystemRepresentation(0, (const UInt8*)filePath.c_str(), filePath.length(), false);

    audioFormat.mFormatID     = kAudioFormatLinearPCM;
    audioFormat.mSampleRate         = kDefaultSoundRate;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mChannelsPerFrame   = 2;
    audioFormat.mBitsPerChannel     = sizeof(short) * 8;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mBytesPerFrame      = audioFormat.mBitsPerChannel * audioFormat.mChannelsPerFrame / 8;
    audioFormat.mBytesPerPacket     = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;
    audioFormat.mReserved           = 0;

    status = AudioFileCreateWithURL(fileURL, kAudioFileWAVEType, &audioFormat, kAudioFileFlags_EraseFile, &audioFileID);
    CFRelease(fileURL);
    if (status)
    {
        fprintf(stderr, "AudioFileCreateWithURL failure\n");
        return status;
    }

    status = ExtAudioFileWrapAudioFileID(audioFileID, true, &audiofile);
    if (status)
    {
        fprintf(stderr, "ExtAudioFileWrapAudioFileID failure\n");
        return status;
    }

    ExtAudioFileSetProperty(audiofile, kExtAudioFileProperty_ClientDataFormat, sizeof(audioFormat), &audioFormat);
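    // Priming call: the first ExtAudioFileWriteAsync should be made with 0 frames and a NULL
    // buffer to initialise the asynchronous write machinery before any real data is written.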
    status = ExtAudioFileWriteAsync(audiofile, 0, 0);
    if (status)
    {
        fprintf(stderr, "ExtAudioFileWriteAsync failure\n");
        return status;
    }

    // Write frames
    const UInt32 numFrames = 512;

    

    // One interleaved stereo buffer; the format above is 16-bit stereo, so 4 bytes per frame
    AudioBufferList* ioData = AllocateAudioBufferList(1, numFrames * audioFormat.mBytesPerFrame);
    AudioBufferList* partialData = AllocateAudioBufferList(1, numFrames * audioFormat.mBytesPerFrame);

    AudioTimeStamp timeStamp;
    UInt64 currentTime = 0;
    UInt64 sampleTime = 0;
    MidiScheduler::hostTimeInNanos = CAHostTimeBase::ConvertFromNanos(currentTime);

    FillOutAudioTimeStampWithSampleAndHostTime(timeStamp, sampleTime, currentTime);

    // TODO: Here you prep your playback object, sequencer, etc

    // ...

    // TODO: Here you calculate where the first sample will be rendered in case you have any startup latency
    UInt32 firstFrame = ...;
    // TODO: Here you calculate the length of the render
    UInt32 lastFrame = ...;
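    // (Illustration only; the exact values depend on your sequencer. If you know the song
    //  length in seconds, something along these lines would do:
    //      firstFrame = schedulerLookAheadFrames;
    //      lastFrame  = firstFrame + (UInt32)(songLengthInSeconds * kDefaultSoundRate);
    //  where schedulerLookAheadFrames and songLengthInSeconds are placeholders for your own values.)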

    bool first = true;

    while (timeStamp.mSampleTime < lastFrame)
    {
        AudioUnitRenderActionFlags actionFlags = 0;
        status = AudioUnitRender(remoteIOUnit, &actionFlags, &timeStamp, 0, numFrames, ioData);
        if (status)
        {
            fprintf(stderr, "AudioUnitRender failure %ld\n", (long)status);
        }

        

        if (first)
        {
            if (timeStamp.mSampleTime + numFrames > firstFrame) // skip "latency" in the scheduler look-ahead
            {
                UInt32 frameIndex = (firstFrame - (UInt64)timeStamp.mSampleTime) % numFrames;
                UInt32 frames = numFrames - frameIndex;
                memcpy(partialData->mBuffers[0].mData, ((UInt32*)ioData->mBuffers[0].mData) + frameIndex, frames * sizeof(UInt32));
                partialData->mBuffers[0].mDataByteSize = frames * sizeof(UInt32);
                status = ExtAudioFileWriteAsync(audiofile, frames, partialData);
                first = false;
            }
        }
        else
        {
            status = ExtAudioFileWriteAsync(audiofile, numFrames, ioData);
        }

        currentTime += (numFrames * 1000000000LL / kDefaultSoundRateInt);
        sampleTime += numFrames;

        

        timeStamp.mHostTime = CAHostTimeBase::ConvertFromNanos(currentTime);
        timeStamp.mSampleTime = sampleTime;
    }

    // Stop
    status = ExtAudioFileDispose(audiofile);
    if (status)
    {
        fprintf(stderr, "ExtAudioFileDispose failure\n");
        return status;
    }

    status = AudioFileClose(audioFileID);
    if (status)
    {
        fprintf(stderr, "AudioFileClose failure\n");
        return status;
    }

    DestroyAudioBufferList(ioData);
    DestroyAudioBufferList(partialData);

    // TODO: Here you'll stop & reset your playback object

    initGraph(false);

    song->setRepeat(true);

    return status;
}
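
If you don't already have the AllocateAudioBufferList / DestroyAudioBufferList helpers that get passed around in Apple sample code, a minimal version along these lines will do (sizes are in bytes, not frames; needs <AudioToolbox/AudioToolbox.h> and <stdlib.h>):

void DestroyAudioBufferList(AudioBufferList* list)
{
    if (!list)
        return;
    for (UInt32 i = 0; i < list->mNumberBuffers; ++i)
        free(list->mBuffers[i].mData);
    free(list);
}

AudioBufferList* AllocateAudioBufferList(UInt32 numBuffers, UInt32 dataByteSize)
{
    // Allocate the header plus one AudioBuffer per buffer. calloc zeroes everything,
    // so unfilled mData pointers stay NULL and cleanup is safe on failure.
    AudioBufferList* list = (AudioBufferList*)calloc(1, sizeof(AudioBufferList) + numBuffers * sizeof(AudioBuffer));
    if (!list)
        return NULL;

    list->mNumberBuffers = numBuffers;
    for (UInt32 i = 0; i < numBuffers; ++i)
    {
        list->mBuffers[i].mNumberChannels = 1;    // use 2 if the buffer holds interleaved stereo, as above
        list->mBuffers[i].mDataByteSize = dataByteSize;
        list->mBuffers[i].mData = calloc(1, dataByteSize);
        if (!list->mBuffers[i].mData)
        {
            DestroyAudioBufferList(list);
            return NULL;
        }
    }
    return list;
}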

// Takes a bool that determines whether audio output goes via RemoteIO or the offline generic output
void AudioDriver::initGraph(bool offline)
{
    OSErr err = noErr;

    try
    {
        if (graph)
        {
            DisposeAUGraph(graph);
        }

        

        // The graph
        err = NewAUGraph(&graph);
        XThrowIfError(err != noErr, "Error creating graph.");

        // The descriptions for the components
        AudioComponentDescription crossFaderMixerDescription;
        AudioComponentDescription masterFaderDescription;
        AudioComponentDescription outputDescription;

        // The cross fader mixer
        crossFaderMixerDescription.componentFlags = 0;
        crossFaderMixerDescription.componentFlagsMask = 0;
        crossFaderMixerDescription.componentType = kAudioUnitType_Mixer;
        crossFaderMixerDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
        crossFaderMixerDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
        err = AUGraphAddNode(graph, &crossFaderMixerDescription, &crossFaderMixerNode);
        XThrowIfError(err != noErr, "Error creating mixer node.");

            

        // The master mixer
        masterFaderDescription.componentFlags = 0;
        masterFaderDescription.componentFlagsMask = 0;
        masterFaderDescription.componentType = kAudioUnitType_Mixer;
        masterFaderDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
        masterFaderDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
        err = AUGraphAddNode(graph, &masterFaderDescription, &masterMixerNode);
        XThrowIfError(err != noErr, "Error creating mixer node.");

        

        // The device output
        outputDescription.componentFlags = 0;
        outputDescription.componentFlagsMask = 0;
        outputDescription.componentType = kAudioUnitType_Output;
        outputDescription.componentSubType = offline ? kAudioUnitSubType_GenericOutput : kAudioUnitSubType_RemoteIO;
        outputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
        err = AUGraphAddNode(graph, &outputDescription, &remoteIONode);
        XThrowIfError(err != noErr, "Error creating output node.");

        // Open the graph
        err = AUGraphOpen(graph);
        XThrowIfError(err != noErr, "Error opening graph.");

        

        // Get the cross fader
        err = AUGraphNodeInfo(graph, crossFaderMixerNode, &crossFaderMixerDescription, &crossFaderMixerUnit);
        // Get the master fader
        err = AUGraphNodeInfo(graph, masterMixerNode, &masterFaderDescription, &masterFaderMixerUnit);
        // Get the device output
        err = AUGraphNodeInfo(graph, remoteIONode, &outputDescription, &remoteIOUnit);

        // The cross fader mixer
        AURenderCallbackStruct callbackCrossFader;
        callbackCrossFader.inputProc = crossFaderMixerCallback;
        callbackCrossFader.inputProcRefCon = this;

        // Mixer channel 0
        err = AUGraphSetNodeInputCallback(graph, crossFaderMixerNode, 0, &callbackCrossFader);
        XThrowIfError(err != noErr, "Error setting render callback 0 Cross fader.");
        // Mixer channel 1
        err = AUGraphSetNodeInputCallback(graph, crossFaderMixerNode, 1, &callbackCrossFader);
        XThrowIfError(err != noErr, "Error setting render callback 1 Cross fader.");

        

        // Set up the master fader callback
        AURenderCallbackStruct playbackCallbackStruct;
        playbackCallbackStruct.inputProc = masterFaderCallback;
        playbackCallbackStruct.inputProcRefCon = this;

        

        err = AUGraphSetNodeInputCallback(graph, remoteIONode, 0, &playbackCallbackStruct);
        XThrowIfError(err != noErr, "Error setting effects callback.");

        // Describe format
        audioFormat.mFormatID = kAudioFormatLinearPCM;
        audioFormat.mSampleRate         = kDefaultSoundRate;
        audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        audioFormat.mChannelsPerFrame   = 2;
        audioFormat.mBitsPerChannel     = sizeof(short) * 8;
        audioFormat.mFramesPerPacket    = 1;
        audioFormat.mBytesPerFrame      = audioFormat.mBitsPerChannel * audioFormat.mChannelsPerFrame / 8;
        audioFormat.mBytesPerPacket     = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;
        audioFormat.mReserved           = 0;

        

        // Set the RemoteIO properties
        if (!offline)
        {
            err = AudioUnitSetProperty(remoteIOUnit, 
                               kAudioUnitProperty_StreamFormat, 
                               kAudioUnitScope_Input, 
                               0, 
                               &audioFormat, 
                               sizeof(audioFormat));
            XThrowIfError(err != noErr, "Error setting RIO input property.");
        }
        else
        {
            // Set the offline output properties
            err = AudioUnitSetProperty(remoteIOUnit, 
                                       kAudioUnitProperty_StreamFormat, 
                                       kAudioUnitScope_Input, 
                                       0, 
                                       &audioFormat, 
                                       sizeof(audioFormat));
            XThrowIfError(err != noErr, "Error setting output input property.");

            err = AudioUnitSetProperty(remoteIOUnit, 
                                       kAudioUnitProperty_StreamFormat, 
                                       kAudioUnitScope_Output, 
                                       0, 
                                       &audioFormat, 
                                       sizeof(audioFormat));
            XThrowIfError(err != noErr, "Error setting output output property.");

            err = AudioUnitSetProperty(remoteIOUnit, 
                                       kAudioUnitProperty_SampleRate, 
                                       kAudioUnitScope_Output, 
                                       0, 
                                       &kDefaultSoundRate, 
                                       sizeof(kDefaultSoundRate));
            XThrowIfError(err != noErr, "Error setting RIO output property.");
        }

        

        // Set the master fader properties
        err = AudioUnitSetProperty(masterFaderMixerUnit, 
                                   kAudioUnitProperty_StreamFormat, 
                                   kAudioUnitScope_Input, 
                                   0, 
                                   &audioFormat, 
                                   sizeof(audioFormat));
        XThrowIfError(err != noErr, "Error setting Master fader property.");

        

        err = AudioUnitSetProperty(masterFaderMixerUnit, 
                                   kAudioUnitProperty_StreamFormat, 
                                   kAudioUnitScope_Output, 
                                   0, 
                                   &audioFormat, 
                                   sizeof(audioFormat));
        XThrowIfError(err != noErr, "Error setting Master fader property.");

        // Set the crossfader properties for all channels
        err = AudioUnitSetProperty(crossFaderMixerUnit, 
                                   kAudioUnitProperty_StreamFormat, 
                                   kAudioUnitScope_Output, 
                                   0, 
                                   &audioFormat, 
                                   sizeof(audioFormat));
        XThrowIfError(err != noErr, "Error setting output property format 0.");

        err = AudioUnitSetProperty(crossFaderMixerUnit, 
                                   kAudioUnitProperty_StreamFormat, 
                                   kAudioUnitScope_Input, 
                                   0, 
                                   &audioFormat, 
                                   sizeof(audioFormat));
        XThrowIfError(err != noErr, "Error setting property format 0.");

        err = AudioUnitSetProperty(crossFaderMixerUnit, 
                                   kAudioUnitProperty_StreamFormat, 
                                   kAudioUnitScope_Input, 
                                   1, 
                                   &audioFormat, 
                                   sizeof(audioFormat));
        XThrowIfError(err != noErr, "Error setting property format 1.");

        // set the mixer unit to handle 4096 samples per slice since we want to keep rendering during screen lock
        UInt32 maxFPS = 4096;
        err = AudioUnitSetProperty(crossFaderMixerUnit,
                                   kAudioUnitProperty_MaximumFramesPerSlice,
                                   kAudioUnitScope_Global,
                                   0,
                                   &maxFPS,
                                   sizeof(maxFPS));
        XThrowIfError(err != noErr, "Error setting max frame slice.");

        

        err = AUGraphInitialize(graph);
        XThrowIfError(err != noErr, "Error initializing graph.");

        // Debug
        CAShow(graph); 

        

        // Start the graph
        err = AUGraphStart(graph);
        XThrowIfError(err != noErr, "Error starting graph.");
    }
    catch (CAXException e)
    {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        err = e.mError;
    }
}
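
(The crossFaderMixerCallback and masterFaderCallback render callbacks aren't shown here; they're ordinary AURenderCallbacks that pull from the playback engine. If you're wiring this up from scratch, the expected shape is just the standard signature, e.g. this placeholder that only renders silence:)

static OSStatus masterFaderCallback(void*                       inRefCon,
                                    AudioUnitRenderActionFlags* ioActionFlags,
                                    const AudioTimeStamp*       inTimeStamp,
                                    UInt32                      inBusNumber,
                                    UInt32                      inNumberFrames,
                                    AudioBufferList*            ioData)
{
    // inputProcRefCon was set to the AudioDriver instance above.
    AudioDriver* driver = static_cast<AudioDriver*>(inRefCon);
    (void)driver;

    // Fill ioData with your mixed audio here; this stub just writes silence.
    for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i)
        memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
    return noErr;
}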

void AudioDriver::initAudio()
{
    OSErr err = noErr;

    try
    {
        /*
         Getting the value of kAudioUnitProperty_ElementCount tells you how many elements you have in a scope. This happens to be 8 for this mixer.
         If you want to increase it, you need to set this property. 
         */
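        // For example (not done in this snippet), to give the mixer more input buses you would
        // set it on the mixer unit once it exists, something like:
        //     UInt32 busCount = 16;   // desired number of inputs
        //     AudioUnitSetProperty(crossFaderMixerUnit, kAudioUnitProperty_ElementCount,
        //                          kAudioUnitScope_Input, 0, &busCount, sizeof(busCount));
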
        // Initialize and configure the audio session, and add an interruption listener
        AudioSessionInitialize(0, 0, rioInterruptionListener, this);

        

        // Set the audio category
        UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
        AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory);

        UInt32 doSetProperty = 1;
        err = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(doSetProperty), &doSetProperty);
        XThrowIfError(err != noErr, "Error couldn't set up audio mix category.");

        

        // Read back the category that actually got set
        UInt32 propSize = sizeof(audioCategory);
        AudioSessionGetProperty(kAudioSessionProperty_AudioCategory, &propSize, &audioCategory);

        

        // Set the buffer size as small as we can
        Float32 preferredBufferSize = (float)kAudioBufferNumFrames / kDefaultSoundRate;
        AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize);

        

        // Set the audio session active
        AudioSessionSetActive(true);

        

        initGraph(false);
    }
    catch (CAXException e)
    {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        err = e.mError;
    }
}
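
For completeness, a hypothetical call site (the names here are placeholders, not part of the code above):

// E.g. from an "Export" button handler somewhere in the app.
void exportCurrentSong(AudioDriver* driver, const std::string& documentsDir)
{
    std::string path = documentsDir + "/bounce.wav";

    // Rebuilds the graph with the generic output, renders the song to disk, then restores RemoteIO.
    OSStatus status = driver->offlineBounce(path);
    if (status != noErr)
        fprintf(stderr, "Offline bounce failed: %ld\n", (long)status);
}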

On 01/06/2011, at 4:39 AM, Pascal wrote:

Hello,

I have an audio application with a sequencer using RemoteIO. What is the best way to export an audio sequence to a WAV soundfile? Does the export have to be in real-time, or can it be rendered faster?

Thanks,

Pascal