Created
January 6, 2023 18:04
-
-
Save wavesequencer/4fdbe198d1b9744ec0ba51ee5f54b677 to your computer and use it in GitHub Desktop.
Mac Audio Workgroups in JUCE AU plugins
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Note - main changes are 'toFnPtr' and additional 'createObjCBlockImplCopy' and 'CreateObjCBlockCopy' (this file is based on JUCE 7.0.3) | |
/* | |
============================================================================== | |
This file is part of the JUCE library. | |
Copyright (c) 2022 - Raw Material Software Limited | |
JUCE is an open source library subject to commercial or open-source | |
licensing. | |
The code included in this file is provided under the terms of the ISC license | |
http://www.isc.org/downloads/software-support-policy/isc-license. Permission
to use, copy, modify, and/or distribute this software for any purpose with or
without fee is hereby granted, provided that the above copyright notice and
this permission notice appear in all copies. | |
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER | |
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE | |
DISCLAIMED. | |
============================================================================== | |
*/ | |
#include "juce_mac_CFHelpers.h" | |
/* This file contains a few helper functions that are used internally but which | |
need to be kept away from the public headers because they use obj-C symbols. | |
*/ | |
namespace juce | |
{ | |
//============================================================================== | |
// Converts an NSRange (location + length) into a JUCE Range<int> (start, end).
inline Range<int> nsRangeToJuce (NSRange range)
{
    const auto start = (int) range.location;
    const auto end   = (int) (range.location + range.length);
    return { start, end };
}
// Converts a JUCE Range<int> back into an NSRange (location + length).
inline NSRange juceRangeToNS (Range<int> range)
{
    const auto location = (NSUInteger) range.getStart();
    const auto length   = (NSUInteger) range.getLength();
    return NSMakeRange (location, length);
}
// Converts an NSString's UTF-8 representation into a juce::String.
inline String nsStringToJuce (NSString* s)
{
    const auto* utf8 = [s UTF8String];
    return CharPointer_UTF8 (utf8);
}
// Converts a juce::String into an (autoreleased) NSString.
inline NSString* juceStringToNS (const String& s)
{
    const auto utf8 = s.toUTF8();
    return [NSString stringWithUTF8String: utf8];
}
// Wraps a null-terminated UTF-8 C string in an NSString.
// NOTE(review): stringWithUTF8String: must not be given a null pointer -
// confirm all callers pass a valid literal.
inline NSString* nsStringLiteral (const char* const s) noexcept
{
    return [NSString stringWithUTF8String: s];
}
// Returns an empty NSString.
inline NSString* nsEmptyString() noexcept
{
    return [NSString string];
}
// Creates a file:// NSURL from an absolute path held in a juce::String.
inline NSURL* createNSURLFromFile (const String& f)
{
    auto* path = juceStringToNS (f);
    return [NSURL fileURLWithPath: path];
}
// Creates a file:// NSURL referring to a juce::File's full path.
inline NSURL* createNSURLFromFile (const File& f)
{
    const auto path = f.getFullPathName();
    return createNSURLFromFile (path);
}
// Builds an autoreleased NSArray containing one NSString per element of the
// given StringArray.
inline NSArray* createNSArrayFromStringArray (const StringArray& strings)
{
    auto* result = [NSMutableArray arrayWithCapacity: (NSUInteger) strings.size()];

    for (const auto& s : strings)
        [result addObject: juceStringToNS (s)];

    return result;
}
inline NSArray* varArrayToNSArray (const var& varToParse); | |
// Recursively converts a juce::var holding a DynamicObject into an
// NSDictionary. Nested objects and arrays are converted recursively; any
// other value is stored as its string representation. Returns an empty
// dictionary when the var does not hold an object.
inline NSDictionary* varObjectToNSDictionary (const var& varToParse)
{
    auto* result = [NSMutableDictionary dictionary];

    if (! varToParse.isObject())
        return result;

    const auto& props = varToParse.getDynamicObject()->getProperties();

    for (int index = 0; index < props.size(); ++index)
    {
        auto* key = juceStringToNS (props.getName (index).toString());
        const auto& value = props.getValueAt (index);

        if (value.isObject())
            [result setObject: varObjectToNSDictionary (value) forKey: key];
        else if (value.isArray())
            [result setObject: varArrayToNSArray (value) forKey: key];
        else
            [result setObject: juceStringToNS (value.toString()) forKey: key];
    }

    return result;
}
// Converts a juce::var holding an array into an NSArray, converting nested
// objects and arrays recursively and everything else to its string form.
// Asserts and returns nil when the var is not an array.
inline NSArray* varArrayToNSArray (const var& varToParse)
{
    jassert (varToParse.isArray());

    if (! varToParse.isArray())
        return nil;

    const auto* source = varToParse.getArray();
    auto* result = [NSMutableArray arrayWithCapacity: (NSUInteger) source->size()];

    for (const auto& element : *source)
    {
        if (element.isObject())
            [result addObject: varObjectToNSDictionary (element)];
        else if (element.isArray())
            [result addObject: varArrayToNSArray (element)];
        else
            [result addObject: juceStringToNS (element.toString())];
    }

    return result;
}
var nsObjectToVar (NSObject* array); | |
// Converts an NSDictionary into a juce::var wrapping a DynamicObject, with
// each entry converted via nsObjectToVar().
inline var nsDictionaryToVar (NSDictionary* dictionary)
{
    DynamicObject::Ptr converted = new DynamicObject();

    for (NSString* key in dictionary)
    {
        auto* value = [dictionary objectForKey: key];
        converted->setProperty (nsStringToJuce (key), nsObjectToVar (value));
    }

    return var (converted.get());
}
// Converts an NSArray into a juce::var wrapping an Array<var>, converting
// each element via nsObjectToVar().
inline var nsArrayToVar (NSArray* array)
{
    Array<var> converted;

    for (id element in array)
        converted.add (nsObjectToVar (element));

    return var (converted);
}
// Converts a Foundation object to a juce::var: strings and numbers become
// strings, dictionaries and arrays are converted recursively. Unsupported
// classes trigger an assertion and yield a void var.
inline var nsObjectToVar (NSObject* obj)
{
    if ([obj isKindOfClass: [NSString class]])
        return nsStringToJuce ((NSString*) obj);

    if ([obj isKindOfClass: [NSNumber class]])
        return nsStringToJuce ([(NSNumber*) obj stringValue]);

    if ([obj isKindOfClass: [NSDictionary class]])
        return nsDictionaryToVar ((NSDictionary*) obj);

    if ([obj isKindOfClass: [NSArray class]])
        return nsArrayToVar ((NSArray*) obj);

    jassertfalse; // Unsupported class - add a conversion here if needed!
    return {};
}
#if JUCE_MAC | |
// Converts any JUCE rectangle type (int/float/double) into an NSRect.
template <typename RectangleType>
NSRect makeNSRect (const RectangleType& r) noexcept
{
    const auto x = static_cast<CGFloat> (r.getX());
    const auto y = static_cast<CGFloat> (r.getY());
    const auto w = static_cast<CGFloat> (r.getWidth());
    const auto h = static_cast<CGFloat> (r.getHeight());

    return NSMakeRect (x, y, w, h);
}
#endif | |
#if JUCE_INTEL
 // On Intel, structs larger than the register return budget (8 bytes on
 // 32-bit, 16 bytes on 64-bit) are returned via a hidden pointer, which
 // requires the objc_msgSendSuper_stret entry point instead of
 // objc_msgSendSuper. NeedsStret<T> encodes that decision per return type.
 template <typename T>
 struct NeedsStret
 {
    #if JUCE_32BIT
     static constexpr auto value = sizeof (T) > 8;
    #else
     static constexpr auto value = sizeof (T) > 16;
    #endif
 };

 // void returns never use the struct-return path.
 template <>
 struct NeedsStret<void> { static constexpr auto value = false; };

 // MetaSuperFn<T>::value is the correct raw msgSendSuper entry point for a
 // message returning T.
 template <typename T, bool b = NeedsStret<T>::value>
 struct MetaSuperFn { static constexpr auto value = objc_msgSendSuper_stret; };

 template <typename T>
 struct MetaSuperFn<T, false> { static constexpr auto value = objc_msgSendSuper; };
#else
 // Non-Intel (ARM) targets have no separate _stret entry point, so
 // objc_msgSendSuper is always used.
 template <typename>
 struct MetaSuperFn { static constexpr auto value = objc_msgSendSuper; };
#endif
// Sends a message to the superclass implementation (the equivalent of a
// [super ...] call) with an explicit return type and argument list. The raw
// msgSendSuper entry point (chosen by MetaSuperFn for the return type) is
// cast to a correctly-typed function pointer before the call, as the
// Objective-C runtime ABI requires.
template <typename SuperType, typename ReturnType, typename... Params>
inline ReturnType ObjCMsgSendSuper (id self, SEL sel, Params... params)
{
    using SuperFn = ReturnType (*) (struct objc_super*, SEL, Params...);
    const auto fn = reinterpret_cast<SuperFn> (MetaSuperFn<ReturnType>::value);

    objc_super s = { self, [SuperType class] };
    return fn (&s, sel, params...);
}
//============================================================================== | |
// Deleter functor that sends -release to an NSObject; used below to build a
// unique_ptr type for Objective-C objects.
struct NSObjectDeleter
{
    void operator() (NSObject* object) const noexcept
    {
        if (object == nullptr)
            return;

        [object release];
    }
};

// std::unique_ptr whose deleter releases the held NSObject subclass.
template <typename NSType>
using NSUniquePtr = std::unique_ptr<NSType, NSObjectDeleter>;
/* This has very similar semantics to NSUniquePtr, with the main difference that it doesn't | |
automatically add a pointer to the managed type. This makes it possible to declare | |
scoped handles to id or block types. | |
*/ | |
template <typename T>
class ObjCObjectHandle
{
public:
    ObjCObjectHandle() = default;

    // Note that this does *not* retain the argument - the handle takes over
    // the caller's ownership.
    explicit ObjCObjectHandle (T ptr) : item (ptr) {}

    ~ObjCObjectHandle() noexcept { reset(); }

    // Copying retains, so both handles own a reference.
    ObjCObjectHandle (const ObjCObjectHandle& other)
        : item (other.item)
    {
        if (item != nullptr)
            [item retain];
    }

    // Copy-and-swap: strongly exception-safe, releases the old reference via
    // the temporary's destructor.
    ObjCObjectHandle& operator= (const ObjCObjectHandle& other)
    {
        auto copy = other;
        swap (copy);
        return *this;
    }

    // Moving transfers ownership without touching the retain count.
    ObjCObjectHandle (ObjCObjectHandle&& other) noexcept { swap (other); }

    ObjCObjectHandle& operator= (ObjCObjectHandle&& other) noexcept
    {
        reset();
        swap (other);
        return *this;
    }

    // Note that this does *not* retain the argument; the previously held
    // reference is released.
    void reset (T ptr) { *this = ObjCObjectHandle { ptr }; }

    T get() const { return item; }

    // Releases and clears the held reference.
    void reset()
    {
        if (item != nullptr)
            [item release];

        item = {};
    }

    bool operator== (const ObjCObjectHandle& other) const { return item == other.item; }
    bool operator!= (const ObjCObjectHandle& other) const { return ! (*this == other); }

    bool operator== (std::nullptr_t) const { return item == nullptr; }
    bool operator!= (std::nullptr_t) const { return ! (*this == nullptr); }

private:
    void swap (ObjCObjectHandle& other) noexcept { std::swap (other.item, item); }

    T item{};
};
//============================================================================== | |
namespace detail
{
    // Compile-time string building, used to assemble Objective-C type-encoding
    // strings for class_addMethod. The base case is the empty (terminated)
    // string.
    constexpr auto makeCompileTimeStr()
    {
        return std::array<char, 1> { { '\0' } };
    }

    // Concatenates two compile-time strings, dropping their terminators and
    // appending a single trailing '\0'.
    template <typename A, size_t... As, typename B, size_t... Bs>
    constexpr auto joinCompileTimeStrImpl (A&& a, std::index_sequence<As...>,
                                           B&& b, std::index_sequence<Bs...>)
    {
        return std::array<char, sizeof... (As) + sizeof... (Bs) + 1> { { a[As]..., b[Bs]..., '\0' } };
    }

    // Joins a string literal (length A includes its terminator) with an
    // already-built std::array string.
    template <size_t A, size_t B>
    constexpr auto joinCompileTimeStr (const char (&a)[A], std::array<char, B> b)
    {
        return joinCompileTimeStrImpl (a, std::make_index_sequence<A - 1>(),
                                       b, std::make_index_sequence<B - 1>());
    }

    // Variadic front-end: concatenates any number of string literals.
    template <size_t A, typename... Others>
    constexpr auto makeCompileTimeStr (const char (&v)[A], Others&&... others)
    {
        return joinCompileTimeStr (v, makeCompileTimeStr (others...));
    }

    // Converts a stateless functor (e.g. a capture-less lambda) into a plain
    // C function pointer by inspecting its call operator's signature. This is
    // what lets ObjCClass::addMethod accept lambdas as method
    // implementations.
    template <typename Functor, typename Return, typename... Args>
    static constexpr auto toFnPtr (Functor functor, Return (Functor::*) (Args...) const)
    {
        return static_cast<Return (*) (Args...)> (functor);
    }

    template <typename Functor>
    static constexpr auto toFnPtr (Functor functor) { return toFnPtr (functor, &Functor::operator()); }
} // namespace detail
//============================================================================== | |
// Reads the named instance variable from an Objective-C object and casts it
// to the requested pointer type. Yields null when the lookup leaves the
// output untouched.
template <typename Type>
inline Type getIvar (id self, const char* name)
{
    void* value = nullptr;
    object_getInstanceVariable (self, name, &value);
    return static_cast<Type> (value);
}
// Helper for defining a new Objective-C class at runtime, derived from
// SuperclassType. The class name is randomised so multiple plugin instances
// (possibly loaded from different binaries) cannot collide in the global
// class table.
template <typename SuperclassType>
struct ObjCClass
{
    ObjCClass (const char* nameRoot)
        : cls (objc_allocateClassPair ([SuperclassType class], getRandomisedName (nameRoot).toUTF8(), 0))
    {
    }

    ~ObjCClass()
    {
        // If the runtime has generated a KVO subclass of this class (named
        // "NSKVONotifying_<name>"), the class pair is deliberately not
        // disposed - disposing a class that still has a live subclass is
        // unsafe.
        auto kvoSubclassName = String ("NSKVONotifying_") + class_getName (cls);

        if (objc_getClass (kvoSubclassName.toUTF8()) == nullptr)
            objc_disposeClassPair (cls);
    }

    // Must be called once, after all ivars/methods/protocols have been added.
    void registerClass()
    {
        objc_registerClassPair (cls);
    }

    SuperclassType* createInstance() const
    {
        return class_createInstance (cls, 0);
    }

    // Adds an instance variable of the given type.
    // NOTE(review): the alignment is computed as log2(sizeof(Type)), which
    // assumes sizeof(Type) is a power of two - confirm for any new ivar type.
    template <typename Type>
    void addIvar (const char* name)
    {
        BOOL b = class_addIvar (cls, name, sizeof (Type), (uint8_t) rint (log2 (sizeof (Type))), @encode (Type));
        jassert (b); ignoreUnused (b);
    }

    // Convenience overload: accepts a capture-less lambda and converts it to
    // a function pointer via detail::toFnPtr.
    template <typename Fn>
    void addMethod (SEL selector, Fn callbackFn) { addMethod (selector, detail::toFnPtr (callbackFn)); }

    // Registers a method implementation; the type-encoding string is built at
    // compile time from the return and argument types.
    template <typename Result, typename... Args>
    void addMethod (SEL selector, Result (*callbackFn) (id, SEL, Args...))
    {
        const auto s = detail::makeCompileTimeStr (@encode (Result), @encode (id), @encode (SEL), @encode (Args)...);
        const auto b = class_addMethod (cls, selector, (IMP) callbackFn, s.data());
        jassertquiet (b);
    }

    void addProtocol (Protocol* protocol)
    {
        BOOL b = class_addProtocol (cls, protocol);
        jassert (b); ignoreUnused (b);
    }

    // Forwards a message to the superclass implementation (a [super ...] call).
    template <typename ReturnType, typename... Params>
    static ReturnType sendSuperclassMessage (id self, SEL sel, Params... params)
    {
        return ObjCMsgSendSuper<SuperclassType, ReturnType, Params...> (self, sel, params...);
    }

    Class cls;

private:
    static String getRandomisedName (const char* root)
    {
        return root + String::toHexString (juce::Random::getSystemRandom().nextInt64());
    }

    JUCE_DECLARE_NON_COPYABLE (ObjCClass)
};
//============================================================================== | |
#ifndef DOXYGEN | |
// Runtime-generated NSObject subclass that owns a C++ object: the JuceClass
// instance passed to -initWithJuceObject: is stored in an ivar and deleted
// when the NSObject is deallocated.
template <class JuceClass>
struct ObjCLifetimeManagedClass : public ObjCClass<NSObject>
{
    ObjCLifetimeManagedClass()
        : ObjCClass<NSObject> ("ObjCLifetimeManagedClass_")
    {
        addIvar<JuceClass*> ("cppObject");

        JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wundeclared-selector")
        addMethod (@selector (initWithJuceObject:), initWithJuceObject);
        JUCE_END_IGNORE_WARNINGS_GCC_LIKE

        addMethod (@selector (dealloc), dealloc);

        registerClass();
    }

    // Designated initialiser: stores the C++ object pointer in the ivar.
    static id initWithJuceObject (id _self, SEL, JuceClass* obj)
    {
        NSObject* self = sendSuperclassMessage<NSObject*> (_self, @selector (init));
        object_setInstanceVariable (self, "cppObject", obj);
        return self;
    }

    // Deletes the owned C++ object, then forwards to [super dealloc].
    static void dealloc (id _self, SEL)
    {
        if (auto* obj = getIvar<JuceClass*> (_self, "cppObject"))
        {
            delete obj;
            object_setInstanceVariable (_self, "cppObject", nullptr);
        }

        sendSuperclassMessage<void> (_self, @selector (dealloc));
    }

    // One shared class definition per JuceClass template instantiation.
    static ObjCLifetimeManagedClass objCLifetimeManagedClass;
};

template <typename Class>
ObjCLifetimeManagedClass<Class> ObjCLifetimeManagedClass<Class>::objCLifetimeManagedClass;
#endif | |
// this will return an NSObject which takes ownership of the JUCE instance passed-in | |
// This is useful to tie the life-time of a juce instance to the life-time of an NSObject | |
template <typename Class>
NSObject* createNSObjectFromJuceClass (Class* obj)
{
    // initWithJuceObject: only exists on the runtime-generated class, hence
    // the suppressed -Wobjc-method-access warning.
    JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wobjc-method-access")
    return [ObjCLifetimeManagedClass<Class>::objCLifetimeManagedClass.createInstance() initWithJuceObject:obj];
    JUCE_END_IGNORE_WARNINGS_GCC_LIKE
}
// Get the JUCE class instance that was tied to the life-time of an NSObject
// by createNSObjectFromJuceClass(); returns nullptr for a null NSObject.
template <typename Class>
Class* getJuceClassFromNSObject (NSObject* obj)
{
    if (obj == nullptr)
        return nullptr;

    return getIvar<Class*> (obj, "cppObject");
}
namespace detail
{
    // Signature<R (Args...)> is a tag type used to carry a member function's
    // call signature through overload resolution.
    template <typename> struct Signature;
    template <typename R, typename... A> struct Signature<R (A...)> {};

    template <typename Class, typename Result, typename... Args>
    constexpr auto getSignature (Result (Class::*) (Args...)) { return Signature<Result (Args...)>{}; }

    template <typename Class, typename Result, typename... Args>
    constexpr auto getSignature (Result (Class::*) (Args...) const) { return Signature<Result (Args...)>{}; }

    // Creates an *autoreleased* Objective-C block forwarding to a member
    // function of 'object'. The object pointer and member-function pointer
    // are captured with __block storage and moved to the heap by 'copy'.
    template <typename Class, typename Fn, typename Result, typename... Params>
    auto createObjCBlockImpl (Class* object, Fn func, Signature<Result (Params...)>)
    {
        __block auto _this = object;
        __block auto _func = func;

        return [[^Result (Params... params) { return (_this->*_func) (params...); } copy] autorelease];
    }

    // As createObjCBlockImpl, but returns the block with a +1 retain count
    // and no autorelease: the caller owns it and must release it. This is the
    // variant added for callbacks that must outlive the current autorelease
    // pool (e.g. the AU render-context observer).
    template <typename Class, typename Fn, typename Result, typename... Params>
    auto createObjCBlockImplCopy (Class* object, Fn func, Signature<Result (Params...)>)
    {
        __block auto _this = object;
        __block auto _func = func;

        return [^Result (Params... params) { return (_this->*_func) (params...); } copy];
    }
} // namespace detail
// Returns an autoreleased block that forwards its arguments to the given
// member function of 'object'.
template <typename Class, typename MemberFunc>
auto CreateObjCBlock (Class* object, MemberFunc fn)
{
    return detail::createObjCBlockImpl (object, fn, detail::getSignature (fn));
}
// As CreateObjCBlock, but the returned block is retained (+1) rather than
// autoreleased - the caller is responsible for releasing it.
template <typename Class, typename MemberFunc>
auto CreateObjCBlockCopy (Class* object, MemberFunc fn)
{
    return detail::createObjCBlockImplCopy (object, fn, detail::getSignature (fn));
}
// RAII owner of an Objective-C block of the given type: copies on
// construction/assignment, releases on destruction.
template <typename BlockType>
class ObjCBlock
{
public:
    ObjCBlock()  { block = nullptr; }

    // Creates a block forwarding to a member function of '_this'.
    template <typename R, class C, typename... P>
    ObjCBlock (C* _this, R (C::*fn)(P...))  : block (CreateObjCBlock (_this, fn)) {}

    // Takes a copy of an existing block.
    ObjCBlock (BlockType b) : block ([b copy]) {}

    ObjCBlock& operator= (const BlockType& other)
    {
        // Copy the incoming block *before* releasing the old one: if 'other'
        // aliases the currently-held block, releasing first could deallocate
        // it and make the subsequent copy read freed memory.
        BlockType newCopy = [other copy];

        if (block != nullptr)
            [block release];

        block = newCopy;
        return *this;
    }

    bool operator== (const void* ptr) const  { return ((const void*) block == ptr); }
    bool operator!= (const void* ptr) const  { return ((const void*) block != ptr); }

    ~ObjCBlock() { if (block != nullptr) [block release]; }

    // Implicit conversion so an ObjCBlock can be passed wherever the raw
    // block type is expected.
    operator BlockType() const { return block; }

private:
    BlockType block;
};
//============================================================================== | |
// RAII registration with [NSNotificationCenter defaultCenter]: the observer
// is added on construction and removed again on destruction. Movable but not
// copyable.
class ScopedNotificationCenterObserver
{
public:
    ScopedNotificationCenterObserver() = default;

    ScopedNotificationCenterObserver (id observerIn, SEL selector, NSNotificationName nameIn, id objectIn)
        : observer (observerIn), name (nameIn), object (objectIn)
    {
        [[NSNotificationCenter defaultCenter] addObserver: observer
                                                 selector: selector
                                                     name: name
                                                   object: object];
    }

    ~ScopedNotificationCenterObserver()
    {
        // NOTE(review): removal is skipped when 'name' is null - confirm that
        // callers never register with a nil notification name, since the
        // registration above would still have taken place.
        if (observer != nullptr && name != nullptr)
        {
            [[NSNotificationCenter defaultCenter] removeObserver: observer
                                                            name: name
                                                          object: object];
        }
    }

    // Move leaves 'other' in the default (unregistered) state.
    ScopedNotificationCenterObserver (ScopedNotificationCenterObserver&& other) noexcept
    {
        swap (other);
    }

    // Move-and-swap: the temporary carries away this object's previous state
    // and deregisters it when it is destroyed.
    ScopedNotificationCenterObserver& operator= (ScopedNotificationCenterObserver&& other) noexcept
    {
        auto moved = std::move (other);
        swap (moved);
        return *this;
    }

    ScopedNotificationCenterObserver (const ScopedNotificationCenterObserver&) = delete;
    ScopedNotificationCenterObserver& operator= (const ScopedNotificationCenterObserver&) = delete;

private:
    void swap (ScopedNotificationCenterObserver& other) noexcept
    {
        std::swap (other.observer, observer);
        std::swap (other.name, name);
        std::swap (other.object, object);
    }

    id observer = nullptr;
    NSNotificationName name = nullptr;
    id object = nullptr;
};
} // namespace juce |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Note - main changes are in class AudioProcessorHolder (this file is based on JUCE 7.0.3) | |
// also see 'renderContextObserverCallback' and 'case kAudioUnitProperty_RenderContextObserver:' | |
/* | |
============================================================================== | |
This file is part of the JUCE library. | |
Copyright (c) 2022 - Raw Material Software Limited | |
JUCE is an open source library subject to commercial or open-source | |
licensing. | |
By using JUCE, you agree to the terms of both the JUCE 7 End-User License | |
Agreement and JUCE Privacy Policy. | |
End User License Agreement: www.juce.com/juce-7-licence | |
Privacy Policy: www.juce.com/juce-privacy-policy | |
Or: You may also use this code under the terms of the GPL v3 (see | |
www.gnu.org/licenses). | |
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER | |
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE | |
DISCLAIMED. | |
============================================================================== | |
*/ | |
#include <juce_core/system/juce_TargetPlatform.h> | |
#include <juce_core/system/juce_CompilerWarnings.h> | |
#include "../utility/juce_CheckSettingMacros.h" | |
#if JucePlugin_Build_AU | |
JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wshorten-64-to-32", | |
"-Wunused-parameter", | |
"-Wdeprecated-declarations", | |
"-Wsign-conversion", | |
"-Wconversion", | |
"-Woverloaded-virtual", | |
"-Wextra-semi", | |
"-Wcast-align", | |
"-Wshadow", | |
"-Wswitch-enum", | |
"-Wzero-as-null-pointer-constant", | |
"-Wnullable-to-nonnull-conversion", | |
"-Wgnu-zero-variadic-macro-arguments", | |
"-Wformat-pedantic", | |
"-Wdeprecated-anon-enum-enum-conversion") | |
#include "../utility/juce_IncludeSystemHeaders.h" | |
#include <AudioUnit/AUCocoaUIView.h> | |
#include <AudioUnit/AudioUnit.h> | |
#include <AudioToolbox/AudioUnitUtilities.h> | |
#include <CoreMIDI/MIDIServices.h> | |
#include <QuartzCore/QuartzCore.h> | |
#include "AudioUnitSDK/MusicDeviceBase.h" | |
JUCE_END_IGNORE_WARNINGS_GCC_LIKE | |
#define JUCE_CORE_INCLUDE_OBJC_HELPERS 1 | |
#include "../utility/juce_IncludeModuleHeaders.h" | |
#include <juce_audio_basics/native/juce_mac_CoreAudioLayouts.h> | |
#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h> | |
#include <juce_audio_processors/format_types/juce_LegacyAudioParameter.cpp> | |
#include <juce_audio_processors/format_types/juce_AU_Shared.h> | |
#if JucePlugin_Enable_ARA | |
#include <juce_audio_processors/utilities/ARA/juce_AudioProcessor_ARAExtensions.h> | |
#include <ARA_API/ARAAudioUnit.h> | |
#if ARA_SUPPORT_VERSION_1 | |
#error "Unsupported ARA version - only ARA version 2 and onward are supported by the current JUCE ARA implementation" | |
#endif | |
#endif | |
#include <set> | |
//============================================================================== | |
using namespace juce;

// Every live wrapper instance and open editor is tracked here; the JUCE GUI
// subsystem is initialised by the first and shut down by the last (see the
// AudioProcessorHolder constructor and ~JuceAU).
static Array<void*> activePlugins, activeUIs;

// Custom AU property id - presumably used to expose the wrapped processor to
// in-process clients (GetPropertyInfo reports its size as two pointers);
// confirm against the full wrapper source.
static const AudioUnitPropertyID juceFilterObjectPropertyID = 0x1a45ffe9;

// Lets JUCE smart-pointer containers own CFStringRefs by calling CFRelease.
template <> struct ContainerDeletePolicy<const __CFString> { static void destroy (const __CFString* o) { if (o != nullptr) CFRelease (o); } };
// Make sure the audio processor is initialized before the AUBase class, and
// manage registration of the processor's realtime threads with the host's
// os_workgroup (Audio Workgroups, macOS 11+).
class AudioProcessorHolder
{
public:
    AudioProcessorHolder (bool initialiseGUI)
    {
        if (initialiseGUI)
            initialiseJuce_GUI();

        juceFilter.reset (createPluginFilterOfType (AudioProcessor::wrapperType_AudioUnit));

        // audio units do not have a notion of enabled or un-enabled buses
        juceFilter->enableAllBuses();
    }

    ~AudioProcessorHolder()
    {
        ScopedLock lock (threadWorkGroupRegistrationLock);

        // For the unlikely event that threads took too long to exit and are
        // still registered: leave the workgroup on their behalf so no stale
        // memberships remain.
        for (auto it = threadTokenList.begin(); it != threadTokenList.end(); ++it)
        {
            if (@available (macOS 11.0, *))
            {
                os_workgroup_leave (it->second.workgroup, it->second.token);
            }
            else
            {
                // Fallback on earlier versions: nothing to clean up.
            }
        }

        threadTokenList.clear();
    }

    // Called when the host reports a (new) render workgroup. This is set after
    // the audio unit is loaded; it may also change later, e.g. after a device
    // change triggers a new prepare-to-play cycle.
    void osWorkgroupChangeNotifyFilter (os_workgroup_t workgroup)
    {
        ScopedLock lock (threadWorkGroupRegistrationLock);
        currentWorkgroup = workgroup;

        juceFilter->setWorkgroupJoinFunctionPointer ([this] (void* threadId)
        {
            ScopedLock innerLock (threadWorkGroupRegistrationLock);
            joinCurrentAuWorkGroup (threadId);
        });

        juceFilter->setWorkgroupLeaveFunctionPointer ([this] (void* threadId)
        {
            ScopedLock innerLock (threadWorkGroupRegistrationLock);
            leaveCurrentAuWorkGroup (threadId);
        });
    }

    void nullWorkgroupEvent()
    {
        // Nothing to do yet - kept as a hook for when the host clears the
        // render workgroup.
    }

    std::unique_ptr<AudioProcessor> juceFilter;

    // Call from a realtime thread after it starts, before its processing loop.
    // Returns 1 on success, -1 on failure (no workgroup available yet, the
    // thread is already in a non-nestable workgroup (EALREADY), the workgroup
    // was cancelled (EINVAL), or pre-macOS-11).
    int joinCurrentAuWorkGroup (void* threadId)
    {
        if (@available (macOS 11.0, *))
        {
            // The host may not have provided a workgroup yet - joining a null
            // workgroup would be undefined behaviour.
            if (currentWorkgroup == nullptr)
                return -1;

            thread_local os_workgroup_join_token_s joinToken {};
            const int result = os_workgroup_join (currentWorkgroup, &joinToken);

            if (result == 0)
            {
                // Only remember the token when the join succeeded, so that
                // cleanup never calls os_workgroup_leave() with an invalid
                // token.
                threadTokenList.emplace_back (threadId, WorkgroupAndToken { currentWorkgroup, &joinToken });
                return 1;
            }

            return -1;
        }

        // Fallback on earlier versions: workgroups are unavailable.
        return -1;
    }

    // Call before the realtime thread exits: leaves the workgroup and forgets
    // the thread's registration. Safe to call for a thread that never joined.
    void leaveCurrentAuWorkGroup (void* threadId)
    {
        for (auto it = threadTokenList.begin(); it != threadTokenList.end(); ++it)
        {
            if (it->first == threadId)
            {
                if (@available (macOS 11.0, *))
                {
                    os_workgroup_leave (it->second.workgroup, it->second.token);
                }
                else
                {
                    // Fallback on earlier versions: nothing joined, nothing to leave.
                }

                // Erase only the entry that was actually found - erasing end()
                // (as would happen for an unregistered thread) is undefined
                // behaviour.
                threadTokenList.erase (it);
                return;
            }
        }
    }

private:
    // Null until the host reports a workgroup via osWorkgroupChangeNotifyFilter().
    os_workgroup_t currentWorkgroup = nullptr;

    // Guards currentWorkgroup and threadTokenList.
    CriticalSection threadWorkGroupRegistrationLock;

    struct WorkgroupAndToken
    {
        os_workgroup_t workgroup;
        os_workgroup_join_token_t token;
    };

    // One entry per realtime thread that has successfully joined the workgroup.
    std::list<std::pair<void*, WorkgroupAndToken>> threadTokenList;
};
//============================================================================== | |
class JuceAU : public AudioProcessorHolder, | |
public ausdk::MusicDeviceBase, | |
public AudioProcessorListener, | |
public AudioProcessorParameter::Listener | |
{ | |
public: | |
// The AudioProcessorHolder base creates the processor first (initialising the
// JUCE GUI subsystem if this is the first live instance); its bus counts then
// feed the MusicDeviceBase constructor.
JuceAU (AudioUnit component)
    : AudioProcessorHolder (activePlugins.size() + activeUIs.size() == 0),
      MusicDeviceBase (component,
                       (UInt32) AudioUnitHelpers::getBusCountForWrapper (*juceFilter, true),
                       (UInt32) AudioUnitHelpers::getBusCountForWrapper (*juceFilter, false))
{
    inParameterChangedCallback = false;

   #ifdef JucePlugin_PreferredChannelConfigurations
    // Fixed channel configurations: publish them as AUChannelInfo and start
    // with the first configuration.
    short configs[][2] = {JucePlugin_PreferredChannelConfigurations};
    const int numConfigs = sizeof (configs) / sizeof (short[2]);

    jassert (numConfigs > 0 && (configs[0][0] > 0 || configs[0][1] > 0));
    juceFilter->setPlayConfigDetails (configs[0][0], configs[0][1], 44100.0, 1024);

    for (int i = 0; i < numConfigs; ++i)
    {
        AUChannelInfo info;
        info.inChannels = configs[i][0];
        info.outChannels = configs[i][1];
        channelInfo.add (info);
    }
   #else
    channelInfo = AudioUnitHelpers::getAUChannelInfo (*juceFilter);
   #endif

    AddPropertyListener (kAudioUnitProperty_ContextName, auPropertyListenerDispatcher, this);

    totalInChannels = juceFilter->getTotalNumInputChannels();
    totalOutChannels = juceFilter->getTotalNumOutputChannels();

    juceFilter->addListener (this);
    addParameters();

    activePlugins.add (this);

    // Pre-fill the reusable parameter-change event; only the parameter id
    // changes per notification.
    zerostruct (auEvent);
    auEvent.mArgument.mParameter.mAudioUnit = GetComponentInstance();
    auEvent.mArgument.mParameter.mScope = kAudioUnitScope_Global;
    auEvent.mArgument.mParameter.mElement = 0;

    zerostruct (midiCallback);

    CreateElements();

    if (syncAudioUnitWithProcessor() != noErr)
        jassertfalse;
}
~JuceAU() override
{
    if (bypassParam != nullptr)
        bypassParam->removeListener (this);

    deleteActiveEditors();
    juceFilter = nullptr;
    clearPresetsArray();

    jassert (activePlugins.contains (this));
    activePlugins.removeFirstMatchingValue (this);

    // The last live instance (with no editors open) shuts the JUCE GUI
    // subsystem down again.
    if (activePlugins.size() + activeUIs.size() == 0)
        shutdownJuce_GUI();
}
//============================================================================== | |
// AU initialisation: syncs the processor's layout with the host's element
// configuration, then allocates the channel mapper / per-bus pull flags and
// prepares the processor for playback.
ComponentResult Initialize() override
{
    ComponentResult err;

    if ((err = syncProcessorWithAudioUnit()) != noErr)
        return err;

    if ((err = MusicDeviceBase::Initialize()) != noErr)
        return err;

    mapper.alloc (*juceFilter);
    pulledSucceeded.calloc (static_cast<size_t> (AudioUnitHelpers::getBusCountForWrapper (*juceFilter, true)));

    prepareToPlay();

    return noErr;
}
// Releases everything acquired in Initialize()/prepareToPlay(), in reverse
// order, and marks the wrapper as unprepared.
void Cleanup() override
{
    MusicDeviceBase::Cleanup();

    pulledSucceeded.free();
    mapper.release();

    if (juceFilter != nullptr)
        juceFilter->releaseResources();

    audioBuffer.release();
    midiEvents.clear();
    incomingEvents.clear();
    prepared = false;
}
// AU reset: makes sure buffers are prepared, resets the processor, then
// forwards to the base class.
ComponentResult Reset (AudioUnitScope inScope, AudioUnitElement inElement) override
{
    if (! prepared)
        prepareToPlay();

    if (juceFilter != nullptr)
        juceFilter->reset();

    return MusicDeviceBase::Reset (inScope, inElement);
}
//============================================================================== | |
// Prepares the processor and scratch buffers for the host's current sample
// rate and maximum slice size.
void prepareToPlay()
{
    if (juceFilter != nullptr)
    {
        juceFilter->setRateAndBufferSizeDetails (getSampleRate(), (int) GetMaxFramesPerSlice());

        // The scratch buffer is sized slightly larger (+32 frames) than the
        // host's maximum slice.
        audioBuffer.prepare (AudioUnitHelpers::getBusesLayout (juceFilter.get()), (int) GetMaxFramesPerSlice() + 32);
        juceFilter->prepareToPlay (getSampleRate(), (int) GetMaxFramesPerSlice());

        midiEvents.ensureSize (2048);
        midiEvents.clear();
        incomingEvents.ensureSize (2048);
        incomingEvents.clear();

        prepared = true;
    }
}
//============================================================================== | |
// Reports whether the host may change the number of buses in this scope.
bool BusCountWritable (AudioUnitScope scope) override
{
   #ifdef JucePlugin_PreferredChannelConfigurations
    // Fixed channel configurations: the bus layout can never change.
    ignoreUnused (scope);
    return false;
   #else
    bool isInput;

    if (scopeToDirection (scope, isInput) != noErr)
        return false;

    #if JucePlugin_IsMidiEffect
     return false;
    #elif JucePlugin_IsSynth
     // Synths expose no writable input buses.
     if (isInput) return false;
    #endif

    const int busCount = AudioUnitHelpers::getBusCount (*juceFilter, isInput);

    return (juceFilter->canAddBus (isInput) || (busCount > 0 && juceFilter->canRemoveBus (isInput)));
   #endif
}
// Changes the number of buses in the given scope, keeping the JUCE
// processor's bus configuration in sync with the AU elements. On failure the
// previous bus count is restored and kAudioUnitErr_FormatNotSupported is
// returned.
OSStatus SetBusCount (AudioUnitScope scope, UInt32 count) override
{
    OSStatus err = noErr;
    bool isInput;

    if ((err = scopeToDirection (scope, isInput)) != noErr)
        return err;

    if (count != (UInt32) AudioUnitHelpers::getBusCount (*juceFilter, isInput))
    {
       #ifdef JucePlugin_PreferredChannelConfigurations
        return kAudioUnitErr_PropertyNotWritable;
       #else
        const int busCount = AudioUnitHelpers::getBusCount (*juceFilter, isInput);

        if ((! juceFilter->canAddBus (isInput)) && ((busCount == 0) || (! juceFilter->canRemoveBus (isInput))))
            return kAudioUnitErr_PropertyNotWritable;

        // we need to already create the underlying elements so that we can change their formats
        err = MusicDeviceBase::SetBusCount (scope, count);

        if (err != noErr)
            return err;

        // however we do need to update the format tag: we need to do the same thing in SetFormat, for example
        const int requestedNumBus = static_cast<int> (count);
        {
            (isInput ? currentInputLayout : currentOutputLayout).resize (requestedNumBus);

            int busNr;

            // Step from the old bus count towards the requested one, adding
            // or removing one bus at a time; stop early if the processor
            // refuses a step.
            for (busNr = (busCount - 1); busNr != (requestedNumBus - 1); busNr += (requestedNumBus > busCount ? 1 : -1))
            {
                if (requestedNumBus > busCount)
                {
                    if (! juceFilter->addBus (isInput))
                        break;

                    err = syncAudioUnitWithChannelSet (isInput, busNr,
                                                       juceFilter->getBus (isInput, busNr + 1)->getDefaultLayout());
                    if (err != noErr)
                        break;
                }
                else
                {
                    if (! juceFilter->removeBus (isInput))
                        break;
                }
            }

            // If the loop stopped before reaching the target, report failure.
            err = (busNr == (requestedNumBus - 1) ? (OSStatus) noErr : (OSStatus) kAudioUnitErr_FormatNotSupported);
        }

        // was there an error?
        if (err != noErr)
        {
            // restore bus state
            const int newBusCount = AudioUnitHelpers::getBusCount (*juceFilter, isInput);
            for (int i = newBusCount; i != busCount; i += (busCount > newBusCount ? 1 : -1))
            {
                if (busCount > newBusCount)
                    juceFilter->addBus (isInput);
                else
                    juceFilter->removeBus (isInput);
            }

            (isInput ? currentInputLayout : currentOutputLayout).resize (busCount);
            MusicDeviceBase::SetBusCount (scope, static_cast<UInt32> (busCount));

            return kAudioUnitErr_FormatNotSupported;
        }

        // update total channel count
        totalInChannels = juceFilter->getTotalNumInputChannels();
        totalOutChannels = juceFilter->getTotalNumOutputChannels();

        addSupportedLayoutTagsForDirection (isInput);

        if (err != noErr)
            return err;
       #endif
    }

    return noErr;
}
// Publishes the channel-count configurations held in the channelInfo array.
// A null outInfo is allowed: the host may call once just to query the count.
UInt32 SupportedNumChannels (const AUChannelInfo** outInfo) override
{
    if (outInfo != nullptr)
        *outInfo = channelInfo.getRawDataPointer();

    return static_cast<UInt32> (channelInfo.size());
}
//============================================================================== | |
// Reports the data size and writability of each custom property this wrapper
// handles; anything unrecognised is forwarded to the AU base class.
ComponentResult GetPropertyInfo (AudioUnitPropertyID inID,
AudioUnitScope inScope,
AudioUnitElement inElement,
UInt32& outDataSize,
bool& outWritable) override
{
// All of the wrapper's custom properties live in the global scope.
if (inScope == kAudioUnitScope_Global)
{
switch (inID)
{
// Read-only block pointer through which the host reports workgroup changes.
case kAudioUnitProperty_RenderContextObserver:
outWritable = false;
outDataSize = sizeof(AURenderContextObserver);
return noErr;
// Private property: exposes the AudioProcessor* and this wrapper (two pointers).
case juceFilterObjectPropertyID:
outWritable = false;
outDataSize = sizeof (void*) * 2;
return noErr;
case kAudioUnitProperty_OfflineRender:
outWritable = true;
outDataSize = sizeof (UInt32);
return noErr;
case kMusicDeviceProperty_InstrumentCount:
outDataSize = sizeof (UInt32);
outWritable = false;
return noErr;
case kAudioUnitProperty_CocoaUI:
outDataSize = sizeof (AudioUnitCocoaViewInfo);
outWritable = true;
return noErr;
#if JucePlugin_ProducesMidiOutput || JucePlugin_IsMidiEffect
case kAudioUnitProperty_MIDIOutputCallbackInfo:
outDataSize = sizeof (CFArrayRef);
outWritable = false;
return noErr;
case kAudioUnitProperty_MIDIOutputCallback:
outDataSize = sizeof (AUMIDIOutputCallbackStruct);
outWritable = true;
return noErr;
#endif
case kAudioUnitProperty_ParameterStringFromValue:
outDataSize = sizeof (AudioUnitParameterStringFromValue);
outWritable = false;
return noErr;
case kAudioUnitProperty_ParameterValueFromString:
outDataSize = sizeof (AudioUnitParameterValueFromString);
outWritable = false;
return noErr;
case kAudioUnitProperty_BypassEffect:
outDataSize = sizeof (UInt32);
outWritable = true;
return noErr;
case kAudioUnitProperty_SupportsMPE:
outDataSize = sizeof (UInt32);
outWritable = false;
return noErr;
#if JucePlugin_Enable_ARA
// ARA handshake properties (guarded by a magic number in GetProperty).
case ARA::kAudioUnitProperty_ARAFactory:
outWritable = false;
outDataSize = sizeof (ARA::ARAAudioUnitFactory);
return noErr;
case ARA::kAudioUnitProperty_ARAPlugInExtensionBindingWithRoles:
outWritable = false;
outDataSize = sizeof (ARA::ARAAudioUnitPlugInExtensionBinding);
return noErr;
#endif
default: break;
}
}
return MusicDeviceBase::GetPropertyInfo (inID, inScope, inElement, outDataSize, outWritable);
}
void renderContextObserverCallback(const AudioUnitRenderContext *context) | |
{ | |
if (context) | |
{ | |
osWorkgroupChangeNotifyFilter(context->workgroup); | |
} | |
else | |
{ | |
/** | |
The new workgroup may be null in the case of a nonreal-time | |
render context, or a real-time thread that is not part of any | |
workgroup. | |
*/ | |
nullWorkgroupEvent(); | |
} | |
} | |
// Returns the value of the wrapper's custom properties; unhandled IDs are
// forwarded to the AU base class. CF objects written to outData follow the
// usual AU convention: the caller takes ownership and must release them.
ComponentResult GetProperty (AudioUnitPropertyID inID,
                             AudioUnitScope inScope,
                             AudioUnitElement inElement,
                             void* outData) override
{
    if (inScope == kAudioUnitScope_Global)
    {
        switch (inID)
        {
            case kAudioUnitProperty_RenderContextObserver:
            {
                if (auto* auRenderContextObserver = (AURenderContextObserver*) outData)
                {
                    // Hand the host a copied ObjC block that forwards
                    // workgroup changes to this instance.
                    *auRenderContextObserver = CreateObjCBlockCopy (this, &JuceAU::renderContextObserverCallback);
                    return noErr;
                }
                break;
            }

            case kAudioUnitProperty_ParameterClumpName:
                if (auto* clumpNameInfo = (AudioUnitParameterNameInfo*) outData)
                {
                    if (juceFilter != nullptr)
                    {
                        // Clump IDs are 1-based indices into parameterGroups.
                        auto clumpIndex = clumpNameInfo->inID - 1;
                        const auto* group = parameterGroups[(int) clumpIndex];
                        auto name = group->getName();

                        // Build the full path of nested group names up to the root.
                        while (group->getParent() != &juceFilter->getParameterTree())
                        {
                            group = group->getParent();
                            name = group->getName() + group->getSeparator() + name;
                        }

                        clumpNameInfo->outName = name.toCFString();
                        return noErr;
                    }
                }
                // Failed to find a group corresponding to the clump ID.
                jassertfalse;
                break;

            //==============================================================================
           #if JucePlugin_Enable_ARA
            case ARA::kAudioUnitProperty_ARAFactory:
            {
                auto auFactory = static_cast<ARA::ARAAudioUnitFactory*> (outData);
                if (auFactory->inOutMagicNumber != ARA::kARAAudioUnitMagic)
                    return kAudioUnitErr_InvalidProperty;  // if the magic value isn't found, the property ID is re-used outside the ARA context with different, unsupported sematics

                auFactory->outFactory = createARAFactory();
                return noErr;
            }

            case ARA::kAudioUnitProperty_ARAPlugInExtensionBindingWithRoles:
            {
                auto binding = static_cast<ARA::ARAAudioUnitPlugInExtensionBinding*> (outData);
                if (binding->inOutMagicNumber != ARA::kARAAudioUnitMagic)
                    return kAudioUnitErr_InvalidProperty;  // if the magic value isn't found, the property ID is re-used outside the ARA context with different, unsupported sematics

                // FIX: the cast result was previously dereferenced unconditionally,
                // crashing if the processor doesn't implement the ARA extension.
                auto* araAudioProcessorExtension = dynamic_cast<AudioProcessorARAExtension*> (juceFilter.get());

                if (araAudioProcessorExtension == nullptr)
                    return kAudioUnitErr_CannotDoInCurrentContext;

                binding->outPlugInExtension = araAudioProcessorExtension->bindToARA (binding->inDocumentControllerRef, binding->knownRoles, binding->assignedRoles);

                if (binding->outPlugInExtension == nullptr)
                    return kAudioUnitErr_CannotDoInCurrentContext;  // bindToARA() returns null if binding is already established

                return noErr;
            }
           #endif

            case juceFilterObjectPropertyID:
                // Private handshake: expose the processor and this wrapper.
                ((void**) outData)[0] = (void*) static_cast<AudioProcessor*> (juceFilter.get());
                ((void**) outData)[1] = (void*) this;
                return noErr;

            case kAudioUnitProperty_OfflineRender:
                *(UInt32*) outData = (juceFilter != nullptr && juceFilter->isNonRealtime()) ? 1 : 0;
                return noErr;

            case kMusicDeviceProperty_InstrumentCount:
                *(UInt32*) outData = 1;
                return noErr;

            case kAudioUnitProperty_BypassEffect:
                // Prefer the processor's own bypass parameter when it has one.
                if (bypassParam != nullptr)
                    *(UInt32*) outData = (bypassParam->getValue() != 0.0f ? 1 : 0);
                else
                    *(UInt32*) outData = isBypassed ? 1 : 0;
                return noErr;

            case kAudioUnitProperty_SupportsMPE:
                *(UInt32*) outData = (juceFilter != nullptr && juceFilter->supportsMPE()) ? 1 : 0;
                return noErr;

            case kAudioUnitProperty_CocoaUI:
            {
                JUCE_AUTORELEASEPOOL
                {
                    static JuceUICreationClass cls;

                    // (NB: this may be the host's bundle, not necessarily the component's)
                    NSBundle* bundle = [NSBundle bundleForClass: cls.cls];

                    AudioUnitCocoaViewInfo* info = static_cast<AudioUnitCocoaViewInfo*> (outData);
                    // Retained: ownership passes to the caller per the CocoaUI contract.
                    info->mCocoaAUViewClass[0]      = (CFStringRef) [juceStringToNS (class_getName (cls.cls)) retain];
                    info->mCocoaAUViewBundleLocation = (CFURLRef) [[NSURL fileURLWithPath: [bundle bundlePath]] retain];
                }

                return noErr;
            }

           #if JucePlugin_ProducesMidiOutput || JucePlugin_IsMidiEffect
            case kAudioUnitProperty_MIDIOutputCallbackInfo:
            {
                // The host owns (and releases) the returned array.
                CFStringRef strs[1];
                strs[0] = CFSTR ("MIDI Callback");

                CFArrayRef callbackArray = CFArrayCreate (nullptr, (const void**) strs, 1, &kCFTypeArrayCallBacks);
                *(CFArrayRef*) outData = callbackArray;
                return noErr;
            }
           #endif

            case kAudioUnitProperty_ParameterValueFromString:
            {
                if (AudioUnitParameterValueFromString* pv = (AudioUnitParameterValueFromString*) outData)
                {
                    if (juceFilter != nullptr)
                    {
                        if (auto* param = getParameterForAUParameterID (pv->inParamID))
                        {
                            const String text (String::fromCFString (pv->inString));

                            // Legacy parameters parse the text directly; modern ones
                            // map through the parameter's own text conversion.
                            if (LegacyAudioParameter::isLegacy (param))
                                pv->outValue = text.getFloatValue();
                            else
                                pv->outValue = param->getValueForText (text) * getMaximumParameterValue (param);

                            return noErr;
                        }
                    }
                }
            }
            break;

            case kAudioUnitProperty_ParameterStringFromValue:
            {
                if (AudioUnitParameterStringFromValue* pv = (AudioUnitParameterStringFromValue*) outData)
                {
                    if (juceFilter != nullptr)
                    {
                        if (auto* param = getParameterForAUParameterID (pv->inParamID))
                        {
                            const float value = (float) *(pv->inValue);
                            String text;

                            if (LegacyAudioParameter::isLegacy (param))
                                text = String (value);
                            else
                                text = param->getText (value / getMaximumParameterValue (param), 0);

                            pv->outString = text.toCFString();
                            return noErr;
                        }
                    }
                }
            }
            break;

            default:
                break;
        }
    }

    return MusicDeviceBase::GetProperty (inID, inScope, inElement, outData);
}
// Applies host writes to the wrapper's custom properties; unhandled IDs are
// forwarded to the AU base class. Each case validates inDataSize before
// dereferencing inData.
ComponentResult SetProperty (AudioUnitPropertyID inID,
                             AudioUnitScope inScope,
                             AudioUnitElement inElement,
                             const void* inData,
                             UInt32 inDataSize) override
{
    if (inScope == kAudioUnitScope_Global)
    {
        switch (inID)
        {
           #if JucePlugin_ProducesMidiOutput || JucePlugin_IsMidiEffect
            case kAudioUnitProperty_MIDIOutputCallback:
                if (inDataSize < sizeof (AUMIDIOutputCallbackStruct))
                    return kAudioUnitErr_InvalidPropertyValue;

                if (AUMIDIOutputCallbackStruct* callbackStruct = (AUMIDIOutputCallbackStruct*) inData)
                    midiCallback = *callbackStruct;

                return noErr;
           #endif

            case kAudioUnitProperty_BypassEffect:
            {
                if (inDataSize < sizeof (UInt32))
                    return kAudioUnitErr_InvalidPropertyValue;

                const bool newBypass = *((UInt32*) inData) != 0;
                const bool currentlyBypassed = (bypassParam != nullptr ? (bypassParam->getValue() != 0.0f) : isBypassed);

                if (newBypass != currentlyBypassed)
                {
                    if (bypassParam != nullptr)
                        bypassParam->setValueNotifyingHost (newBypass ? 1.0f : 0.0f);
                    else
                        isBypassed = newBypass;

                    if (! currentlyBypassed && IsInitialized()) // turning bypass off and we're initialized
                        Reset (0, 0);
                }

                return noErr;
            }

            case kAudioUnitProperty_OfflineRender:
            {
                // FIX: previously dereferenced inData without validating the size,
                // unlike every other case in this switch.
                if (inDataSize < sizeof (UInt32))
                    return kAudioUnitErr_InvalidPropertyValue;

                const auto shouldBeOffline = (*reinterpret_cast<const UInt32*> (inData) != 0);

                if (juceFilter != nullptr)
                {
                    const auto isOffline = juceFilter->isNonRealtime();

                    if (isOffline != shouldBeOffline)
                    {
                        // Re-prepare under the callback lock so the change can't
                        // race with an in-flight render.
                        const ScopedLock sl (juceFilter->getCallbackLock());

                        juceFilter->setNonRealtime (shouldBeOffline);

                        if (prepared)
                            juceFilter->prepareToPlay (getSampleRate(), (int) GetMaxFramesPerSlice());
                    }
                }

                return noErr;
            }

            case kAudioUnitProperty_AUHostIdentifier:
            {
                if (inDataSize < sizeof (AUHostVersionIdentifier))
                    return kAudioUnitErr_InvalidPropertyValue;

                // Remember the host's name so PluginHostType can report it.
                const auto* identifier = static_cast<const AUHostVersionIdentifier*> (inData);
                PluginHostType::hostIdReportedByWrapper = String::fromCFString (identifier->hostName);

                return noErr;
            }

            default: break;
        }
    }

    return MusicDeviceBase::SetProperty (inID, inScope, inElement, inData, inDataSize);
}
//============================================================================== | |
// Saves the AU preset: lets the base class write the standard dictionary, then
// appends the processor's serialised state under the JUCE-specific key.
ComponentResult SaveState (CFPropertyListRef* outData) override
{
ComponentResult err = MusicDeviceBase::SaveState (outData);
if (err != noErr)
return err;
// The base class is documented (by this jassert) to hand back a dictionary.
jassert (CFGetTypeID (*outData) == CFDictionaryGetTypeID());
CFMutableDictionaryRef dict = (CFMutableDictionaryRef) *outData;
if (juceFilter != nullptr)
{
juce::MemoryBlock state;
#if JUCE_AU_WRAPPERS_SAVE_PROGRAM_STATES
juceFilter->getCurrentProgramStateInformation (state);
#else
juceFilter->getStateInformation (state);
#endif
if (state.getSize() > 0)
{
// CFUniquePtr releases our references; the dictionary retains its own copies.
CFUniquePtr<CFDataRef> ourState (CFDataCreate (kCFAllocatorDefault, (const UInt8*) state.getData(), (CFIndex) state.getSize()));
CFUniquePtr<CFStringRef> key (CFStringCreateWithCString (kCFAllocatorDefault, JUCE_STATE_DICTIONARY_KEY, kCFStringEncodingUTF8));
CFDictionarySetValue (dict, key.get(), ourState.get());
}
}
return noErr;
}
// Restores an AU preset: the base class handles the standard keys (minus the
// raw parameter data, which would fight with the processor's own state), then
// the JUCE-serialised blob is handed to the processor.
ComponentResult RestoreState (CFPropertyListRef inData) override
{
// While restoring, suppress outgoing parameter/property notifications.
const ScopedValueSetter<bool> scope { restoringState, true };
{
// Remove the data entry from the state to prevent the superclass loading the parameters
CFUniquePtr<CFMutableDictionaryRef> copyWithoutData (CFDictionaryCreateMutableCopy (nullptr, 0, (CFDictionaryRef) inData));
CFDictionaryRemoveValue (copyWithoutData.get(), CFSTR (kAUPresetDataKey));
ComponentResult err = MusicDeviceBase::RestoreState (copyWithoutData.get());
if (err != noErr)
return err;
}
if (juceFilter != nullptr)
{
CFDictionaryRef dict = (CFDictionaryRef) inData;
// Get-rule: 'data' is borrowed from the dictionary, so no release needed.
CFDataRef data = nullptr;
CFUniquePtr<CFStringRef> key (CFStringCreateWithCString (kCFAllocatorDefault, JUCE_STATE_DICTIONARY_KEY, kCFStringEncodingUTF8));
bool valuePresent = CFDictionaryGetValueIfPresent (dict, key.get(), (const void**) &data);
if (valuePresent)
{
if (data != nullptr)
{
const int numBytes = (int) CFDataGetLength (data);
const juce::uint8* const rawBytes = CFDataGetBytePtr (data);
if (numBytes > 0)
{
#if JUCE_AU_WRAPPERS_SAVE_PROGRAM_STATES
juceFilter->setCurrentProgramStateInformation (rawBytes, numBytes);
#else
juceFilter->setStateInformation (rawBytes, numBytes);
#endif
}
}
}
}
return noErr;
}
//============================================================================== | |
bool busIgnoresLayout (bool isInput, int busNr) const | |
{ | |
#ifdef JucePlugin_PreferredChannelConfigurations | |
ignoreUnused (isInput, busNr); | |
return true; | |
#else | |
if (const AudioProcessor::Bus* bus = juceFilter->getBus (isInput, busNr)) | |
{ | |
AudioChannelSet discreteRangeSet; | |
const int n = bus->getDefaultLayout().size(); | |
for (int i = 0; i < n; ++i) | |
discreteRangeSet.addChannel ((AudioChannelSet::ChannelType) (256 + i)); | |
// if the audioprocessor supports this it cannot | |
// really be interested in the bus layouts | |
return bus->isLayoutSupported (discreteRangeSet); | |
} | |
return true; | |
#endif | |
} | |
// Reports the current channel layout of a bus as a tag-only AudioChannelLayout.
// Returns 0 (and outWritable = false) for invalid elements or for buses that
// only care about channel counts.
UInt32 GetAudioChannelLayout (AudioUnitScope scope,
                              AudioUnitElement element,
                              AudioChannelLayout* outLayoutPtr,
                              bool& outWritable) override
{
    outWritable = false;

    const auto info = getElementInfo (scope, element);

    if (info.error != noErr || busIgnoresLayout (info.isInput, info.busNr))
        return 0;

    outWritable = true;

    // Tag-only layout: no trailing AudioChannelDescription array is included.
    const size_t sizeInBytes = sizeof (AudioChannelLayout) - sizeof (AudioChannelDescription);

    if (outLayoutPtr != nullptr)
    {
        zeromem (outLayoutPtr, sizeInBytes);
        outLayoutPtr->mChannelLayoutTag = getCurrentLayout (info.isInput, info.busNr);
    }

    return sizeInBytes;
}
// Lists the layout tags a bus supports; empty for invalid elements or for
// buses that only care about channel counts.
std::vector<AudioChannelLayoutTag> GetChannelLayoutTags (AudioUnitScope inScope, AudioUnitElement inElement) override
{
    const auto info = getElementInfo (inScope, inElement);

    if (info.error == noErr && ! busIgnoresLayout (info.isInput, info.busNr))
        return getSupportedBusLayouts (info.isInput, info.busNr);

    return {};
}
// Applies a host-requested channel layout to a bus. The layout may only be
// changed to one with the same channel count as the current stream format;
// otherwise the stream format itself must change first.
OSStatus SetAudioChannelLayout (AudioUnitScope scope, AudioUnitElement element, const AudioChannelLayout* inLayout) override
{
const auto info = getElementInfo (scope, element);
if (info.error != noErr)
return info.error;
// Buses that ignore layouts expose the property as read-only.
if (busIgnoresLayout (info.isInput, info.busNr))
return kAudioUnitErr_PropertyNotWritable;
if (inLayout == nullptr)
return kAudioUnitErr_InvalidPropertyValue;
auto& ioElement = IOElement (info.isInput ? kAudioUnitScope_Input : kAudioUnitScope_Output, element);
const AudioChannelSet newChannelSet = CoreAudioLayouts::fromCoreAudio (*inLayout);
const int currentNumChannels = static_cast<int> (ioElement.NumberChannels());
const int newChannelNum = newChannelSet.size();
// The channel count must match the element's current stream format.
if (currentNumChannels != newChannelNum)
return kAudioUnitErr_InvalidPropertyValue;
// check if the new layout could be potentially set
#ifdef JucePlugin_PreferredChannelConfigurations
short configs[][2] = {JucePlugin_PreferredChannelConfigurations};
if (! AudioUnitHelpers::isLayoutSupported (*juceFilter, info.isInput, info.busNr, newChannelNum, configs))
return kAudioUnitErr_FormatNotSupported;
#else
if (! juceFilter->getBus (info.isInput, info.busNr)->isLayoutSupported (newChannelSet))
return kAudioUnitErr_FormatNotSupported;
#endif
// Cache the new tag; it is applied to the processor when (re)prepared.
getCurrentLayout (info.isInput, info.busNr) = CoreAudioLayouts::toCoreAudio (newChannelSet);
return noErr;
}
//============================================================================== | |
// When parameters are discrete we need to use integer values: a discrete
// parameter spans [0, numSteps - 1] on the AU side, a continuous one [0, 1].
float getMaximumParameterValue (AudioProcessorParameter* juceParam)
{
   #if JUCE_FORCE_LEGACY_PARAMETER_AUTOMATION_TYPE
    ignoreUnused (juceParam);
    return 1.0f;
   #else
    if (juceParam->isDiscrete())
        return static_cast<float> (juceParam->getNumSteps() - 1);

    return 1.0f;
   #endif
}
// Fills in the AU-side description (flags, unit, range, name, clump) of a
// single parameter.
ComponentResult GetParameterInfo (AudioUnitScope inScope,
AudioUnitParameterID inParameterID,
AudioUnitParameterInfo& outParameterInfo) override
{
if (inScope == kAudioUnitScope_Global && juceFilter != nullptr)
{
if (auto* param = getParameterForAUParameterID (inParameterID))
{
// Base flags: every parameter is readable/writable with CF name and value strings.
outParameterInfo.unit = kAudioUnitParameterUnit_Generic;
outParameterInfo.flags = (UInt32) (kAudioUnitParameterFlag_IsWritable
| kAudioUnitParameterFlag_IsReadable
| kAudioUnitParameterFlag_HasCFNameString
| kAudioUnitParameterFlag_ValuesHaveStrings);
#if ! JUCE_FORCE_LEGACY_PARAMETER_AUTOMATION_TYPE
outParameterInfo.flags |= (UInt32) kAudioUnitParameterFlag_IsHighResolution;
#endif
const String name = param->getName (1024);
// Set whether the param is automatable (unnamed parameters aren't allowed to be automated)
if (name.isEmpty() || ! param->isAutomatable())
outParameterInfo.flags |= kAudioUnitParameterFlag_NonRealTime;
// Only continuous parameters may be ramped by the host.
const bool isParameterDiscrete = param->isDiscrete();
if (! isParameterDiscrete)
outParameterInfo.flags |= kAudioUnitParameterFlag_CanRamp;
if (param->isMetaParameter())
outParameterInfo.flags |= kAudioUnitParameterFlag_IsGlobalMeta;
// Report the innermost enclosing group as the parameter's clump (IDs are 1-based).
auto parameterGroupHierarchy = juceFilter->getParameterTree().getGroupsForParameter (param);
if (! parameterGroupHierarchy.isEmpty())
{
outParameterInfo.flags |= kAudioUnitParameterFlag_HasClump;
outParameterInfo.clumpID = (UInt32) parameterGroups.indexOf (parameterGroupHierarchy.getLast()) + 1;
}
// Is this a meter?
// NOTE(review): the 0xffff0000 mask extracts the high 16 bits of the JUCE
// parameter Category enum; 2 appears to be the meter group — confirm against
// AudioProcessorParameter::Category's encoding.
if ((((unsigned int) param->getCategory() & 0xffff0000) >> 16) == 2)
{
outParameterInfo.flags &= ~kAudioUnitParameterFlag_IsWritable;
outParameterInfo.flags |= kAudioUnitParameterFlag_MeterReadOnly | kAudioUnitParameterFlag_DisplayLogarithmic;
outParameterInfo.unit = kAudioUnitParameterUnit_LinearGain;
}
else
{
#if ! JUCE_FORCE_LEGACY_PARAMETER_AUTOMATION_TYPE
if (isParameterDiscrete)
outParameterInfo.unit = param->isBoolean() ? kAudioUnitParameterUnit_Boolean
: kAudioUnitParameterUnit_Indexed;
#endif
}
MusicDeviceBase::FillInParameterName (outParameterInfo, name.toCFString(), true);
// Range is [0, 1] for continuous parameters, [0, numSteps - 1] for discrete.
outParameterInfo.minValue = 0.0f;
outParameterInfo.maxValue = getMaximumParameterValue (param);
outParameterInfo.defaultValue = param->getDefaultValue() * getMaximumParameterValue (param);
jassert (outParameterInfo.defaultValue >= outParameterInfo.minValue
&& outParameterInfo.defaultValue <= outParameterInfo.maxValue);
return noErr;
}
}
return kAudioUnitErr_InvalidParameter;
}
// Returns the list of value strings for a discrete parameter, or
// kAudioUnitErr_InvalidParameter when no such list exists.
ComponentResult GetParameterValueStrings (AudioUnitScope inScope,
                                          AudioUnitParameterID inParameterID,
                                          CFArrayRef *outStrings) override
{
    // A null destination is a valid no-op query.
    if (outStrings == nullptr)
        return noErr;

    if (inScope != kAudioUnitScope_Global || juceFilter == nullptr)
        return kAudioUnitErr_InvalidParameter;

    auto* param = getParameterForAUParameterID (inParameterID);

    if (param == nullptr || ! param->isDiscrete())
        return kAudioUnitErr_InvalidParameter;

    const auto index = LegacyAudioParameter::getParamIndex (*juceFilter, param);

    if (auto* valueStrings = parameterValueStringArrays[index])
    {
        // No retain callbacks: the strings stay owned by this wrapper.
        *outStrings = CFArrayCreate (nullptr,
                                     (const void **) valueStrings->getRawDataPointer(),
                                     valueStrings->size(),
                                     nullptr);
        return noErr;
    }

    return kAudioUnitErr_InvalidParameter;
}
// Reads a parameter's current value, rescaled from JUCE's normalised range
// into the AU range ([0, numSteps - 1] for discrete parameters).
ComponentResult GetParameter (AudioUnitParameterID inID,
                              AudioUnitScope inScope,
                              AudioUnitElement inElement,
                              Float32& outValue) override
{
    if (inScope == kAudioUnitScope_Global && juceFilter != nullptr)
    {
        if (auto* param = getParameterForAUParameterID (inID))
        {
            outValue = param->getValue() * getMaximumParameterValue (param);
            return noErr;
        }
    }

    return MusicDeviceBase::GetParameter (inID, inScope, inElement, outValue);
}
// Writes a parameter value from the host, converting back from the AU range
// into JUCE's normalised [0, 1] range.
ComponentResult SetParameter (AudioUnitParameterID inID,
                              AudioUnitScope inScope,
                              AudioUnitElement inElement,
                              Float32 inValue,
                              UInt32 inBufferOffsetInFrames) override
{
    if (inScope == kAudioUnitScope_Global && juceFilter != nullptr)
    {
        if (auto* param = getParameterForAUParameterID (inID))
        {
            const auto normalised = inValue / getMaximumParameterValue (param);

            if (normalised != param->getValue())
            {
                // Flag the change so audioProcessorParameterChanged() doesn't
                // echo it straight back to the host.
                inParameterChangedCallback = true;
                param->setValueNotifyingHost (normalised);
            }

            return noErr;
        }
    }

    return MusicDeviceBase::SetParameter (inID, inScope, inElement, inValue, inBufferOffsetInFrames);
}
// Current Apple docs say nothing about what this method should return; we
// conservatively report that scheduled (ramped) parameter events aren't handled.
// NOTE(review): the original comment claimed this "isn't marked 'override'",
// but it is — older SDKs without this virtual would fail to compile here.
bool CanScheduleParameters() const override { return false; }
//==============================================================================
// A tail time is always advertised; its length is whatever the processor reports.
bool SupportsTail() override { return true; }
Float64 GetTailTime() override { return juceFilter->getTailLengthSeconds(); }
double getSampleRate() | |
{ | |
if (AudioUnitHelpers::getBusCountForWrapper (*juceFilter, false) > 0) | |
return Output (0).GetStreamFormat().mSampleRate; | |
return 44100.0; | |
} | |
// Reports the processor's latency in seconds (samples / sample rate).
Float64 GetLatency() override
{
const double rate = getSampleRate();
jassert (rate > 0);
#if JucePlugin_Enable_ARA
// When bound to ARA, latency must be zero (the ARA host compensates differently).
jassert (juceFilter->getLatencySamples() == 0 || ! dynamic_cast<AudioProcessorARAExtension*> (juceFilter.get())->isBoundToARA());
#endif
// Guarded division: returns 0 if the rate is somehow invalid.
return rate > 0 ? juceFilter->getLatencySamples() / rate : 0;
}
// RAII helper: installs itself as the processor's AudioPlayHead for the
// duration of one render call, translating the AU host callbacks
// (beat/tempo, musical time, transport state) into a JUCE PositionInfo.
class ScopedPlayHead : private AudioPlayHead
{
public:
explicit ScopedPlayHead (JuceAU& juceAudioUnit)
: audioUnit (juceAudioUnit)
{
audioUnit.juceFilter->setPlayHead (this);
}
~ScopedPlayHead() override
{
// Detach so the processor never holds a dangling play head.
audioUnit.juceFilter->setPlayHead (nullptr);
}
private:
Optional<PositionInfo> getPosition() const override
{
PositionInfo info;
// Map the timestamp's SMPTE type onto a JUCE FrameRate (or none).
info.setFrameRate ([this]() -> Optional<FrameRate>
{
switch (audioUnit.lastTimeStamp.mSMPTETime.mType)
{
case kSMPTETimeType2398: return FrameRate().withBaseRate (24).withPullDown();
case kSMPTETimeType24: return FrameRate().withBaseRate (24);
case kSMPTETimeType25: return FrameRate().withBaseRate (25);
case kSMPTETimeType30Drop: return FrameRate().withBaseRate (30).withDrop();
case kSMPTETimeType30: return FrameRate().withBaseRate (30);
case kSMPTETimeType2997: return FrameRate().withBaseRate (30).withPullDown();
case kSMPTETimeType2997Drop: return FrameRate().withBaseRate (30).withPullDown().withDrop();
case kSMPTETimeType60: return FrameRate().withBaseRate (60);
case kSMPTETimeType60Drop: return FrameRate().withBaseRate (60).withDrop();
case kSMPTETimeType5994: return FrameRate().withBaseRate (60).withPullDown();
case kSMPTETimeType5994Drop: return FrameRate().withBaseRate (60).withPullDown().withDrop();
case kSMPTETimeType50: return FrameRate().withBaseRate (50);
default: break;
}
return {};
}());
// Each host callback is optional; fields are only filled in on success.
double ppqPosition = 0.0;
double bpm = 0.0;
if (audioUnit.CallHostBeatAndTempo (&ppqPosition, &bpm) == noErr)
{
info.setPpqPosition (ppqPosition);
info.setBpm (bpm);
}
UInt32 outDeltaSampleOffsetToNextBeat;
double outCurrentMeasureDownBeat;
float num;
UInt32 den;
if (audioUnit.CallHostMusicalTimeLocation (&outDeltaSampleOffsetToNextBeat,
&num,
&den,
&outCurrentMeasureDownBeat) == noErr)
{
info.setTimeSignature (TimeSignature { (int) num, (int) den });
info.setPpqPositionOfLastBarStart (outCurrentMeasureDownBeat);
}
double outCurrentSampleInTimeLine = 0, outCycleStartBeat = 0, outCycleEndBeat = 0;
Boolean playing = false, looping = false, playchanged;
if (audioUnit.CallHostTransportState (&playing,
&playchanged,
&outCurrentSampleInTimeLine,
&looping,
&outCycleStartBeat,
&outCycleEndBeat) == noErr)
{
info.setIsPlaying (playing);
// +0.5 rounds the sample position to the nearest integer.
info.setTimeInSamples ((int64) (outCurrentSampleInTimeLine + 0.5));
info.setTimeInSeconds (*info.getTimeInSamples() / audioUnit.getSampleRate());
info.setIsLooping (looping);
info.setLoopPoints (LoopPoints { outCycleStartBeat, outCycleEndBeat });
}
else
{
// If the host doesn't support this callback, then use the sample time from lastTimeStamp:
outCurrentSampleInTimeLine = audioUnit.lastTimeStamp.mSampleTime;
}
// Host time is only valid when the timestamp flags say so.
info.setHostTimeNs ((audioUnit.lastTimeStamp.mFlags & kAudioTimeStampHostTimeValid) != 0
? makeOptional (audioUnit.timeConversions.hostTimeToNanos (audioUnit.lastTimeStamp.mHostTime))
: nullopt);
return info;
}
JuceAU& audioUnit;
};
//============================================================================== | |
void sendAUEvent (const AudioUnitEventType type, const int juceParamIndex) | |
{ | |
if (restoringState) | |
return; | |
auEvent.mEventType = type; | |
auEvent.mArgument.mParameter.mParameterID = getAUParameterIDForIndex (juceParamIndex); | |
AUEventListenerNotify (nullptr, nullptr, &auEvent); | |
} | |
// Called by the processor when a parameter changes. Changes that originated
// from the host (via SetParameter) are swallowed instead of being echoed back.
void audioProcessorParameterChanged (AudioProcessor*, int index, float /*newValue*/) override
{
    const bool cameFromHost = inParameterChangedCallback.get();
    inParameterChangedCallback = false;

    if (! cameFromHost)
        sendAUEvent (kAudioUnitEvent_ParameterValueChange, index);
}
// Forwards the start of a user parameter gesture to AU event listeners.
void audioProcessorParameterChangeGestureBegin (AudioProcessor*, int index) override
{
sendAUEvent (kAudioUnitEvent_BeginParameterChangeGesture, index);
}
// Forwards the end of a user parameter gesture to AU event listeners.
void audioProcessorParameterChangeGestureEnd (AudioProcessor*, int index) override
{
sendAUEvent (kAudioUnitEvent_EndParameterChangeGesture, index);
}
// Processor-level change (latency, program, parameter list, ...): hand off to
// the updater, which coalesces notifications to the host.
void audioProcessorChanged (AudioProcessor*, const ChangeDetails& details) override
{
audioProcessorChangedUpdater.update (details);
}
//============================================================================== | |
// this will only ever be called by the bypass parameter
void parameterValueChanged (int, float) override
{
// Don't notify the host while it's the one restoring state.
if (! restoringState)
PropertyChanged (kAudioUnitProperty_BypassEffect, kAudioUnitScope_Global, 0);
}
// Bypass gestures carry no AU-side meaning, so they're ignored.
void parameterGestureChanged (int, bool) override {}
//============================================================================== | |
// Stream formats may only change on valid elements while the AU is
// uninitialized.
bool StreamFormatWritable (AudioUnitScope scope, AudioUnitElement element) override
{
    const auto info = getElementInfo (scope, element);

    return info.error == noErr && ! IsInitialized();
}
// Checks whether a stream format (primarily its channel count) is acceptable
// on the given element.
bool ValidFormat (AudioUnitScope inScope,
AudioUnitElement inElement,
const AudioStreamBasicDescription& inNewFormat) override
{
// DSP Quattro incorrectly uses global scope for the ValidFormat call
if (inScope == kAudioUnitScope_Global)
return ValidFormat (kAudioUnitScope_Input, inElement, inNewFormat)
|| ValidFormat (kAudioUnitScope_Output, inElement, inNewFormat);
const auto info = getElementInfo (inScope, inElement);
if (info.error != noErr)
return false;
// Wrapper-only buses have no processor-side constraints.
if (info.kind == BusKind::wrapperOnly)
return true;
const auto newNumChannels = static_cast<int> (inNewFormat.mChannelsPerFrame);
const auto oldNumChannels = juceFilter->getChannelCountOfBus (info.isInput, info.busNr);
// Same channel count as now: trivially valid.
if (newNumChannels == oldNumChannels)
return true;
if (AudioProcessor::Bus* bus = juceFilter->getBus (info.isInput, info.busNr))
{
// The base class must also accept the raw stream description.
if (! MusicDeviceBase::ValidFormat (inScope, inElement, inNewFormat))
return false;
#ifdef JucePlugin_PreferredChannelConfigurations
short configs[][2] = {JucePlugin_PreferredChannelConfigurations};
ignoreUnused (bus);
return AudioUnitHelpers::isLayoutSupported (*juceFilter, info.isInput, info.busNr, newNumChannels, configs);
#else
return bus->isNumberOfChannelsSupported (newNumChannels);
#endif
}
return false;
}
// AU requires us to override this for the sole reason that we need to find a default layout tag if the number of channels have changed
OSStatus ChangeStreamFormat (AudioUnitScope inScope,
AudioUnitElement inElement,
const AudioStreamBasicDescription& inPrevFormat,
const AudioStreamBasicDescription& inNewFormat) override
{
const auto info = getElementInfo (inScope, inElement);
if (info.error != noErr)
return info.error;
AudioChannelLayoutTag& currentTag = getCurrentLayout (info.isInput, info.busNr);
const auto newNumChannels = static_cast<int> (inNewFormat.mChannelsPerFrame);
const auto oldNumChannels = juceFilter->getChannelCountOfBus (info.isInput, info.busNr);
#ifdef JucePlugin_PreferredChannelConfigurations
// Fixed-config builds: reject unsupported channel counts outright.
short configs[][2] = {JucePlugin_PreferredChannelConfigurations};
if (! AudioUnitHelpers::isLayoutSupported (*juceFilter, info.isInput, info.busNr, newNumChannels, configs))
return kAudioUnitErr_FormatNotSupported;
#endif
// predict channel layout
const auto set = [&]
{
// Wrapper-only buses always get a discrete layout of the right size.
if (info.kind == BusKind::wrapperOnly)
return AudioChannelSet::discreteChannels (newNumChannels);
// Channel count changed: ask the bus for a supported layout of that size.
if (newNumChannels != oldNumChannels)
return juceFilter->getBus (info.isInput, info.busNr)->supportedLayoutWithChannels (newNumChannels);
return juceFilter->getChannelLayoutOfBus (info.isInput, info.busNr);
}();
// An empty set means no supported layout exists for this channel count.
if (set == AudioChannelSet())
return kAudioUnitErr_FormatNotSupported;
const auto err = MusicDeviceBase::ChangeStreamFormat (inScope, inElement, inPrevFormat, inNewFormat);
// Only cache the new layout tag once the base class accepted the format.
if (err == noErr)
currentTag = CoreAudioLayouts::toCoreAudio (set);
return err;
}
//============================================================================== | |
/** The AU render callback: pulls input audio from the host, runs the wrapped
    JUCE processor, and copies the result into the host's output buffers.

    Runs on the audio render thread once per processing cycle.
*/
ComponentResult Render (AudioUnitRenderActionFlags& ioActionFlags,
                        const AudioTimeStamp& inTimeStamp,
                        const UInt32 nFrames) override
{
    lastTimeStamp = inTimeStamp;

    // prepare buffers
    {
        pullInputAudio (ioActionFlags, inTimeStamp, nFrames);
        prepareOutputBuffers (nFrames);
        audioBuffer.reset();
    }

    // We always render into the output below, so never advertise silence to the host.
    ioActionFlags &= ~kAudioUnitRenderAction_OutputIsSilence;

    const int numInputBuses  = AudioUnitHelpers::getBusCount (*juceFilter, true);
    const int numOutputBuses = AudioUnitHelpers::getBusCount (*juceFilter, false);

    // set buffer pointers to minimize copying
    {
        int chIdx = 0, numChannels = 0;
        bool interleaved = false;
        AudioBufferList* buffer = nullptr;

        // use output pointers
        for (int busIdx = 0; busIdx < numOutputBuses; ++busIdx)
        {
            GetAudioBufferList (false, busIdx, buffer, interleaved, numChannels);
            const int* outLayoutMap = mapper.get (false, busIdx);

            // Interleaved AU buffers can't be aliased as mono float channels,
            // so pass nullptr and let the copy-back stage handle them.
            for (int ch = 0; ch < numChannels; ++ch)
                audioBuffer.setBuffer (chIdx++, interleaved ? nullptr : static_cast<float*> (buffer->mBuffers[outLayoutMap[ch]].mData));
        }

        // use input pointers on remaining channels
        // NOTE(review): this for-statement has no increment - busIdx is
        // presumably advanced through the reference taken by
        // getOffsetInBusBufferForAbsoluteChannelIndex(); confirm against its
        // signature, otherwise this loop could not terminate.
        for (int busIdx = 0; chIdx < totalInChannels;)
        {
            int channelIndexInBus = juceFilter->getOffsetInBusBufferForAbsoluteChannelIndex (true, chIdx, busIdx);
            const bool badData = ! pulledSucceeded[busIdx];

            if (! badData)
                GetAudioBufferList (true, busIdx, buffer, interleaved, numChannels);

            const int* inLayoutMap = mapper.get (true, busIdx);

            const int n = juceFilter->getChannelCountOfBus (true, busIdx);

            for (int ch = channelIndexInBus; ch < n; ++ch)
                audioBuffer.setBuffer (chIdx++, interleaved || badData ? nullptr : static_cast<float*> (buffer->mBuffers[inLayoutMap[ch]].mData));
        }
    }

    // copy input
    {
        for (int busIdx = 0; busIdx < numInputBuses; ++busIdx)
        {
            if (pulledSucceeded[busIdx])
                audioBuffer.set (busIdx, Input ((UInt32) busIdx).GetBufferList(), mapper.get (true, busIdx));
            else
                audioBuffer.clearInputBus (busIdx, (int) nFrames);  // failed pull: feed silence instead of stale data
        }

        audioBuffer.clearUnusedChannels ((int) nFrames);
    }

    // swap midi buffers
    {
        // Grab the MIDI queued by HandleMIDIEvent()/HandleSysEx() since the
        // last cycle, without holding the lock during processing.
        const ScopedLock sl (incomingMidiLock);
        midiEvents.clear();
        incomingEvents.swapWith (midiEvents);
    }

    // process audio
    processBlock (audioBuffer.getBuffer (nFrames), midiEvents);

    // copy back
    {
        for (int busIdx = 0; busIdx < numOutputBuses; ++busIdx)
            audioBuffer.get (busIdx, Output ((UInt32) busIdx).GetBufferList(), mapper.get (false, busIdx));
    }

    // process midi output
   #if JucePlugin_ProducesMidiOutput || JucePlugin_IsMidiEffect
    if (! midiEvents.isEmpty() && midiCallback.midiOutputCallback != nullptr)
        pushMidiOutput (nFrames);
   #endif

    midiEvents.clear();

    return noErr;
}
//============================================================================== | |
// MusicDeviceBase note-on/off entry points: deliberate no-ops here - note data
// reaches the processor through the MIDI handlers below instead.
ComponentResult StartNote (MusicDeviceInstrumentID, MusicDeviceGroupID, NoteInstanceID*, UInt32, const MusicDeviceNoteParams&) override { return noErr; }
ComponentResult StopNote (MusicDeviceGroupID, NoteInstanceID, UInt32) override { return noErr; }
//============================================================================== | |
/** Receives a short MIDI event from the host and queues it for the next
    render cycle (see the midi-buffer swap in Render()).
*/
OSStatus HandleMIDIEvent (UInt8 inStatus, UInt8 inChannel, UInt8 inData1, UInt8 inData2, UInt32 inStartFrame) override
{
   #if JucePlugin_WantsMidiInput || JucePlugin_IsMidiEffect
    // Recombine the status nibble and channel into a standard MIDI status byte.
    // NOTE(review): three bytes are always queued, even for two-byte messages -
    // presumably the MidiBuffer parser trims by status; confirm.
    const juce::uint8 data[] = { (juce::uint8) (inStatus | inChannel),
                                 (juce::uint8) inData1,
                                 (juce::uint8) inData2 };

    const ScopedLock sl (incomingMidiLock);
    incomingEvents.addEvent (data, 3, (int) inStartFrame);
    return noErr;
   #else
    ignoreUnused (inStatus, inChannel, inData1);
    ignoreUnused (inData2, inStartFrame);

    // This plug-in doesn't accept MIDI input.
    return kAudioUnitErr_PropertyNotInUse;
   #endif
}
/** Receives a complete sysex message from the host and queues it (at frame 0)
    for the next render cycle.
*/
OSStatus HandleSysEx (const UInt8* inData, UInt32 inLength) override
{
   #if JucePlugin_WantsMidiInput || JucePlugin_IsMidiEffect
    const ScopedLock sl (incomingMidiLock);
    incomingEvents.addEvent (inData, (int) inLength, 0);
    return noErr;
   #else
    ignoreUnused (inData, inLength);
    return kAudioUnitErr_PropertyNotInUse;
   #endif
}
//============================================================================== | |
/** Returns the list of factory presets (the processor's programs) to the host.

    The AUPreset entries in the returned CFArray point into presetsArray, which
    this object keeps alive; their CFString names are released later by
    clearPresetsArray().
*/
ComponentResult GetPresets (CFArrayRef* outData) const override
{
    if (outData != nullptr)
    {
        const int numPrograms = juceFilter->getNumPrograms();

        // Drop any previously-cached preset names before rebuilding the cache.
        clearPresetsArray();
        presetsArray.insertMultiple (0, AUPreset(), numPrograms);

        // Null CFArrayCallBacks: the array stores raw AUPreset pointers and
        // does not retain/release them.
        CFMutableArrayRef presetsArrayRef = CFArrayCreateMutable (nullptr, numPrograms, nullptr);

        for (int i = 0; i < numPrograms; ++i)
        {
            String name (juceFilter->getProgramName(i));

            if (name.isEmpty())
                name = "Untitled";

            AUPreset& p = presetsArray.getReference(i);
            p.presetNumber = i;
            p.presetName = name.toCFString();   // +1 reference, released by clearPresetsArray()

            CFArrayAppendValue (presetsArrayRef, &p);
        }

        *outData = (CFArrayRef) presetsArrayRef;
    }

    return noErr;
}
/** Switches the processor to one of the factory presets published by
    GetPresets().

    @param inNewFactoryPreset  the preset chosen by the host; only its
                               presetNumber is used for the lookup.
    @returns  noErr on success, or kAudioUnitErr_InvalidProperty when the
              preset number is out of range.
*/
OSStatus NewFactoryPresetSet (const AUPreset& inNewFactoryPreset) override
{
    const int numPrograms = juceFilter->getNumPrograms();
    const SInt32 chosenPresetNumber = (int) inNewFactoryPreset.presetNumber;

    // Reject out-of-range indices, including negative ones, which would
    // otherwise be passed straight through to the processor.
    if (chosenPresetNumber < 0 || chosenPresetNumber >= numPrograms)
        return kAudioUnitErr_InvalidProperty;

    AUPreset chosenPreset;
    chosenPreset.presetNumber = chosenPresetNumber;
    chosenPreset.presetName = juceFilter->getProgramName (chosenPresetNumber).toCFString();

    juceFilter->setCurrentProgram (chosenPresetNumber);
    SetAFactoryPresetAsCurrent (chosenPreset);

    return noErr;
}
//============================================================================== | |
/** A component that hosts the plug-in's AudioProcessorEditor and is itself
    placed inside the NSView handed back to the host.
*/
class EditorCompHolder : public Component
{
public:
    EditorCompHolder (AudioProcessorEditor* const editor)
    {
        addAndMakeVisible (editor);

       #if ! JucePlugin_EditorRequiresKeyboardFocus
        setWantsKeyboardFocus (false);
       #else
        setWantsKeyboardFocus (true);
       #endif

        setBounds (getSizeToContainChild());

        lastBounds = getBounds();
    }

    ~EditorCompHolder() override
    {
        deleteAllChildren(); // note that we can't use a std::unique_ptr because the editor may
                             // have been transferred to another parent which takes over ownership.
    }

    /** Returns this component's local area needed to enclose the editor. */
    Rectangle<int> getSizeToContainChild()
    {
        if (auto* editor = getChildComponent (0))
            return getLocalArea (editor, editor->getLocalBounds());

        return {};
    }

    /** Creates the NSView the host will embed, wires it up to the filter, the
        AU instance, and a new EditorCompHolder, and registers it in activeUIs.
    */
    static NSView* createViewFor (AudioProcessor* filter, JuceAU* au, AudioProcessorEditor* const editor)
    {
        auto* editorCompHolder = new EditorCompHolder (editor);
        auto r = convertToHostBounds (makeNSRect (editorCompHolder->getSizeToContainChild()));

        static JuceUIViewClass cls;
        auto* view = [[cls.createInstance() initWithFrame: r] autorelease];

        JuceUIViewClass::setFilter (view, filter);
        JuceUIViewClass::setAU (view, au);
        JuceUIViewClass::setEditor (view, editorCompHolder);

        [view setHidden: NO];
        [view setPostsFrameChangedNotifications: YES];

        // The view must tear itself down if the host app quits while the UI is open.
        [[NSNotificationCenter defaultCenter] addObserver: view
                                                 selector: @selector (applicationWillTerminate:)
                                                     name: NSApplicationWillTerminateNotification
                                                   object: nil];
        activeUIs.add (view);

        editorCompHolder->addToDesktop (0, (void*) view);
        editorCompHolder->setVisible (view != nullptr);  // was setVisible (view): make the pointer->bool conversion explicit

        return view;
    }

    void parentSizeChanged() override
    {
        resizeHostWindow();

        if (auto* editor = getChildComponent (0))
            editor->repaint();
    }

    void childBoundsChanged (Component*) override
    {
        auto b = getSizeToContainChild();

        if (lastBounds != b)
        {
            lastBounds = b;

            // Clamp to a sensible minimum so the host never gets a zero-sized view.
            setSize (jmax (32, b.getWidth()), jmax (32, b.getHeight()));
            resizeHostWindow();
        }
    }

    /** Ableton Live workaround: forward the current key event to the host view
        once, guarding against recursively re-sending the same event via its
        timestamp.
    */
    bool keyPressed (const KeyPress&) override
    {
        if (getHostType().isAbletonLive())
        {
            static NSTimeInterval lastEventTime = 0; // check we're not recursively sending the same event
            NSTimeInterval eventTime = [[NSApp currentEvent] timestamp];

            if (lastEventTime != eventTime)
            {
                lastEventTime = eventTime;

                NSView* view = (NSView*) getWindowHandle();
                NSView* hostView = [view superview];
                NSWindow* hostWindow = [hostView window];

                [hostWindow makeFirstResponder: hostView];
                [hostView keyDown: (NSEvent*) [NSApp currentEvent]];
                [hostWindow makeFirstResponder: view];
            }
        }

        return false;
    }

    /** Resizes the NSView (and its superview) to match lastBounds, with
        implicit Core Animation actions disabled to avoid flicker.
    */
    void resizeHostWindow()
    {
        [CATransaction begin];
        [CATransaction setValue:(id) kCFBooleanTrue forKey:kCATransactionDisableActions];

        auto rect = convertToHostBounds (makeNSRect (lastBounds));
        auto* view = (NSView*) getWindowHandle();

        auto superRect = [[view superview] frame];
        superRect.size.width  = rect.size.width;
        superRect.size.height = rect.size.height;

        [[view superview] setFrame: superRect];
        [view setFrame: rect];
        [CATransaction commit];

        [view setNeedsDisplay: YES];
    }

private:
    Rectangle<int> lastBounds;

    JUCE_DECLARE_NON_COPYABLE (EditorCompHolder)
};
// Tears down any open editor views that belong to this AU instance.
void deleteActiveEditors()
{
    // Iterate in reverse, preserving the original traversal order.
    for (int index = activeUIs.size() - 1; index >= 0; --index)
    {
        auto view = (id) activeUIs.getUnchecked (index);

        if (JuceUIViewClass::getAU (view) == this)
            JuceUIViewClass::deleteEditor (view);
    }
}
//============================================================================== | |
/** The ObjC class for the NSView returned to the host: it stores pointers to
    the filter, the AU instance and the editor holder in ivars, and handles
    teardown when the view (or the host application) goes away.
*/
struct JuceUIViewClass : public ObjCClass<NSView>
{
    JuceUIViewClass() : ObjCClass<NSView> ("JUCEAUView_")
    {
        addIvar<AudioProcessor*> ("filter");
        addIvar<JuceAU*> ("au");
        addIvar<EditorCompHolder*> ("editor");

        addMethod (@selector (dealloc), dealloc);
        addMethod (@selector (applicationWillTerminate:), applicationWillTerminate);
        addMethod (@selector (viewDidMoveToWindow), viewDidMoveToWindow);
        addMethod (@selector (mouseDownCanMoveWindow), mouseDownCanMoveWindow);

        registerClass();
    }

    /** Destroys the editor attached to the view, notifying the processor first
        (if it still exists) so it can drop its editor pointer.
    */
    static void deleteEditor (id self)
    {
        std::unique_ptr<EditorCompHolder> editorComp (getEditor (self));

        if (editorComp != nullptr)
        {
            if (editorComp->getChildComponent(0) != nullptr
                 && activePlugins.contains (getAU (self))) // plugin may have been deleted before the UI
            {
                AudioProcessor* const filter = getIvar<AudioProcessor*> (self, "filter");
                filter->editorBeingDeleted ((AudioProcessorEditor*) editorComp->getChildComponent(0));
            }

            editorComp = nullptr;
            setEditor (self, nullptr);
        }
    }

    // Ivar accessors.
    static JuceAU* getAU (id self)                          { return getIvar<JuceAU*> (self, "au"); }
    static EditorCompHolder* getEditor (id self)            { return getIvar<EditorCompHolder*> (self, "editor"); }

    static void setFilter (id self, AudioProcessor* filter) { object_setInstanceVariable (self, "filter", filter); }
    static void setAU (id self, JuceAU* au)                 { object_setInstanceVariable (self, "au", au); }
    static void setEditor (id self, EditorCompHolder* e)    { object_setInstanceVariable (self, "editor", e); }

private:
    static void dealloc (id self, SEL)
    {
        // shutdown() may already have run via applicationWillTerminate.
        if (activeUIs.contains (self))
            shutdown (self);

        sendSuperclassMessage<void> (self, @selector (dealloc));
    }

    static void applicationWillTerminate (id self, SEL, NSNotification*)
    {
        shutdown (self);
    }

    /** Unregisters the view, deletes its editor, and shuts down the JUCE GUI
        subsystem once the last plugin instance and UI have gone.
    */
    static void shutdown (id self)
    {
        [[NSNotificationCenter defaultCenter] removeObserver: self];
        deleteEditor (self);

        jassert (activeUIs.contains (self));
        activeUIs.removeFirstMatchingValue (self);

        if (activePlugins.size() + activeUIs.size() == 0)
        {
            // there's some kind of component currently modal, but the host
            // is trying to delete our plugin..
            jassert (Component::getCurrentlyModalComponent() == nullptr);

            shutdownJuce_GUI();
        }
    }

    static void viewDidMoveToWindow (id self, SEL)
    {
        if (NSWindow* w = [(NSView*) self window])
        {
            [w setAcceptsMouseMovedEvents: YES];

            if (EditorCompHolder* const editorComp = getEditor (self))
                [w makeFirstResponder: (NSView*) editorComp->getWindowHandle()];
        }
    }

    // Stops clicks inside the plug-in UI from dragging the host window around.
    static BOOL mouseDownCanMoveWindow (id, SEL)
    {
        return NO;
    }
};
//============================================================================== | |
/** Implements the AUCocoaUIBase factory protocol: the host asks an instance of
    this class to create the Cocoa view for the audio unit.
*/
struct JuceUICreationClass : public ObjCClass<NSObject>
{
    JuceUICreationClass() : ObjCClass<NSObject> ("JUCE_AUCocoaViewClass_")
    {
        addMethod (@selector (interfaceVersion), interfaceVersion);
        addMethod (@selector (description), description);
        addMethod (@selector (uiViewForAudioUnit:withSize:), uiViewForAudioUnit);

        addProtocol (@protocol (AUCocoaUIBase));

        registerClass();
    }

private:
    static unsigned int interfaceVersion (id, SEL) { return 0; }

    static NSString* description (id, SEL)
    {
        return [NSString stringWithString: nsStringLiteral (JucePlugin_Name)];
    }

    /** Retrieves the filter and JuceAU pointers that were published through the
        custom juceFilterObjectPropertyID property, then builds the editor view.
    */
    static NSView* uiViewForAudioUnit (id, SEL, AudioUnit inAudioUnit, NSSize)
    {
        void* pointers[2];
        UInt32 propertySize = sizeof (pointers);

        if (AudioUnitGetProperty (inAudioUnit, juceFilterObjectPropertyID,
                                  kAudioUnitScope_Global, 0, pointers, &propertySize) == noErr)
        {
            if (AudioProcessor* filter = static_cast<AudioProcessor*> (pointers[0]))
                if (AudioProcessorEditor* editorComp = filter->createEditorIfNeeded())
                {
                   #if JucePlugin_Enable_ARA
                    jassert (dynamic_cast<AudioProcessorEditorARAExtension*> (editorComp) != nullptr);
                    // for proper view embedding, ARA plug-ins must be resizable
                    jassert (editorComp->isResizable());
                   #endif
                    return EditorCompHolder::createViewFor (filter, static_cast<JuceAU*> (pointers[1]), editorComp);
                }
        }

        return nil;
    }
};
private: | |
//============================================================================== | |
/* The call to AUBase::PropertyChanged may allocate hence the need for this class */ | |
/** Defers AudioProcessor change notifications to the message thread.

    Changes are accumulated as atomic flags and flushed asynchronously (or
    immediately when already on the message thread), because PropertyChanged()
    may allocate and so must not run on the audio thread.
*/
class AudioProcessorChangedUpdater final : private AsyncUpdater
{
public:
    explicit AudioProcessorChangedUpdater (JuceAU& o) : owner (o) {}
    ~AudioProcessorChangedUpdater() override { cancelPendingUpdate(); }

    /** Records which aspects changed and schedules (or performs) the host notification. */
    void update (const ChangeDetails& details)
    {
        int flags = 0;

        if (details.latencyChanged)
            flags |= latencyChangedFlag;

        if (details.parameterInfoChanged)
            flags |= parameterInfoChangedFlag;

        if (details.programChanged)
            flags |= programChangedFlag;

        if (flags != 0)
        {
            callbackFlags.fetch_or (flags);

            if (MessageManager::getInstance()->isThisTheMessageThread())
                handleAsyncUpdate();
            else
                triggerAsyncUpdate();
        }
    }

private:
    void handleAsyncUpdate() override
    {
        const auto flags = callbackFlags.exchange (0);

        if ((flags & latencyChangedFlag) != 0)
            owner.PropertyChanged (kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0);

        if ((flags & parameterInfoChangedFlag) != 0)
        {
            owner.PropertyChanged (kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0);
            owner.PropertyChanged (kAudioUnitProperty_ParameterInfo, kAudioUnitScope_Global, 0);
        }

        // ClassInfo (the saved state) is reported as changed for any update.
        owner.PropertyChanged (kAudioUnitProperty_ClassInfo, kAudioUnitScope_Global, 0);

        if ((flags & programChangedFlag) != 0)
        {
            owner.refreshCurrentPreset();
            owner.PropertyChanged (kAudioUnitProperty_PresentPreset, kAudioUnitScope_Global, 0);
        }
    }

    JuceAU& owner;

    static constexpr int latencyChangedFlag       = 1 << 0,
                         parameterInfoChangedFlag = 1 << 1,
                         programChangedFlag       = 1 << 2;

    std::atomic<int> callbackFlags { 0 };
};
//============================================================================== | |
AudioUnitHelpers::CoreAudioBufferList audioBuffer;   // channel list assembled per render cycle
MidiBuffer midiEvents, incomingEvents;               // incomingEvents: filled by host MIDI callbacks, swapped into midiEvents in Render()
bool prepared = false, isBypassed = false, restoringState = false;

//==============================================================================
#if JUCE_FORCE_USE_LEGACY_PARAM_IDS
static constexpr bool forceUseLegacyParamIDs = true;
#else
static constexpr bool forceUseLegacyParamIDs = false;
#endif

//==============================================================================
// Parameter bookkeeping: auParamIDs/paramMap map between JUCE parameters and
// the (possibly hashed) AU parameter IDs.
LegacyAudioParametersWrapper juceParameters;
std::unordered_map<int32, AudioProcessorParameter*> paramMap;
Array<AudioUnitParameterID> auParamIDs;
Array<const AudioProcessorParameterGroup*> parameterGroups;

// Stores the parameter IDs in the order that they will be reported to the host.
std::vector<AudioUnitParameterID> cachedParameterList;

//==============================================================================
// According to the docs, this is the maximum size of a MIDIPacketList.
static constexpr UInt32 packetListBytes = 65536;

CoreAudioTimeConversions timeConversions;
AudioUnitEvent auEvent;
mutable Array<AUPreset> presetsArray;                // cached presets handed out by GetPresets()
CriticalSection incomingMidiLock;                    // guards incomingEvents
AUMIDIOutputCallbackStruct midiCallback;             // host-registered MIDI output sink
AudioTimeStamp lastTimeStamp;
int totalInChannels, totalOutChannels;
HeapBlock<bool> pulledSucceeded;                     // per-input-bus success flags from pullInputAudio()
HeapBlock<MIDIPacketList> packetList { packetListBytes, 1 };
ThreadLocalValue<bool> inParameterChangedCallback;
AudioProcessorChangedUpdater audioProcessorChangedUpdater { *this };

//==============================================================================
// Channel layout state, indexed by bus.
Array<AUChannelInfo> channelInfo;
Array<std::vector<AudioChannelLayoutTag>> supportedInputLayouts, supportedOutputLayouts;
Array<AudioChannelLayoutTag> currentInputLayout, currentOutputLayout;

//==============================================================================
AudioUnitHelpers::ChannelRemapper mapper;

//==============================================================================
// One array of pre-rendered value strings per discrete parameter (nullptr otherwise).
OwnedArray<OwnedArray<const __CFString>> parameterValueStringArrays;

//==============================================================================
AudioProcessorParameter* bypassParam = nullptr;
//============================================================================== | |
// Scales a rectangle from plug-in coordinates into host coordinates using the
// JUCE global desktop scale factor. Returns the input unchanged when the
// scale is (approximately) 1.
static NSRect convertToHostBounds (NSRect pluginRect)
{
    const auto scale = Desktop::getInstance().getGlobalScaleFactor();

    if (approximatelyEqual (scale, 1.0f))
        return pluginRect;

    const auto s = static_cast<CGFloat> (scale);

    return NSMakeRect (pluginRect.origin.x * s,
                       pluginRect.origin.y * s,
                       pluginRect.size.width * s,
                       pluginRect.size.height * s);
}
// The inverse of convertToHostBounds(): scales a rectangle from host
// coordinates back into plug-in coordinates.
static NSRect convertFromHostBounds (NSRect hostRect)
{
    const auto scale = Desktop::getInstance().getGlobalScaleFactor();

    if (approximatelyEqual (scale, 1.0f))
        return hostRect;

    const auto s = static_cast<CGFloat> (scale);

    return NSMakeRect (hostRect.origin.x / s,
                       hostRect.origin.y / s,
                       hostRect.size.width / s,
                       hostRect.size.height / s);
}
//============================================================================== | |
/** Pulls audio for every input element from the host into the AU input
    buffers, recording per-bus success in pulledSucceeded so that Render() can
    clear buses whose pull failed.
*/
void pullInputAudio (AudioUnitRenderActionFlags& flags, const AudioTimeStamp& timestamp, const UInt32 nFrames) noexcept
{
    const unsigned int numInputBuses = GetScope (kAudioUnitScope_Input).GetNumberOfElements();

    for (unsigned int i = 0; i < numInputBuses; ++i)
    {
        auto& input = Input (i);
        const bool succeeded = (input.PullInput (flags, timestamp, i, nFrames) == noErr);

        // A buffer flagged as silent isn't guaranteed to contain zeros, so
        // clear it explicitly before use.
        if ((flags & kAudioUnitRenderAction_OutputIsSilence) != 0 && succeeded)
            AudioUnitHelpers::clearAudioBuffer (input.GetBufferList());

        pulledSucceeded[i] = succeeded;
    }
}
/** Makes sure every output element has a usable buffer for this render cycle.

    Wrapper-only buses (those beyond the processor's bus count) never receive
    processor data, so they are cleared here.
*/
void prepareOutputBuffers (const UInt32 nFrames) noexcept
{
    const auto numProcessorBuses = AudioUnitHelpers::getBusCount (*juceFilter, false);
    const auto numWrapperBuses   = GetScope (kAudioUnitScope_Output).GetNumberOfElements();

    for (UInt32 busIdx = 0; busIdx < numWrapperBuses; ++busIdx)
    {
        auto& output = Output (busIdx);

        if (output.WillAllocateBuffer())
            output.PrepareBuffer (nFrames);

        if (busIdx >= (UInt32) numProcessorBuses)
            AudioUnitHelpers::clearAudioBuffer (output.GetBufferList());
    }
}
/** Runs the wrapped AudioProcessor on the prepared buffer.

    Holds the processor's callback lock and installs this AU as the play head
    for the duration of the call.
*/
void processBlock (juce::AudioBuffer<float>& buffer, MidiBuffer& midiBuffer) noexcept
{
    const ScopedLock sl (juceFilter->getCallbackLock());
    const ScopedPlayHead playhead { *this };

    if (juceFilter->isSuspended())
    {
        buffer.clear();
    }
    else if (bypassParam == nullptr && isBypassed)
    {
        // No dedicated bypass parameter exists, so emulate bypassing here.
        // (When one does exist, the parameter itself handles bypassing.)
        juceFilter->processBlockBypassed (buffer, midiBuffer);
    }
    else
    {
        juceFilter->processBlock (buffer, midiBuffer);
    }
}
/** Converts the processed MidiBuffer into MIDIPacketLists and delivers them
    through the host-registered MIDI output callback.

    If a packet list fills up part-way through the buffer, it is sent and a
    fresh list is started before retrying the event that didn't fit.
*/
void pushMidiOutput (UInt32 nFrames) noexcept
{
    MIDIPacket* end = nullptr;

    const auto init = [&]
    {
        end = MIDIPacketListInit (packetList);
    };

    const auto send = [&]
    {
        midiCallback.midiOutputCallback (midiCallback.userData, &lastTimeStamp, 0, packetList);
    };

    const auto add = [&] (const MidiMessageMetadata& metadata)
    {
        // MIDIPacketListAdd returns nullptr when the event doesn't fit.
        end = MIDIPacketListAdd (packetList,
                                 packetListBytes,
                                 end,
                                 static_cast<MIDITimeStamp> (metadata.samplePosition),
                                 static_cast<ByteCount> (metadata.numBytes),
                                 metadata.data);
    };

    init();

    for (const auto metadata : midiEvents)
    {
        jassert (isPositiveAndBelow (metadata.samplePosition, nFrames));
        ignoreUnused (nFrames);

        add (metadata);

        if (end == nullptr)
        {
            // Packet list full: flush what we have and retry this event.
            send();
            init();
            add (metadata);

            if (end == nullptr)
            {
                // If this is hit, the size of this midi packet exceeds the maximum size of
                // a MIDIPacketList. Large SysEx messages should be broken up into smaller
                // chunks.
                jassertfalse;
                init();
            }
        }
    }

    send();
}
/** Fetches the AU-side buffer list for a bus, along with whether its channels
    are interleaved and how many channels it holds.
*/
void GetAudioBufferList (bool isInput, int busIdx, AudioBufferList*& bufferList, bool& interleaved, int& numChannels)
{
    auto* element = Element (isInput ? kAudioUnitScope_Input : kAudioUnitScope_Output, static_cast<UInt32> (busIdx)).AsIOElement();
    jassert (element != nullptr);

    bufferList = &element->GetBufferList();
    jassert (bufferList->mNumberBuffers > 0);

    interleaved = AudioUnitHelpers::isAudioBufferInterleaved (*bufferList);
    // Interleaved: a single buffer holds all channels; otherwise one buffer per channel.
    numChannels = static_cast<int> (interleaved ? bufferList->mBuffers[0].mNumberChannels : bufferList->mNumberBuffers);
}
//============================================================================== | |
// Maps an AudioUnitScope onto a direction flag. Sets isInput for the input
// scope, and reports kAudioUnitErr_InvalidScope for anything that is neither
// the input nor the output scope.
static OSStatus scopeToDirection (AudioUnitScope scope, bool& isInput) noexcept
{
    isInput = (scope == kAudioUnitScope_Input);

    if (scope == kAudioUnitScope_Input || scope == kAudioUnitScope_Output)
        return (OSStatus) noErr;

    return (OSStatus) kAudioUnitErr_InvalidScope;
}
// Distinguishes buses that map onto the wrapped AudioProcessor from buses
// that exist only on the AU wrapper side.
enum class BusKind
{
    processor,
    wrapperOnly,
};

// Result of getElementInfo(): the resolved bus index/direction/kind, or a
// non-noErr error code (in which case the other fields are default-initialised).
struct ElementInfo
{
    int busNr;
    BusKind kind;
    bool isInput;
    OSStatus error;
};
// Translates an (AudioUnitScope, AudioUnitElement) pair into bus information.
// On success the result says whether the bus belongs to the wrapped processor
// or exists only on the wrapper; on failure, only the error field is valid.
ElementInfo getElementInfo (AudioUnitScope scope, AudioUnitElement element) noexcept
{
    bool isInput = false;
    const auto scopeErr = scopeToDirection (scope, isInput);

    if (scopeErr != noErr)
        return { {}, {}, {}, scopeErr };

    const auto bus = static_cast<int> (element);

    if (isPositiveAndBelow (bus, AudioUnitHelpers::getBusCount (*juceFilter, isInput)))
        return { bus, BusKind::processor, isInput, noErr };

    if (isPositiveAndBelow (bus, AudioUnitHelpers::getBusCountForWrapper (*juceFilter, isInput)))
        return { bus, BusKind::wrapperOnly, isInput, noErr };

    return { {}, {}, {}, kAudioUnitErr_InvalidElement };
}
/** Reports the full list of AU parameter IDs to the host.

    With legacy (indexed) IDs, or a non-global scope, this defers to the base
    class. Otherwise the list is built once and cached: the IDs are sorted,
    then stable-sorted by version hint, so parameters added in newer plug-in
    versions appear after older ones with a deterministic order within each
    version.
*/
OSStatus GetParameterList (AudioUnitScope inScope, AudioUnitParameterID* outParameterList, UInt32& outNumParameters) override
{
    if (forceUseLegacyParamIDs || inScope != kAudioUnitScope_Global)
        return MusicDeviceBase::GetParameterList (inScope, outParameterList, outNumParameters);

    outNumParameters = (UInt32) juceParameters.size();

    // Hosts may pass a null list first, just to query the count.
    if (outParameterList == nullptr)
        return noErr;

    if (cachedParameterList.empty())
    {
        struct ParamInfo
        {
            AudioUnitParameterID identifier;
            int versionHint;
        };

        std::vector<ParamInfo> vec;
        vec.reserve (juceParameters.size());

        for (const auto* param : juceParameters)
            vec.push_back ({ generateAUParameterID (*param), param->getVersionHint() });

        // Sort by ID, then stable-sort by version hint: grouped by version,
        // ordered by ID within each version group.
        std::sort (vec.begin(), vec.end(), [] (auto a, auto b) { return a.identifier < b.identifier; });
        std::stable_sort (vec.begin(), vec.end(), [] (auto a, auto b) { return a.versionHint < b.versionHint; });
        std::transform (vec.begin(), vec.end(), std::back_inserter (cachedParameterList), [] (auto x) { return x.identifier; });
    }

    std::copy (cachedParameterList.begin(), cachedParameterList.end(), outParameterList);

    return noErr;
}
//============================================================================== | |
/** Registers the processor's parameters with the AU.

    In legacy mode the AU exposes simple indexed parameters; otherwise each
    parameter gets a hashed ID recorded in auParamIDs/paramMap. Also
    pre-renders value strings for discrete parameters and attaches a listener
    to the bypass parameter if there is one.
*/
void addParameters()
{
    parameterGroups = juceFilter->getParameterTree().getSubgroups (true);

    juceParameters.update (*juceFilter, forceUseLegacyParamIDs);
    const int numParams = juceParameters.getNumParameters();

    if (forceUseLegacyParamIDs)
    {
        Globals()->UseIndexedParameters (static_cast<UInt32> (numParams));
    }
    else
    {
        for (auto* param : juceParameters)
        {
            const AudioUnitParameterID auParamID = generateAUParameterID (*param);

            // Consider yourself very unlucky if you hit this assertion. The hash codes of your
            // parameter ids are not unique.
            jassert (paramMap.find (static_cast<int32> (auParamID)) == paramMap.end());

            auParamIDs.add (auParamID);
            paramMap.emplace (static_cast<int32> (auParamID), param);
            Globals()->SetParameter (auParamID, param->getValue());
        }
    }

   #if JUCE_DEBUG
    // Some hosts can't handle the huge numbers of discrete parameter values created when
    // using the default number of steps.
    for (auto* param : juceParameters)
        if (param->isDiscrete())
            jassert (param->getNumSteps() != AudioProcessor::getDefaultNumParameterSteps());
   #endif

    parameterValueStringArrays.ensureStorageAllocated (numParams);

    for (auto* param : juceParameters)
    {
        OwnedArray<const __CFString>* stringValues = nullptr;

        auto initialValue = param->getValue();
        bool paramIsLegacy = dynamic_cast<LegacyAudioParameter*> (param) != nullptr;

        if (param->isDiscrete() && (! forceUseLegacyParamIDs))
        {
            const auto numSteps = param->getNumSteps();
            stringValues = new OwnedArray<const __CFString>();
            stringValues->ensureStorageAllocated (numSteps);

            const auto maxValue = getMaximumParameterValue (param);

            auto getTextValue = [param, paramIsLegacy] (float value)
            {
                if (paramIsLegacy)
                {
                    // Legacy parameters can only report text for their current
                    // value, so set it temporarily (restored further below).
                    param->setValue (value);
                    return param->getCurrentValueAsText();
                }

                return param->getText (value, 256);
            };

            for (int i = 0; i < numSteps; ++i)
            {
                auto value = (float) i / maxValue;

                // NOTE(review): toCFString() returns a +1 CFString that is
                // copied here but never released - looks like a small leak;
                // confirm the intended ownership convention.
                stringValues->add (CFStringCreateCopy (nullptr, (getTextValue (value).toCFString())));
            }
        }

        if (paramIsLegacy)
            param->setValue (initialValue);

        parameterValueStringArrays.add (stringValues);
    }

    if ((bypassParam = juceFilter->getBypassParameter()) != nullptr)
        bypassParam->addListener (this);
}
//============================================================================== | |
/** Derives an AudioUnitParameterID from a JUCE parameter.

    In legacy mode the parameter's ID string is its index, so it is converted
    numerically; otherwise the ID string's hash code is used.
*/
static AudioUnitParameterID generateAUParameterID (const AudioProcessorParameter& param)
{
    const String& juceParamID = LegacyAudioParameter::getParamID (&param, forceUseLegacyParamIDs);
    AudioUnitParameterID paramHash = static_cast<AudioUnitParameterID> (juceParamID.hashCode());

   #if JUCE_USE_STUDIO_ONE_COMPATIBLE_PARAMETERS
    // studio one doesn't like negative parameters
    paramHash &= ~(((AudioUnitParameterID) 1) << (sizeof (AudioUnitParameterID) * 8 - 1));
   #endif

    return forceUseLegacyParamIDs ? static_cast<AudioUnitParameterID> (juceParamID.getIntValue())
                                  : paramHash;
}
// Returns the AU parameter ID for a parameter index: the index itself in
// legacy mode, otherwise the cached hashed ID.
inline AudioUnitParameterID getAUParameterIDForIndex (int paramIndex) const noexcept
{
    return forceUseLegacyParamIDs ? static_cast<AudioUnitParameterID> (paramIndex)
                                  : auParamIDs.getReference (paramIndex);
}
// Looks up the JUCE parameter behind an AU parameter ID. With legacy IDs the
// ID is simply the parameter index; otherwise it is a hash used as a map key.
// Returns nullptr when nothing matches.
AudioProcessorParameter* getParameterForAUParameterID (AudioUnitParameterID address) const noexcept
{
    const auto key = static_cast<int32> (address);

    if (forceUseLegacyParamIDs)
        return juceParameters.getParamForIndex (key);

    const auto found = paramMap.find (key);

    if (found == paramMap.end())
        return nullptr;

    return found->second;
}
//============================================================================== | |
/** Pushes the processor's current bus configuration into the AU wrapper.

    Sets the AU element counts (including wrapper-only buses), refreshes the
    supported layout tags, then applies the current channel layout of each
    processor bus to the matching AU element. Returns the first error hit.
*/
OSStatus syncAudioUnitWithProcessor()
{
    OSStatus err = noErr;
    const auto numWrapperInputs  = AudioUnitHelpers::getBusCountForWrapper (*juceFilter, true);
    const auto numWrapperOutputs = AudioUnitHelpers::getBusCountForWrapper (*juceFilter, false);

    if ((err = MusicDeviceBase::SetBusCount (kAudioUnitScope_Input, static_cast<UInt32> (numWrapperInputs))) != noErr)
        return err;

    if ((err = MusicDeviceBase::SetBusCount (kAudioUnitScope_Output, static_cast<UInt32> (numWrapperOutputs))) != noErr)
        return err;

    addSupportedLayoutTags();

    const auto numProcessorInputs  = AudioUnitHelpers::getBusCount (*juceFilter, true);
    const auto numProcessorOutputs = AudioUnitHelpers::getBusCount (*juceFilter, false);

    for (int i = 0; i < numProcessorInputs; ++i)
        if ((err = syncAudioUnitWithChannelSet (true, i, juceFilter->getChannelLayoutOfBus (true, i))) != noErr)
            return err;

    for (int i = 0; i < numProcessorOutputs; ++i)
        if ((err = syncAudioUnitWithChannelSet (false, i, juceFilter->getChannelLayoutOfBus (false, i))) != noErr)
            return err;

    return noErr;
}
/** Pulls the AU-side channel layouts into the wrapped processor.

    Builds a BusesLayout from the current layout tags, validates it against
    the elements' channel counts (and, if defined, the preferred channel
    configurations), then applies it to the processor and refreshes the
    cached total channel counts.
*/
OSStatus syncProcessorWithAudioUnit()
{
    const int numInputBuses  = AudioUnitHelpers::getBusCount (*juceFilter, true);
    const int numOutputBuses = AudioUnitHelpers::getBusCount (*juceFilter, false);

    const int numInputElements  = static_cast<int> (GetScope (kAudioUnitScope_Input). GetNumberOfElements());
    const int numOutputElements = static_cast<int> (GetScope (kAudioUnitScope_Output).GetNumberOfElements());

    AudioProcessor::BusesLayout requestedLayouts;

    for (int dir = 0; dir < 2; ++dir)
    {
        const bool isInput = (dir == 0);

        const int n = (isInput ? numInputBuses : numOutputBuses);
        const int numAUElements = (isInput ? numInputElements : numOutputElements);
        Array<AudioChannelSet>& requestedBuses = (isInput ? requestedLayouts.inputBuses : requestedLayouts.outputBuses);

        for (int busIdx = 0; busIdx < n; ++busIdx)
        {
            const auto* element = (busIdx < numAUElements ? &IOElement (isInput ? kAudioUnitScope_Input : kAudioUnitScope_Output, (UInt32) busIdx) : nullptr);
            const int numChannels = (element != nullptr ? static_cast<int> (element->NumberChannels()) : 0);

            AudioChannelLayoutTag currentLayoutTag = isInput ? currentInputLayout[busIdx] : currentOutputLayout[busIdx];

            // The layout tag's low 16 bits are expected to hold its channel
            // count; it must agree with the element's stream format.
            const int tagNumChannels = currentLayoutTag & 0xffff;

            if (numChannels != tagNumChannels)
                return kAudioUnitErr_FormatNotSupported;

            requestedBuses.add (CoreAudioLayouts::fromCoreAudio (currentLayoutTag));
        }
    }

   #ifdef JucePlugin_PreferredChannelConfigurations
    short configs[][2] = {JucePlugin_PreferredChannelConfigurations};

    if (! AudioProcessor::containsLayout (requestedLayouts, configs))
        return kAudioUnitErr_FormatNotSupported;
   #endif

    if (! AudioUnitHelpers::setBusesLayout (juceFilter.get(), requestedLayouts))
        return kAudioUnitErr_FormatNotSupported;

    // update total channel count
    totalInChannels  = juceFilter->getTotalNumInputChannels();
    totalOutChannels = juceFilter->getTotalNumOutputChannels();

    return noErr;
}
/** Applies a JUCE channel set to one AU element.

    Stores the corresponding CoreAudio layout tag; for enabled buses it also
    names the element after the JUCE bus and installs a matching Float32
    stream format at the current sample rate.
*/
OSStatus syncAudioUnitWithChannelSet (bool isInput, int busNr, const AudioChannelSet& channelSet)
{
    const int numChannels = channelSet.size();

    getCurrentLayout (isInput, busNr) = CoreAudioLayouts::toCoreAudio (channelSet);

    // is this bus activated?
    if (numChannels == 0)
        return noErr;

    auto& element = IOElement (isInput ? kAudioUnitScope_Input : kAudioUnitScope_Output, (UInt32) busNr);
    element.SetName ((CFStringRef) juceStringToNS (juceFilter->getBus (isInput, busNr)->getName()));

    const auto streamDescription = ausdk::ASBD::CreateCommonFloat32 (getSampleRate(), (UInt32) numChannels);
    return element.SetStreamFormat (streamDescription);
}
//============================================================================== | |
void clearPresetsArray() const | |
{ | |
for (int i = presetsArray.size(); --i >= 0;) | |
CFRelease (presetsArray.getReference(i).presetName); | |
presetsArray.clear(); | |
} | |
// Pushes the processor's current program number and name back to the AU host.
void refreshCurrentPreset()
{
    // this will make the AU host re-read and update the current preset name
    // in case it was changed here in the plug-in:
    const int currentProgramNumber = juceFilter->getCurrentProgram();
    const String currentProgramName = juceFilter->getProgramName (currentProgramNumber);

    AUPreset currentPreset;
    currentPreset.presetNumber = currentProgramNumber;
    currentPreset.presetName = currentProgramName.toCFString();  // creates a +1 retained CFString
    // NOTE(review): ownership of presetName is assumed to pass to the AU base class
    // here — verify SetAFactoryPresetAsCurrent's retain/release behaviour in the
    // ausdk version in use so the string is not leaked.
    SetAFactoryPresetAsCurrent (currentPreset);
}
//============================================================================== | |
// Per-bus layout bookkeeping accessors. 'getSupportedBusLayouts' returns the list of
// CoreAudio layout tags a bus supports; 'getCurrentLayout' returns the tag currently
// in use. The non-const overloads return references so callers can update in place.
std::vector<AudioChannelLayoutTag>& getSupportedBusLayouts (bool isInput, int bus) noexcept { return (isInput ? supportedInputLayouts : supportedOutputLayouts).getReference (bus); }
const std::vector<AudioChannelLayoutTag>& getSupportedBusLayouts (bool isInput, int bus) const noexcept { return (isInput ? supportedInputLayouts : supportedOutputLayouts).getReference (bus); }
AudioChannelLayoutTag& getCurrentLayout (bool isInput, int bus) noexcept { return (isInput ? currentInputLayout : currentOutputLayout).getReference (bus); }
AudioChannelLayoutTag getCurrentLayout (bool isInput, int bus) const noexcept { return (isInput ? currentInputLayout : currentOutputLayout)[bus]; }
//============================================================================== | |
// Builds the sorted, de-duplicated list of CoreAudio layout tags supported by a bus:
// every known named layout the bus accepts, plus a DiscreteInOrder tag for each
// supported discrete channel count. Returns an empty vector if the bus doesn't exist.
std::vector<AudioChannelLayoutTag> getSupportedLayoutTagsForBus (bool isInput, int busNum) const
{
    // std::set keeps the tags unique and ordered.
    std::set<AudioChannelLayoutTag> tags;

    if (AudioProcessor::Bus* bus = juceFilter->getBus (isInput, busNum))
    {
       #ifndef JucePlugin_PreferredChannelConfigurations
        // Probe all layouts JUCE knows a CoreAudio tag for.
        auto& knownTags = CoreAudioLayouts::getKnownCoreAudioTags();

        for (auto tag : knownTags)
            if (bus->isLayoutSupported (CoreAudioLayouts::fromCoreAudio (tag)))
                tags.insert (tag);
       #endif

        // add discrete layout tags
        // (the probe is capped by maxChannelsToProbeFor(), which is lower for Logic)
        int n = bus->getMaxSupportedChannels (maxChannelsToProbeFor());

        for (int ch = 0; ch < n; ++ch)
        {
           #ifdef JucePlugin_PreferredChannelConfigurations
            // Restricted mode: only the compile-time channel configurations count.
            const short configs[][2] = { JucePlugin_PreferredChannelConfigurations };

            if (AudioUnitHelpers::isLayoutSupported (*juceFilter, isInput, busNum, ch, configs))
                tags.insert (static_cast<AudioChannelLayoutTag> ((int) kAudioChannelLayoutTag_DiscreteInOrder | ch));
           #else
            if (bus->isLayoutSupported (AudioChannelSet::discreteChannels (ch)))
                tags.insert (static_cast<AudioChannelLayoutTag> ((int) kAudioChannelLayoutTag_DiscreteInOrder | ch));
           #endif
        }
    }

    return std::vector<AudioChannelLayoutTag> (tags.begin(), tags.end());
}
void addSupportedLayoutTagsForDirection (bool isInput) | |
{ | |
auto& layouts = isInput ? supportedInputLayouts : supportedOutputLayouts; | |
layouts.clearQuick(); | |
auto numBuses = AudioUnitHelpers::getBusCount (*juceFilter, isInput); | |
for (int busNr = 0; busNr < numBuses; ++busNr) | |
layouts.add (getSupportedLayoutTagsForBus (isInput, busNr)); | |
} | |
void addSupportedLayoutTags() | |
{ | |
currentInputLayout.clear(); currentOutputLayout.clear(); | |
currentInputLayout. resize (AudioUnitHelpers::getBusCountForWrapper (*juceFilter, true)); | |
currentOutputLayout.resize (AudioUnitHelpers::getBusCountForWrapper (*juceFilter, false)); | |
addSupportedLayoutTagsForDirection (true); | |
addSupportedLayoutTagsForDirection (false); | |
} | |
static int maxChannelsToProbeFor() | |
{ | |
return (getHostType().isLogic() ? 8 : 64); | |
} | |
//============================================================================== | |
// Handles host-initiated AU property changes. Only kAudioUnitProperty_ContextName
// on the global scope is acted upon: the new context (track) name is forwarded
// to the processor via updateTrackProperties().
void auPropertyListener (AudioUnitPropertyID propId, AudioUnitScope scope, AudioUnitElement)
{
    if (scope == kAudioUnitScope_Global && propId == kAudioUnitProperty_ContextName
         && juceFilter != nullptr)
    {
        // Fetch the name once instead of querying it both in the condition and the body.
        if (auto contextName = GetContextName())
        {
            AudioProcessor::TrackProperties props;
            props.name = String::fromCFString (contextName);
            juceFilter->updateTrackProperties (props);
        }
    }
}
// Static trampoline registered with the AU property-listener API: recovers the
// JuceAU instance from the refCon pointer and forwards to the member handler.
static void auPropertyListenerDispatcher (void* inRefCon, AudioUnit, AudioUnitPropertyID propId,
                                          AudioUnitScope scope, AudioUnitElement element)
{
    static_cast<JuceAU*> (inRefCon)->auPropertyListener (propId, scope, element);
}
JUCE_DECLARE_NON_COPYABLE (JuceAU) | |
}; | |
//==============================================================================
// Plug-ins that send or receive MIDI are registered through the music-device
// factory base class; pure audio effects use the plain AU base factory.
#if JucePlugin_ProducesMidiOutput || JucePlugin_WantsMidiInput || JucePlugin_IsMidiEffect
 #define FACTORY_BASE_CLASS ausdk::AUMusicDeviceFactory
#else
 #define FACTORY_BASE_CLASS ausdk::AUBaseFactory
#endif

// Expands to the component-entry boilerplate for JuceAU — presumably this is
// what defines the JuceAUFactory function called below (see AudioUnitSDK).
AUSDK_COMPONENT_ENTRY (FACTORY_BASE_CLASS, JuceAU)

// The exported symbol name is built from the plug-in's export prefix, e.g. "MyPlugFactory".
#define JUCE_AU_ENTRY_POINT_NAME JUCE_CONCAT (JucePlugin_AUExportPrefix, Factory)

extern "C" void* JUCE_AU_ENTRY_POINT_NAME (const AudioComponentDescription* inDesc);

// The entry point the AU host calls to instantiate the component. Records the
// active wrapper type before delegating to the generated factory.
AUSDK_EXPORT extern "C" void* JUCE_AU_ENTRY_POINT_NAME (const AudioComponentDescription* inDesc)
{
    PluginHostType::jucePlugInClientCurrentWrapperType = AudioProcessor::wrapperType_AudioUnit;
    return JuceAUFactory (inDesc);
}
#endif |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Note - the only changes here are the addition of the virtual functions below: (this file is based on JUCE 7.0.3) | |
// these need to be added to the plugin processor for MAC builds with #ifdef JUCE_MAC | |
#if 0 | |
virtual void setWorkgroupJoinFunctionPointer(std::function<void(void* threadId)>fptr){}; | |
virtual void setWorkgroupLeaveFunctionPointer(std::function<void(void* threadId)>fptr){}; | |
#endif | |
// Audio threads must be started with startRealtimeThread(RealtimeOptions{x,y}); // set priority and max process time expected
/* | |
In your thread run function(s), before entering the while loop, call:
#ifdef JUCE_MAC | |
owner.tryJoinMacWorkgroup((void*)this->getThreadId()); //(owner here is the parent class that holds the callback) | |
#endif | |
In your thread run function, before exiting the run function, call:
#ifdef JUCE_MAC | |
owner.tryLeaveMacWorkgroup((void*)this->getThreadId()); //(owner here is the parent class that holds the callback) | |
#endif | |
// Thread parent class function:
void tryJoinMacWorkgroup(void* threadId) | |
{ | |
if(mWorkgroupJoinFunctionPtr!= nullptr) | |
{ | |
mWorkgroupJoinFunctionPtr(threadId); | |
} | |
} | |
void tryLeaveMacWorkgroup(void* threadId) | |
{ | |
if(mWorkgroupLeaveFunctionPtr!= nullptr) | |
{ | |
mWorkgroupLeaveFunctionPtr(threadId); | |
} | |
} | |
It's crucial for threads to leave the workgroup with the same workgroup join token - hence the list held in AudioProcessorHolder | |
within juce_AU_Wrapper.mm | |
*/ | |
/* | |
============================================================================== | |
This file is part of the JUCE library. | |
Copyright (c) 2022 - Raw Material Software Limited | |
JUCE is an open source library subject to commercial or open-source | |
licensing. | |
By using JUCE, you agree to the terms of both the JUCE 7 End-User License | |
Agreement and JUCE Privacy Policy. | |
End User License Agreement: www.juce.com/juce-7-licence | |
Privacy Policy: www.juce.com/juce-privacy-policy | |
Or: You may also use this code under the terms of the GPL v3 (see | |
www.gnu.org/licenses). | |
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER | |
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE | |
DISCLAIMED. | |
============================================================================== | |
*/ | |
namespace juce | |
{ | |
//============================================================================== | |
/** | |
Base class for audio processing classes or plugins. | |
This is intended to act as a base class of audio processor that is general enough | |
to be wrapped as a VST, AU, AAX, etc, or used internally. | |
It is also used by the plugin hosting code as the wrapper around an instance | |
of a loaded plugin. | |
You should derive your own class from this base class, and if you're building a | |
plugin, you should implement a global function called createPluginFilter() which | |
creates and returns a new instance of your subclass. | |
@tags{Audio} | |
*/ | |
class JUCE_API AudioProcessor | |
{ | |
protected: | |
struct BusesProperties; | |
//============================================================================== | |
/** Constructor. | |
This constructor will create a main input and output bus which are disabled | |
by default. If you need more fine-grained control then use the other constructors. | |
*/ | |
AudioProcessor(); | |
/** Constructor for multi-bus AudioProcessors | |
If your AudioProcessor supports multiple buses than use this constructor | |
to initialise the bus layouts and bus names of your plug-in. | |
*/ | |
AudioProcessor (const BusesProperties& ioLayouts); | |
/** Constructor for AudioProcessors which use layout maps
    If your AudioProcessor uses layout maps then use this constructor.

    Each element of channelLayoutList is a two-element channel-count pair
    (the same shape as JucePlugin_PreferredChannelConfigurations entries);
    the list is converted to BusesProperties and forwarded to the
    BusesProperties constructor.
*/
AudioProcessor (const std::initializer_list<const short[2]>& channelLayoutList)
    : AudioProcessor (busesPropertiesFromLayoutArray (layoutListToArray (channelLayoutList)))
{
}
public: | |
//============================================================================== | |
// The sample type the wrapper will use when calling processBlock.
enum ProcessingPrecision
{
    singlePrecision,   // process via the AudioBuffer<float> overload
    doublePrecision    // process via the AudioBuffer<double> overload
};

// NOTE(review): presumably distinguishes realtime playback from offline
// rendering — confirm against the callers of this enum elsewhere in JUCE.
enum class Realtime
{
    no,
    yes
};

// Convenience alias for the listener's change-notification details type.
using ChangeDetails = AudioProcessorListener::ChangeDetails;
//============================================================================== | |
/** Destructor. */ | |
virtual ~AudioProcessor(); | |
//============================================================================== | |
/** Returns the name of this processor. */ | |
virtual const String getName() const = 0; | |
/** Returns a list of alternative names to use for this processor. | |
Some hosts truncate the name of your AudioProcessor when there isn't enough | |
space in the GUI to show the full name. Overriding this method, allows the host | |
to choose an alternative name (such as an abbreviation) to better fit the | |
available space. | |
*/ | |
virtual StringArray getAlternateDisplayNames() const; | |
//============================================================================== | |
// MAC OS WORKGROUPS SUPPORT:
/** The plug-in wrapper calls these to hand the processor callbacks that join/leave
    the host's audio workgroup. Processors running their own realtime threads should
    override them, store the callbacks, and invoke them with each thread's id when
    the thread starts/stops. The default implementations deliberately do nothing.
    (Use '#if JUCE_MAC' rather than '#ifdef' to match JUCE's platform-macro convention
    and behave correctly if the macro is defined as 0.)
*/
#if JUCE_MAC
virtual void setWorkgroupJoinFunctionPointer  (std::function<void (void* threadId)>) {}
virtual void setWorkgroupLeaveFunctionPointer (std::function<void (void* threadId)>) {}
#endif
//============================================================================== | |
/** Called before playback starts, to let the processor prepare itself. | |
The sample rate is the target sample rate, and will remain constant until | |
playback stops. | |
You can call getTotalNumInputChannels and getTotalNumOutputChannels | |
or query the busLayout member variable to find out the number of | |
channels your processBlock callback must process. | |
The maximumExpectedSamplesPerBlock value is a strong hint about the maximum | |
number of samples that will be provided in each block. You may want to use | |
this value to resize internal buffers. You should program defensively in | |
case a buggy host exceeds this value. The actual block sizes that the host | |
uses may be different each time the callback happens: completely variable | |
block sizes can be expected from some hosts. | |
@see busLayout, getTotalNumInputChannels, getTotalNumOutputChannels | |
*/ | |
virtual void prepareToPlay (double sampleRate, | |
int maximumExpectedSamplesPerBlock) = 0; | |
/** Called after playback has stopped, to let the object free up any resources it | |
no longer needs. | |
*/ | |
virtual void releaseResources() = 0; | |
/** Called by the host to indicate that you should reduce your memory footprint. | |
You should override this method to free up some memory gracefully, if possible, | |
otherwise the host may forcibly unload your AudioProcessor. | |
At the moment this method is only called when your AudioProcessor is an AUv3 | |
plug-in running on iOS. | |
*/ | |
virtual void memoryWarningReceived() { jassertfalse; }  // default: trip a debug assertion; override to release caches etc.
/** Renders the next block. | |
When this method is called, the buffer contains a number of channels which is | |
at least as great as the maximum number of input and output channels that | |
this processor is using. It will be filled with the processor's input data and | |
should be replaced with the processor's output. | |
So for example if your processor has a total of 2 input channels and 4 output | |
channels, then the buffer will contain 4 channels, the first two being filled | |
with the input data. Your processor should read these, do its processing, and | |
replace the contents of all 4 channels with its output. | |
Or if your processor has a total of 5 inputs and 2 outputs, the buffer will have 5 | |
channels, all filled with data, and your processor should overwrite the first 2 of | |
these with its output. But be VERY careful not to write anything to the last 3 | |
channels, as these might be mapped to memory that the host assumes is read-only! | |
If your plug-in has more than one input or output buses then the buffer passed | |
to the processBlock methods will contain a bundle of all channels of each bus. | |
Use getBusBuffer to obtain an audio buffer for a particular bus. | |
Note that if you have more outputs than inputs, then only those channels that | |
correspond to an input channel are guaranteed to contain sensible data - e.g. | |
in the case of 2 inputs and 4 outputs, the first two channels contain the input, | |
but the last two channels may contain garbage, so you should be careful not to | |
let this pass through without being overwritten or cleared. | |
Also note that the buffer may have more channels than are strictly necessary, | |
but you should only read/write from the ones that your processor is supposed to | |
be using. | |
The number of samples in these buffers is NOT guaranteed to be the same for every | |
callback, and may be more or less than the estimated value given to prepareToPlay(). | |
Your code must be able to cope with variable-sized blocks, or you're going to get | |
clicks and crashes! | |
Also note that some hosts will occasionally decide to pass a buffer containing | |
zero samples, so make sure that your algorithm can deal with that! | |
If the processor is receiving a MIDI input, then the midiMessages array will be filled | |
with the MIDI messages for this block. Each message's timestamp will indicate the | |
message's time, as a number of samples from the start of the block. | |
Any messages left in the MIDI buffer when this method has finished are assumed to | |
be the processor's MIDI output. This means that your processor should be careful to | |
clear any incoming messages from the array if it doesn't want them to be passed-on. | |
If you have implemented the getBypassParameter method, then you need to check the | |
value of this parameter in this callback and bypass your processing if the parameter | |
has a non-zero value. | |
Note that when calling this method as a host, the result may still be bypassed as | |
the parameter that controls the bypass may be non-zero. | |
Be very careful about what you do in this callback - it's going to be called by | |
the audio thread, so any kind of interaction with the UI is absolutely | |
out of the question. If you change a parameter in here and need to tell your UI to | |
update itself, the best way is probably to inherit from a ChangeBroadcaster, let | |
the UI components register as listeners, and then call sendChangeMessage() inside the | |
processBlock() method to send out an asynchronous message. You could also use | |
the AsyncUpdater class in a similar way. | |
@see getBusBuffer | |
*/ | |
virtual void processBlock (AudioBuffer<float>& buffer, | |
MidiBuffer& midiMessages) = 0; | |
/** Renders the next block. | |
When this method is called, the buffer contains a number of channels which is | |
at least as great as the maximum number of input and output channels that | |
this processor is using. It will be filled with the processor's input data and | |
should be replaced with the processor's output. | |
So for example if your processor has a combined total of 2 input channels and | |
4 output channels, then the buffer will contain 4 channels, the first two | |
being filled with the input data. Your processor should read these, do its | |
processing, and replace the contents of all 4 channels with its output. | |
Or if your processor has 5 inputs and 2 outputs, the buffer will have 5 channels, | |
all filled with data, and your processor should overwrite the first 2 of these | |
with its output. But be VERY careful not to write anything to the last 3 | |
channels, as these might be mapped to memory that the host assumes is read-only! | |
If your plug-in has more than one input or output buses then the buffer passed | |
to the processBlock methods will contain a bundle of all channels of | |
each bus. Use getBusBuffer to obtain a audio buffer for a particular bus. | |
Note that if you have more outputs than inputs, then only those channels that | |
correspond to an input channel are guaranteed to contain sensible data - e.g. | |
in the case of 2 inputs and 4 outputs, the first two channels contain the input, | |
but the last two channels may contain garbage, so you should be careful not to | |
let this pass through without being overwritten or cleared. | |
Also note that the buffer may have more channels than are strictly necessary, | |
but you should only read/write from the ones that your processor is supposed to | |
be using. | |
If your plugin uses buses, then you should use getBusBuffer() or | |
getChannelIndexInProcessBlockBuffer() to find out which of the input and output | |
channels correspond to which of the buses. | |
The number of samples in these buffers is NOT guaranteed to be the same for every | |
callback, and may be more or less than the estimated value given to prepareToPlay(). | |
Your code must be able to cope with variable-sized blocks, or you're going to get | |
clicks and crashes! | |
Also note that some hosts will occasionally decide to pass a buffer containing | |
zero samples, so make sure that your algorithm can deal with that! | |
If the processor is receiving a MIDI input, then the midiMessages array will be filled | |
with the MIDI messages for this block. Each message's timestamp will indicate the | |
message's time, as a number of samples from the start of the block. | |
Any messages left in the MIDI buffer when this method has finished are assumed to | |
be the processor's MIDI output. This means that your processor should be careful to | |
clear any incoming messages from the array if it doesn't want them to be passed-on. | |
If you have implemented the getBypassParameter method, then you need to check the | |
value of this parameter in this callback and bypass your processing if the parameter | |
has a non-zero value. | |
Note that when calling this method as a host, the result may still be bypassed as | |
the parameter that controls the bypass may be non-zero. | |
Be very careful about what you do in this callback - it's going to be called by | |
the audio thread, so any kind of interaction with the UI is absolutely | |
out of the question. If you change a parameter in here and need to tell your UI to | |
update itself, the best way is probably to inherit from a ChangeBroadcaster, let | |
the UI components register as listeners, and then call sendChangeMessage() inside the | |
processBlock() method to send out an asynchronous message. You could also use | |
the AsyncUpdater class in a similar way. | |
@see getBusBuffer | |
*/ | |
virtual void processBlock (AudioBuffer<double>& buffer, | |
MidiBuffer& midiMessages); | |
/** Renders the next block when the processor is being bypassed. | |
The default implementation of this method will pass-through any incoming audio, but | |
you may override this method e.g. to add latency compensation to the data to match | |
the processor's latency characteristics. This will avoid situations where bypassing | |
will shift the signal forward in time, possibly creating pre-echo effects and odd timings. | |
Another use for this method would be to cross-fade or morph between the wet (not bypassed) | |
and dry (bypassed) signals. | |
*/ | |
virtual void processBlockBypassed (AudioBuffer<float>& buffer, | |
MidiBuffer& midiMessages); | |
/** Renders the next block when the processor is being bypassed. | |
The default implementation of this method will pass-through any incoming audio, but | |
you may override this method e.g. to add latency compensation to the data to match | |
the processor's latency characteristics. This will avoid situations where bypassing | |
will shift the signal forward in time, possibly creating pre-echo effects and odd timings. | |
Another use for this method would be to cross-fade or morph between the wet (not bypassed) | |
and dry (bypassed) signals. | |
*/ | |
virtual void processBlockBypassed (AudioBuffer<double>& buffer, | |
MidiBuffer& midiMessages); | |
//============================================================================== | |
/** | |
Represents the bus layout state of a plug-in | |
*/ | |
struct BusesLayout | |
{ | |
/** An array containing the list of input buses that this processor supports. */ | |
Array<AudioChannelSet> inputBuses; | |
/** An array containing the list of output buses that this processor supports. */ | |
Array<AudioChannelSet> outputBuses; | |
/** Get the number of channels of a particular bus */ | |
int getNumChannels (bool isInput, int busIndex) const noexcept | |
{ | |
auto& bus = (isInput ? inputBuses : outputBuses); | |
return isPositiveAndBelow (busIndex, bus.size()) ? bus.getReference (busIndex).size() : 0; | |
} | |
/** Get the channel set of a particular bus */ | |
AudioChannelSet& getChannelSet (bool isInput, int busIndex) noexcept | |
{ | |
return (isInput ? inputBuses : outputBuses).getReference (busIndex); | |
} | |
/** Get the channel set of a particular bus */ | |
AudioChannelSet getChannelSet (bool isInput, int busIndex) const noexcept | |
{ | |
return (isInput ? inputBuses : outputBuses)[busIndex]; | |
} | |
/** Get the input channel layout on the main bus. */ | |
AudioChannelSet getMainInputChannelSet() const noexcept { return getChannelSet (true, 0); } | |
/** Get the output channel layout on the main bus. */ | |
AudioChannelSet getMainOutputChannelSet() const noexcept { return getChannelSet (false, 0); } | |
/** Get the number of input channels on the main bus. */ | |
int getMainInputChannels() const noexcept { return getNumChannels (true, 0); } | |
/** Get the number of output channels on the main bus. */ | |
int getMainOutputChannels() const noexcept { return getNumChannels (false, 0); } | |
bool operator== (const BusesLayout& other) const noexcept { return inputBuses == other.inputBuses && outputBuses == other.outputBuses; } | |
bool operator!= (const BusesLayout& other) const noexcept { return inputBuses != other.inputBuses || outputBuses != other.outputBuses; } | |
}; | |
//============================================================================== | |
/** | |
Describes the layout and properties of an audio bus. | |
Effectively a bus description is a named set of channel types. | |
@see AudioChannelSet, AudioProcessor::addBus | |
*/ | |
class Bus
{
public:
    /** Returns true if this bus is an input bus. */
    bool isInput() const noexcept;

    /** Returns the index of this bus. */
    int getBusIndex() const noexcept;

    /** Returns true if the current bus is the main input or output bus. */
    bool isMain() const noexcept { return getBusIndex() == 0; }

    //==============================================================================
    /** The bus's name. */
    const String& getName() const noexcept { return name; }

    /** Get the default layout of this bus.
        @see AudioChannelSet
    */
    const AudioChannelSet& getDefaultLayout() const noexcept { return dfltLayout; }

    //==============================================================================
    /** The bus's current layout. This will be AudioChannelSet::disabled() if the current
        layout is disabled.
        @see AudioChannelSet
    */
    const AudioChannelSet& getCurrentLayout() const noexcept { return layout; }

    /** Return the bus's last active channel layout.
        If the bus is currently enabled then the result will be identical to getCurrentLayout
        otherwise it will return the last enabled layout.
        @see AudioChannelSet
    */
    const AudioChannelSet& getLastEnabledLayout() const noexcept { return lastLayout; }

    /** Sets the bus's current layout.
        If the AudioProcessor does not support this layout then this will return false.
        @see AudioChannelSet
    */
    bool setCurrentLayout (const AudioChannelSet& layout);

    /** Sets the bus's current layout without changing the enabled state.
        If the AudioProcessor does not support this layout then this will return false.
        @see AudioChannelSet
    */
    bool setCurrentLayoutWithoutEnabling (const AudioChannelSet& layout);

    /** Return the number of channels of the current bus. */
    inline int getNumberOfChannels() const noexcept { return cachedChannelCount; }

    /** Set the number of channels of this bus. This will return false if the AudioProcessor
        does not support this layout.
    */
    bool setNumberOfChannels (int channels);

    //==============================================================================
    /** Checks if a particular layout is supported.
        @param set           The AudioChannelSet which is to be probed.
        @param currentLayout If non-null, pretend that the current layout of the AudioProcessor is
                             currentLayout. On exit, currentLayout will be modified to
                             represent the buses layouts of the AudioProcessor as if the layout
                             of the receiver had been successfully changed. This is useful as changing
                             the layout of the receiver may change the bus layout of other buses.
        @see AudioChannelSet
    */
    bool isLayoutSupported (const AudioChannelSet& set, BusesLayout* currentLayout = nullptr) const;

    /** Checks if this bus can support a given number of channels. */
    bool isNumberOfChannelsSupported (int channels) const;

    /** Returns a ChannelSet that the bus supports with a given number of channels. */
    AudioChannelSet supportedLayoutWithChannels (int channels) const;

    /** Returns the maximum number of channels that this bus can support.
        @param limit The maximum value to return.
    */
    int getMaxSupportedChannels (int limit = AudioChannelSet::maxChannelsOfNamedLayout) const;

    /** Returns the resulting layouts of all buses after changing the layout of this bus.
        Changing an individual layout of a bus may also change the layout of all the other
        buses. This method returns what the layouts of all the buses of the audio processor
        would be, if you were to change the layout of this bus to the given layout. If there
        is no way to support the given layout then this method will return the next best
        layout.
    */
    BusesLayout getBusesLayoutForLayoutChangeOfBus (const AudioChannelSet& set) const;

    //==============================================================================
    /** Returns true if the current bus is enabled. */
    bool isEnabled() const noexcept { return ! layout.isDisabled(); }

    /** Enable or disable this bus. This will return false if the AudioProcessor
        does not support disabling this bus. */
    bool enable (bool shouldEnable = true);

    /** Returns if this bus is enabled by default. */
    bool isEnabledByDefault() const noexcept { return enabledByDefault; }

    //==============================================================================
    /** Returns the position of a bus's channels within the processBlock buffer.
        This can be called in processBlock to figure out which channel of the master AudioBuffer
        maps onto a specific bus's channel.
    */
    int getChannelIndexInProcessBlockBuffer (int channelIndex) const noexcept;

    /** Returns an AudioBuffer containing a set of channel pointers for a specific bus.
        This can be called in processBlock to get a buffer containing a sub-group of the master
        AudioBuffer which contains all the plugin channels.
    */
    template <typename FloatType>
    AudioBuffer<FloatType> getBusBuffer (AudioBuffer<FloatType>& processBlockBuffer) const
    {
        // Look up this bus's direction and index once, then delegate to the
        // owning processor, which knows the overall channel-to-bus mapping.
        auto di = getDirectionAndIndex();
        return owner.getBusBuffer (processBlockBuffer, di.isInput, di.index);
    }

private:
    friend class AudioProcessor;

    // Buses are created only by their owning AudioProcessor (see friend above).
    Bus (AudioProcessor&, const String&, const AudioChannelSet&, bool);

    // Bundles the two results of a single direction/index lookup.
    struct BusDirectionAndIndex
    {
        bool isInput;
        int index;
    };

    BusDirectionAndIndex getDirectionAndIndex() const noexcept;
    void updateChannelCount() noexcept;

    AudioProcessor& owner;                           // the processor this bus belongs to
    String name;                                     // display name of the bus
    AudioChannelSet layout, dfltLayout, lastLayout;  // current, default, and last-enabled layouts
    bool enabledByDefault;
    int cachedChannelCount;                          // presumably refreshed by updateChannelCount() — confirm in the .cpp

    JUCE_DECLARE_NON_COPYABLE (Bus)
};
//============================================================================== | |
/** Returns the number of buses on the input or output side */
int getBusCount (bool isInput) const noexcept { return (isInput ? inputBuses : outputBuses).size(); }

/** Returns the audio bus with a given index and direction.
    If busIndex is invalid then this method will return a nullptr.
*/
// Both overloads delegate to getBusImpl so the lookup logic exists only once.
Bus* getBus (bool isInput, int busIndex) noexcept { return getBusImpl (*this, isInput, busIndex); }

/** Returns the audio bus with a given index and direction.
    If busIndex is invalid then this method will return a nullptr.
*/
const Bus* getBus (bool isInput, int busIndex) const noexcept { return getBusImpl (*this, isInput, busIndex); }
//============================================================================== | |
/** Callback to query if a bus can currently be added. | |
This callback probes if a bus can currently be added. You should override | |
this callback if you want to support dynamically adding/removing buses by | |
the host. This is useful for mixer audio processors. | |
The default implementation will always return false. | |
@see addBus | |
*/ | |
virtual bool canAddBus (bool isInput) const { ignoreUnused (isInput); return false; } | |
/** Callback to query if the last bus can currently be removed. | |
This callback probes if the last bus can currently be removed. You should | |
override this callback if you want to support dynamically adding/removing | |
buses by the host. This is useful for mixer audio processors. | |
If you return true in this callback then the AudioProcessor will go ahead | |
and delete the bus. | |
The default implementation will always return false. | |
*/ | |
virtual bool canRemoveBus (bool isInput) const { ignoreUnused (isInput); return false; } | |
/** Dynamically request an additional bus. | |
Request an additional bus from the audio processor. If the audio processor | |
does not support adding additional buses then this method will return false. | |
Most audio processors will not allow you to dynamically add/remove | |
audio buses and will return false. | |
This method will invoke the canApplyBusCountChange callback to probe | |
if a bus can be added and, if yes, will use the supplied bus properties | |
of the canApplyBusCountChange callback to create a new bus. | |
@see canApplyBusCountChange, removeBus | |
*/ | |
bool addBus (bool isInput); | |
/** Dynamically remove the latest added bus. | |
Request the removal of the last bus from the audio processor. If the | |
audio processor does not support removing buses then this method will | |
return false. | |
Most audio processors will not allow you to dynamically add/remove | |
audio buses and will return false. | |
The default implementation will return false. | |
This method will invoke the canApplyBusCountChange callback to probe if | |
a bus can currently be removed and, if yes, will go ahead and remove it. | |
@see addBus, canRemoveBus | |
*/ | |
bool removeBus (bool isInput); | |
//============================================================================== | |
/** Set the channel layouts of this audio processor. | |
If the layout is not supported by this audio processor then | |
this method will return false. You can use the checkBusesLayoutSupported | |
and getNextBestLayout methods to probe which layouts this audio | |
processor supports. | |
*/ | |
bool setBusesLayout (const BusesLayout&); | |
/** Set the channel layouts of this audio processor without changing the | |
enablement state of the buses. | |
If the layout is not supported by this audio processor then | |
this method will return false. You can use the checkBusesLayoutSupported | |
methods to probe which layouts this audio processor supports. | |
*/ | |
bool setBusesLayoutWithoutEnabling (const BusesLayout&); | |
/** Provides the current channel layouts of this audio processor. */ | |
BusesLayout getBusesLayout() const; | |
/** Provides the channel layout of the bus with a given index and direction. | |
If the index, direction combination is invalid then this will return an | |
AudioChannelSet with no channels. | |
*/ | |
AudioChannelSet getChannelLayoutOfBus (bool isInput, int busIndex) const noexcept; | |
/** Set the channel layout of the bus with a given index and direction. | |
If the index, direction combination is invalid or the layout is not | |
supported by the audio processor then this method will return false. | |
*/ | |
bool setChannelLayoutOfBus (bool isInput, int busIndex, const AudioChannelSet& layout); | |
/** Provides the number of channels of the bus with a given index and direction. | |
If the index, direction combination is invalid then this will return zero. | |
*/ | |
inline int getChannelCountOfBus (bool isInput, int busIndex) const noexcept | |
{ | |
if (auto* bus = getBus (isInput, busIndex)) | |
return bus->getNumberOfChannels(); | |
return 0; | |
} | |
/** Enables all buses */ | |
bool enableAllBuses(); | |
/** Disables all non-main buses (aux and sidechains). */ | |
bool disableNonMainBuses(); | |
//============================================================================== | |
/** Returns the position of a bus's channels within the processBlock buffer. | |
This can be called in processBlock to figure out which channel of the master AudioBuffer | |
maps onto a specific bus's channel. | |
*/ | |
int getChannelIndexInProcessBlockBuffer (bool isInput, int busIndex, int channelIndex) const noexcept; | |
/** Returns the offset in a bus's buffer from an absolute channel index. | |
This method returns the offset in a bus's buffer given an absolute channel index. | |
It also provides the bus index. For example, this method would return one | |
for a processor with two stereo buses when given the absolute channel index. | |
*/ | |
int getOffsetInBusBufferForAbsoluteChannelIndex (bool isInput, int absoluteChannelIndex, int& busIndex) const noexcept; | |
/** Returns an AudioBuffer containing a set of channel pointers for a specific bus. | |
This can be called in processBlock to get a buffer containing a sub-group of the master | |
AudioBuffer which contains all the plugin channels. | |
*/ | |
template <typename FloatType> | |
AudioBuffer<FloatType> getBusBuffer (AudioBuffer<FloatType>& processBlockBuffer, bool isInput, int busIndex) const | |
{ | |
auto busNumChannels = getChannelCountOfBus (isInput, busIndex); | |
auto channelOffset = getChannelIndexInProcessBlockBuffer (isInput, busIndex, 0); | |
return AudioBuffer<FloatType> (processBlockBuffer.getArrayOfWritePointers() + channelOffset, | |
busNumChannels, processBlockBuffer.getNumSamples()); | |
} | |
//============================================================================== | |
/** Returns true if the Audio processor is likely to support a given layout. | |
This can be called regardless of whether the processor is currently running.
*/ | |
bool checkBusesLayoutSupported (const BusesLayout&) const; | |
//============================================================================== | |
/** Returns true if the Audio processor supports double precision floating point processing. | |
The default implementation will always return false. | |
If you return true here then you must override the double precision versions | |
of processBlock. Additionally, you must call getProcessingPrecision() in | |
your prepareToPlay method to determine the precision with which you need to | |
allocate your internal buffers. | |
@see getProcessingPrecision, setProcessingPrecision | |
*/ | |
virtual bool supportsDoublePrecisionProcessing() const; | |
/** Returns the precision-mode of the processor. | |
Depending on the result of this method you MUST call the corresponding version | |
of processBlock. The default processing precision is single precision. | |
@see setProcessingPrecision, supportsDoublePrecisionProcessing | |
*/ | |
ProcessingPrecision getProcessingPrecision() const noexcept
{
    return processingPrecision;
}
/** Returns true if the current precision is set to doublePrecision. */ | |
bool isUsingDoublePrecision() const noexcept
{
    return processingPrecision == doublePrecision;
}
/** Changes the processing precision of the receiver. A client of the AudioProcessor | |
calls this function to indicate which version of processBlock (single or double | |
precision) it intends to call. The client MUST call this function before calling | |
the prepareToPlay method so that the receiver can do any necessary allocations | |
in the prepareToPlay() method. An implementation of prepareToPlay() should call | |
getProcessingPrecision() to determine with which precision it should allocate | |
its internal buffers.
Note that setting the processing precision to double floating point precision | |
on a receiver which does not support double precision processing (i.e. | |
supportsDoublePrecisionProcessing() returns false) will result in an assertion. | |
@see getProcessingPrecision, supportsDoublePrecisionProcessing | |
*/ | |
void setProcessingPrecision (ProcessingPrecision newPrecision) noexcept; | |
//============================================================================== | |
/** Returns the current AudioPlayHead object that should be used to find | |
out the state and position of the playhead. | |
You can ONLY call this from your processBlock() method! Calling it at other | |
times will produce undefined behaviour, as the host may not have any context | |
in which a time would make sense, and some hosts will almost certainly have | |
multithreading issues if it's not called on the audio thread. | |
The AudioPlayHead object that is returned can be used to get the details about | |
the time of the start of the block currently being processed. But do not | |
store this pointer or use it outside of the current audio callback, because | |
the host may delete or re-use it. | |
If the host can't or won't provide any time info, this will return nullptr. | |
*/ | |
AudioPlayHead* getPlayHead() const noexcept
{
    return playHead;
}
//============================================================================== | |
/** Returns the total number of input channels. | |
This method will return the total number of input channels by accumulating | |
the number of channels on each input bus. The number of channels of the | |
buffer passed to your processBlock callback will be equivalent to either | |
getTotalNumInputChannels or getTotalNumOutputChannels - whichever
is greater. | |
Note that getTotalNumInputChannels is equivalent to | |
getMainBusNumInputChannels if your processor does not have any sidechains | |
or aux buses. | |
*/ | |
int getTotalNumInputChannels() const noexcept
{
    return cachedTotalIns;
}
/** Returns the total number of output channels. | |
This method will return the total number of output channels by accumulating | |
the number of channels on each output bus. The number of channels of the | |
buffer passed to your processBlock callback will be equivalent to either | |
getTotalNumInputChannels or getTotalNumOutputChannels - whichever
is greater. | |
Note that getTotalNumOutputChannels is equivalent to | |
getMainBusNumOutputChannels if your processor does not have any sidechains | |
or aux buses. | |
*/ | |
int getTotalNumOutputChannels() const noexcept
{
    return cachedTotalOuts;
}
/** Returns the number of input channels on the main bus. */ | |
inline int getMainBusNumInputChannels() const noexcept
{
    // The main bus is always bus index 0.
    return getChannelCountOfBus (true, 0);
}
/** Returns the number of output channels on the main bus. */ | |
inline int getMainBusNumOutputChannels() const noexcept
{
    // The main bus is always bus index 0.
    return getChannelCountOfBus (false, 0);
}
//============================================================================== | |
/** Returns true if the channel layout map contains a certain layout. | |
You can use this method to help you implement the checkBusesLayoutSupported | |
method. For example | |
@code | |
bool checkBusesLayoutSupported (const BusesLayout& layouts) override | |
{ | |
return containsLayout (layouts, {{1,1},{2,2}}); | |
} | |
@endcode | |
*/ | |
static bool containsLayout (const BusesLayout& layouts, const std::initializer_list<const short[2]>& channelLayoutList) | |
{ | |
return containsLayout (layouts, layoutListToArray (channelLayoutList)); | |
} | |
template <size_t numLayouts> | |
static bool containsLayout (const BusesLayout& layouts, const short (&channelLayoutList) [numLayouts][2]) | |
{ | |
return containsLayout (layouts, layoutListToArray (channelLayoutList)); | |
} | |
/** Returns the next best layout which is contained in a channel layout map. | |
You can use this method to help you implement getNextBestLayout. For example: | |
@code | |
BusesLayout getNextBestLayout (const BusesLayout& layouts) override | |
{ | |
return getNextBestLayoutInLayoutList (layouts, {{1,1},{2,2}}); | |
} | |
@endcode | |
*/ | |
template <size_t numLayouts> | |
BusesLayout getNextBestLayoutInLayoutList (const BusesLayout& layouts, | |
const short (&channelLayoutList) [numLayouts][2]) | |
{ | |
return getNextBestLayoutInList (layouts, layoutListToArray (channelLayoutList)); | |
} | |
//============================================================================== | |
/** Returns the current sample rate. | |
This can be called from your processBlock() method - it's not guaranteed | |
to be valid at any other time, and may return 0 if it's unknown. | |
*/ | |
double getSampleRate() const noexcept
{
    return currentSampleRate;
}
/** Returns the current typical block size that is being used. | |
This can be called from your processBlock() method - it's not guaranteed | |
to be valid at any other time. | |
Remember it's not the ONLY block size that may be used when calling | |
processBlock, it's just the normal one. The actual block sizes used may be | |
larger or smaller than this, and will vary between successive calls. | |
*/ | |
int getBlockSize() const noexcept
{
    return blockSize;
}
//============================================================================== | |
/** This returns the number of samples delay that the processor imposes on the audio | |
passing through it. | |
The host will call this to find the latency - the processor itself should set this value | |
by calling setLatencySamples() as soon as it can during its initialisation. | |
*/ | |
int getLatencySamples() const noexcept
{
    return latencySamples;
}
/** Your processor subclass should call this to set the number of samples delay that it introduces. | |
The processor should call this as soon as it can during initialisation, and can call it | |
later if the value changes. | |
*/ | |
void setLatencySamples (int newLatency); | |
/** Returns the length of the processor's tail, in seconds. */ | |
virtual double getTailLengthSeconds() const = 0; | |
/** Returns true if the processor wants MIDI messages. */ | |
virtual bool acceptsMidi() const = 0; | |
/** Returns true if the processor produces MIDI messages. */ | |
virtual bool producesMidi() const = 0; | |
/** Returns true if the processor supports MPE. */ | |
virtual bool supportsMPE() const
{
    // No MPE support unless a subclass opts in.
    return false;
}
/** Returns true if this is a MIDI effect plug-in and does no audio processing. */ | |
virtual bool isMidiEffect() const
{
    // Not a MIDI-only effect unless a subclass opts in.
    return false;
}
//============================================================================== | |
/** This returns a critical section that will automatically be locked while the host | |
is calling the processBlock() method. | |
Use it from your UI or other threads to lock access to variables that are used | |
by the process callback, but obviously be careful not to keep it locked for | |
too long, because that could cause stuttering playback. If you need to do something | |
that'll take a long time and need the processing to stop while it happens, use the | |
suspendProcessing() method instead. | |
@see suspendProcessing | |
*/ | |
const CriticalSection& getCallbackLock() const noexcept
{
    return callbackLock;
}
/** Enables and disables the processing callback. | |
If you need to do something time-consuming on a thread and would like to make sure | |
the audio processing callback doesn't happen until you've finished, use this | |
to disable the callback and re-enable it again afterwards. | |
E.g. | |
@code | |
void loadNewPatch() | |
{ | |
suspendProcessing (true); | |
..do something that takes ages.. | |
suspendProcessing (false); | |
} | |
@endcode | |
If the host tries to make an audio callback while processing is suspended, the | |
processor will return an empty buffer, but won't block the audio thread like it would | |
do if you use the getCallbackLock() critical section to synchronise access. | |
Any code that calls processBlock() should call isSuspended() before doing so, and | |
if the processor is suspended, it should avoid the call and emit silence or | |
whatever is appropriate. | |
@see getCallbackLock | |
*/ | |
void suspendProcessing (bool shouldBeSuspended); | |
/** Returns true if processing is currently suspended. | |
@see suspendProcessing | |
*/ | |
bool isSuspended() const noexcept
{
    return suspended;
}
/** A plugin can override this to be told when it should reset any playing voices. | |
The default implementation does nothing, but a host may call this to tell the | |
plugin that it should stop any tails or sounds that have been left running. | |
*/ | |
virtual void reset(); | |
//============================================================================== | |
/** Returns the parameter that controls the AudioProcessor's bypass state. | |
If this method returns a nullptr then you can still control the bypass by | |
calling processBlockBypassed instead of processBlock. On the other hand, | |
if this method returns a non-null value, you should never call | |
processBlockBypassed but use the returned parameter to control the bypass | |
state instead. | |
A plug-in can override this function to return a parameter which controls your | |
plug-in's bypass. You should always check the value of this parameter in your | |
processBlock callback and bypass any effects if it is non-zero. | |
*/ | |
virtual AudioProcessorParameter* getBypassParameter() const
{
    // No dedicated bypass parameter unless a subclass provides one.
    return nullptr;
}
//============================================================================== | |
/** Returns true if the processor is being run in an offline mode for rendering. | |
If the processor is being run live on realtime signals, this returns false. | |
If the mode is unknown, this will assume it's realtime and return false. | |
This value may be unreliable until the prepareToPlay() method has been called, | |
and could change each time prepareToPlay() is called. | |
@see setNonRealtime() | |
*/ | |
bool isNonRealtime() const noexcept
{
    return nonRealtime;
}
/** Returns Realtime::no if the processor is being run in an offline mode for rendering.
If the processor is being run live on realtime signals, this returns Realtime::yes.
If the mode is unknown, this will assume it's realtime and return Realtime::yes.
This value may be unreliable until the prepareToPlay() method has been called, | |
and could change each time prepareToPlay() is called. | |
@see setNonRealtime() | |
*/ | |
Realtime isRealtime() const noexcept
{
    if (isNonRealtime())
        return Realtime::no;

    return Realtime::yes;
}
/** Called by the host to tell this processor whether it's being used in a non-realtime | |
capacity for offline rendering or bouncing. | |
*/ | |
virtual void setNonRealtime (bool isNonRealtime) noexcept; | |
//============================================================================== | |
/** Creates the processor's GUI. | |
This can return nullptr if you want a GUI-less processor, in which case the host | |
may create a generic UI that lets the user twiddle the parameters directly. | |
If you do want to pass back a component, the component should be created and set to | |
the correct size before returning it. If you implement this method, you must | |
also implement the hasEditor() method and make it return true. | |
Remember not to do anything silly like allowing your processor to keep a pointer to | |
the component that gets created - it could be deleted later without any warning, which | |
would make your pointer into a dangler. Use the getActiveEditor() method instead. | |
The correct way to handle the connection between an editor component and its | |
processor is to use something like a ChangeBroadcaster so that the editor can | |
register itself as a listener, and be told when a change occurs. This lets them | |
safely unregister themselves when they are deleted. | |
Here are a few things to bear in mind when writing an editor: | |
- Initially there won't be an editor, until the user opens one, or they might | |
not open one at all. Your processor mustn't rely on it being there. | |
- An editor object may be deleted and a replacement one created again at any time. | |
- It's safe to assume that an editor will be deleted before its processor. | |
@see hasEditor | |
*/ | |
virtual AudioProcessorEditor* createEditor() = 0; | |
/** Your processor subclass must override this and return true if it can create an | |
editor component. | |
@see createEditor | |
*/ | |
virtual bool hasEditor() const = 0; | |
//============================================================================== | |
/** Returns the active editor, if there is one. Bear in mind this can return nullptr | |
even if an editor has previously been opened. | |
Note that you should only call this method from the message thread as the active | |
editor may be deleted by the message thread, causing a dangling pointer. | |
*/ | |
AudioProcessorEditor* getActiveEditor() const noexcept; | |
/** Returns the active editor, or if there isn't one, it will create one. | |
This may call createEditor() internally to create the component. | |
*/ | |
AudioProcessorEditor* createEditorIfNeeded(); | |
//============================================================================== | |
/** Returns the default number of steps for a parameter. | |
NOTE! This method is deprecated! It's recommended that you use | |
AudioProcessorParameter::getNumSteps() instead. | |
@see getParameterNumSteps | |
*/ | |
static int getDefaultNumParameterSteps() noexcept; | |
/** The processor can call this when something (apart from a parameter value) has changed. | |
It sends a hint to the host that something like the program, number of parameters, | |
etc, has changed, and that it should update itself. | |
*/ | |
void updateHostDisplay (const ChangeDetails& details = ChangeDetails::getDefaultFlags()); | |
//============================================================================== | |
/** Adds a parameter to the AudioProcessor. | |
The parameter object will be managed and deleted automatically by the | |
AudioProcessor when no longer needed. | |
*/ | |
void addParameter (AudioProcessorParameter*); | |
/** Adds a group of parameters to the AudioProcessor. | |
All the parameter objects contained within the group will be managed and | |
deleted automatically by the AudioProcessor when no longer needed. | |
@see addParameter | |
*/ | |
void addParameterGroup (std::unique_ptr<AudioProcessorParameterGroup>); | |
/** Returns the group of parameters managed by this AudioProcessor. */ | |
const AudioProcessorParameterGroup& getParameterTree() const; | |
/** Sets the group of parameters managed by this AudioProcessor. | |
Replacing the tree after your AudioProcessor has been constructed will | |
crash many hosts, so don't do it! You may, however, change parameter and | |
group names by iterating the tree returned by getParameterTree(). | |
Afterwards, call updateHostDisplay() to inform the host of the changes. | |
Not all hosts support dynamic changes to parameters and group names. | |
*/ | |
void setParameterTree (AudioProcessorParameterGroup&& newTree); | |
/** A processor should implement this method so that the host can ask it to | |
rebuild its parameter tree. | |
For most plug-ins it's enough to simply add your parameters in the | |
constructor and leave this unimplemented. | |
*/ | |
virtual void refreshParameterList(); | |
/** Returns a flat list of the parameters in the current tree. */ | |
const Array<AudioProcessorParameter*>& getParameters() const; | |
//============================================================================== | |
/** Returns the number of preset programs the processor supports. | |
The value returned must be valid as soon as this object is created, and | |
must not change over its lifetime. | |
This value shouldn't be less than 1. | |
*/ | |
virtual int getNumPrograms() = 0; | |
/** Returns the number of the currently active program. */ | |
virtual int getCurrentProgram() = 0; | |
/** Called by the host to change the current program. */ | |
virtual void setCurrentProgram (int index) = 0; | |
/** Must return the name of a given program. */ | |
virtual const String getProgramName (int index) = 0; | |
/** Called by the host to rename a program. */ | |
virtual void changeProgramName (int index, const String& newName) = 0; | |
//============================================================================== | |
/** The host will call this method when it wants to save the processor's internal state. | |
This must copy any info about the processor's state into the block of memory provided, | |
so that the host can store this and later restore it using setStateInformation(). | |
Note that there's also a getCurrentProgramStateInformation() method, which only | |
stores the current program, not the state of the entire processor. | |
See also the helper function copyXmlToBinary() for storing settings as XML. | |
@see getCurrentProgramStateInformation | |
*/ | |
virtual void getStateInformation (juce::MemoryBlock& destData) = 0; | |
/** The host will call this method if it wants to save the state of just the processor's | |
current program. | |
Unlike getStateInformation, this should only return the current program's state. | |
Not all hosts support this, and if you don't implement it, the base class | |
method just calls getStateInformation() instead. If you do implement it, be
sure to also implement setCurrentProgramStateInformation.
@see getStateInformation, setCurrentProgramStateInformation | |
*/ | |
virtual void getCurrentProgramStateInformation (juce::MemoryBlock& destData); | |
/** This must restore the processor's state from a block of data previously created | |
using getStateInformation(). | |
Note that there's also a setCurrentProgramStateInformation() method, which tries | |
to restore just the current program, not the state of the entire processor. | |
See also the helper function getXmlFromBinary() for loading settings as XML. | |
@see setCurrentProgramStateInformation | |
*/ | |
virtual void setStateInformation (const void* data, int sizeInBytes) = 0; | |
/** The host will call this method if it wants to restore the state of just the processor's | |
current program. | |
Not all hosts support this, and if you don't implement it, the base class | |
method just calls setStateInformation() instead. If you do implement it, be | |
sure to also implement getCurrentProgramStateInformation. | |
@see setStateInformation, getCurrentProgramStateInformation | |
*/ | |
virtual void setCurrentProgramStateInformation (const void* data, int sizeInBytes); | |
/** This method is called when the total number of input or output channels is changed. */ | |
virtual void numChannelsChanged(); | |
/** This method is called when the number of buses is changed. */ | |
virtual void numBusesChanged(); | |
/** This method is called when the layout of the audio processor changes. */ | |
virtual void processorLayoutsChanged(); | |
//============================================================================== | |
/** Adds a listener that will be called when an aspect of this processor changes. */ | |
virtual void addListener (AudioProcessorListener* newListener); | |
/** Removes a previously added listener. */ | |
virtual void removeListener (AudioProcessorListener* listenerToRemove); | |
//============================================================================== | |
/** Tells the processor to use this playhead object. | |
The processor will not take ownership of the object, so the caller must delete it when | |
it is no longer being used. | |
*/ | |
virtual void setPlayHead (AudioPlayHead* newPlayHead); | |
//============================================================================== | |
/** This is called by the processor to specify its details before being played. Use this | |
version of the function if you are not interested in any sidechain and/or aux buses | |
and do not care about the layout of channels. Otherwise use setRateAndBufferSizeDetails.*/ | |
void setPlayConfigDetails (int numIns, int numOuts, double sampleRate, int blockSize); | |
/** This is called by the processor to specify its details before being played. You | |
should call this function after having informed the processor about the channel | |
and bus layouts via setBusesLayout. | |
@see setBusesLayout | |
*/ | |
void setRateAndBufferSizeDetails (double sampleRate, int blockSize) noexcept; | |
//============================================================================== | |
/** AAX plug-ins need to report a unique "plug-in id" for every audio layout | |
configuration that your AudioProcessor supports on the main bus. Override this | |
function if you want your AudioProcessor to use a custom "plug-in id" (for example | |
to stay backward compatible with older versions of JUCE). | |
The default implementation will compute a unique integer from the input and output | |
layout and add this value to the 4 character code 'jcaa' (for native AAX) or 'jyaa' | |
(for AudioSuite plug-ins). | |
*/ | |
virtual int32 getAAXPluginIDForMainBusConfig (const AudioChannelSet& mainInputLayout, | |
const AudioChannelSet& mainOutputLayout, | |
bool idForAudioSuite) const; | |
//============================================================================== | |
/** Some plug-ins support sharing response curve data with the host so that it can | |
display this curve on a console or in the mixer panel. For example, ProTools | |
allows you to see the total EQ curve of a track. It does this by interrogating | |
each plug-in for their internal EQ curve. */ | |
struct CurveData
{
    // Identifies what kind of response curve is being reported.
    enum class Type : int
    {
        EQ, // an EQ curve - input is in Hz, output is in dB
        Dynamics, // a dynamics curve - input and output are in dB
        GainReduction, // a gain reduction curve - input and output are in dB
        Unknown = -1
    };
    std::function<float (float)> curve; // a function which represents your curve (such as an eq)
    Range<float> xRange, yRange; // the data range of your curve
    // For some curve types, your plug-in may already measure the current input and output values.
    // A host can use these to indicate where on the curve the current signal is (for example
    // by putting a dot on the curve). Simply leave these strings empty if you do not want to
    // support this.
    String xMeterID, yMeterID;
};
virtual CurveData getResponseCurve (CurveData::Type /*curveType*/) const
{
    // Default: no response curve data is exposed to the host.
    return {};
}
//============================================================================== | |
/** Not for public use - this is called before deleting an editor component. */ | |
void editorBeingDeleted (AudioProcessorEditor*) noexcept; | |
/** Flags to indicate the type of plugin context in which a processor is being used. */ | |
enum WrapperType
{
    wrapperType_Undefined = 0,  // the wrapper type is not (yet) known
    wrapperType_VST,            // running as a VST2 plug-in
    wrapperType_VST3,           // running as a VST3 plug-in
    wrapperType_AudioUnit,      // running as an AudioUnit plug-in
    wrapperType_AudioUnitv3,    // running as an AudioUnit v3 plug-in
    wrapperType_AAX,            // running as an AAX plug-in
    wrapperType_Standalone,     // running inside a standalone-app wrapper
    wrapperType_Unity,          // running as a Unity audio plug-in
    wrapperType_LV2             // running as an LV2 plug-in
};
/** When loaded by a plugin wrapper, this flag will be set to indicate the type | |
of plugin within which the processor is running. | |
*/ | |
const WrapperType wrapperType; | |
/** Returns a textual description of a WrapperType value */ | |
static const char* getWrapperTypeDescription (AudioProcessor::WrapperType) noexcept; | |
/** A struct containing information about the DAW track inside which your | |
AudioProcessor is loaded. */ | |
struct TrackProperties
{
    String name;   // The name of the track - this will be empty if the track name is not known
    Colour colour; // The colour of the track - this will be transparentBlack if the colour is not known

    // NOTE: other properties may be added in the future
};
/** Informs the AudioProcessor that track properties such as the track's name or | |
colour has been changed. | |
If you are hosting this AudioProcessor then use this method to inform the | |
AudioProcessor about which track the AudioProcessor is loaded on. This method | |
may only be called on the message thread. | |
If you are implementing an AudioProcessor then you can override this callback | |
to do something useful with the track properties such as changing the colour | |
of your AudioProcessor's editor. It's entirely up to the host when and how | |
often this callback will be called. | |
The default implementation of this callback will do nothing. | |
*/ | |
virtual void updateTrackProperties (const TrackProperties& properties); | |
//============================================================================== | |
/** Helper function that just converts an xml element into a binary blob. | |
Use this in your processor's getStateInformation() method if you want to | |
store its state as xml. | |
Then use getXmlFromBinary() to reverse this operation and retrieve the XML | |
from a binary blob. | |
*/ | |
static void copyXmlToBinary (const XmlElement& xml, | |
juce::MemoryBlock& destData); | |
/** Retrieves an XML element that was stored as binary with the copyXmlToBinary() method. | |
This might return nullptr if the data's unsuitable or corrupted. | |
*/ | |
static std::unique_ptr<XmlElement> getXmlFromBinary (const void* data, int sizeInBytes); | |
/** @internal */ | |
static void JUCE_CALLTYPE setTypeOfNextNewPlugin (WrapperType); | |
protected:
    /** Callback to query if the AudioProcessor supports a specific layout.

        This callback is called when the host probes the supported bus layouts via
        the checkBusesLayoutSupported method. You should override this callback if you
        would like to limit the layouts that your AudioProcessor supports. The default
        implementation will accept any layout. JUCE does basic sanity checks so that
        the provided layouts parameter will have the same number of buses as your
        AudioProcessor.

        @see checkBusesLayoutSupported
    */
    virtual bool isBusesLayoutSupported (const BusesLayout&) const { return true; }  // default: every layout is acceptable
    /** Callback to check if a certain bus layout can now be applied.

        Most subclasses will not need to override this method and should instead
        override the isBusesLayoutSupported callback to reject certain layout changes.

        This callback is called when the user requests a layout change. It will only be
        called if processing of the AudioProcessor has been stopped by a previous call to
        releaseResources or after the construction of the AudioProcessor. It will be called
        just before the actual layout change. By returning false you will abort the layout
        change and setBusesLayout will return false indicating that the layout change
        was not successful.

        The default implementation will simply call isBusesLayoutSupported.

        You only need to override this method if there is a chance that your AudioProcessor
        may not accept a layout although you have previously claimed to support it via the
        isBusesLayoutSupported callback. This can occur if your AudioProcessor's supported
        layouts depend on other plug-in parameters which may have changed since the last
        call to isBusesLayoutSupported, such as the format of an audio file which can be
        selected by the user in the AudioProcessor's editor. This callback gives the
        AudioProcessor a last chance to reject a layout if conditions have changed as it
        is always called just before the actual layout change.

        As it is never called while the AudioProcessor is processing audio, it can also
        be used for AudioProcessors which wrap other plug-in formats to apply the current
        layout to the underlying plug-in. This callback gives such AudioProcessors a
        chance to reject the layout change should an error occur with the underlying plug-in
        during the layout change.

        @see isBusesLayoutSupported, setBusesLayout
    */
    virtual bool canApplyBusesLayout (const BusesLayout& layouts) const { return isBusesLayoutSupported (layouts); }
    /** This method will be called when a new bus layout needs to be applied.

        Most subclasses will not need to override this method and should just use the default
        implementation.

        @returns true if the layout was applied successfully
    */
    virtual bool applyBusLayouts (const BusesLayout& layouts);
    //==============================================================================
    /** Structure used for AudioProcessor Callbacks.

        Describes a single bus: its name, default channel layout, and whether it is
        enabled by default. Filled out, for example, as the out-parameter of
        canApplyBusCountChange() when a new bus is being added.
    */
    struct BusProperties
    {
        /** The name of the bus */
        String busName;

        /** The default layout of the bus */
        AudioChannelSet defaultLayout;

        /** Is this bus activated by default? */
        bool isActivatedByDefault;
    };
    /** Structure used for AudioProcessor Callbacks.

        Holds the properties of every input and output bus. Buses can be appended
        imperatively with addBus(), or fluently via the chainable withInput() /
        withOutput() methods, which return a modified copy.
    */
    struct BusesProperties
    {
        /** The layouts of the input buses */
        Array<BusProperties> inputLayouts;

        /** The layouts of the output buses */
        Array<BusProperties> outputLayouts;

        /** Appends a bus to the input or output list in place. */
        void addBus (bool isInput, const String& name, const AudioChannelSet& defaultLayout, bool isActivatedByDefault = true);

        /** Returns a copy of this object with an extra input bus appended. */
        [[nodiscard]] BusesProperties withInput  (const String& name, const AudioChannelSet& defaultLayout, bool isActivatedByDefault = true) const;

        /** Returns a copy of this object with an extra output bus appended. */
        [[nodiscard]] BusesProperties withOutput (const String& name, const AudioChannelSet& defaultLayout, bool isActivatedByDefault = true) const;
    };
    /** Callback to query if adding/removing buses is currently possible.

        This callback is called when the host calls addBus or removeBus.
        Similar to canApplyBusesLayout, this callback is only called while
        the AudioProcessor is stopped and gives the processor a last
        chance to reject a requested bus change. It can also be used to apply
        the bus count change to an underlying wrapped plug-in.

        When adding a bus, isAddingBuses will be true and the plug-in is
        expected to fill out outNewBusProperties with the properties of the
        bus which will be created just after the successful return of this callback.

        Implementations of AudioProcessor will rarely need to override this
        method. Only override this method if your processor supports adding
        and removing buses and if it needs more fine grain control over the
        naming of new buses or may reject bus number changes although canAddBus
        or canRemoveBus returned true.

        The default implementation will return false if canAddBus/canRemoveBus
        returns false (the default behavior). Otherwise, this method returns
        "Input #busIndex" for input buses and "Output #busIndex" for output buses
        where busIndex is the index for newly created buses. The default layout
        in this case will be the layout of the previous bus of the same direction.
    */
    virtual bool canApplyBusCountChange (bool isInput, bool isAddingBuses,
                                         BusProperties& outNewBusProperties);
    //==============================================================================
    /** @internal The current playhead; may be null. Held atomically so it can be
        read and replaced from different threads without tearing. */
    std::atomic<AudioPlayHead*> playHead { nullptr };

    /** @internal Notifies all registered listeners of a parameter value change. */
    void sendParamChangeMessageToListeners (int parameterIndex, float newValue);
public:
    //==============================================================================
   #ifndef DOXYGEN
    // These methods are all deprecated in favour of using AudioProcessorParameter
    // and AudioProcessorParameterGroup
    [[deprecated]] virtual int getNumParameters();
    [[deprecated]] virtual const String getParameterName (int parameterIndex);
    [[deprecated]] virtual String getParameterID (int index);
    [[deprecated]] virtual float getParameter (int parameterIndex);
    [[deprecated]] virtual String getParameterName (int parameterIndex, int maximumStringLength);
    [[deprecated]] virtual const String getParameterText (int parameterIndex);
    [[deprecated]] virtual String getParameterText (int parameterIndex, int maximumStringLength);
    [[deprecated]] virtual int getParameterNumSteps (int parameterIndex);
    [[deprecated]] virtual bool isParameterDiscrete (int parameterIndex) const;
    [[deprecated]] virtual float getParameterDefaultValue (int parameterIndex);
    [[deprecated]] virtual String getParameterLabel (int index) const;
    [[deprecated]] virtual bool isParameterOrientationInverted (int index) const;
    [[deprecated]] virtual void setParameter (int parameterIndex, float newValue);
    [[deprecated]] virtual bool isParameterAutomatable (int parameterIndex) const;
    [[deprecated]] virtual bool isMetaParameter (int parameterIndex) const;
    [[deprecated]] virtual AudioProcessorParameter::Category getParameterCategory (int parameterIndex) const;
    [[deprecated]] void beginParameterChangeGesture (int parameterIndex);
    [[deprecated]] void endParameterChangeGesture (int parameterIndex);
    [[deprecated]] void setParameterNotifyingHost (int parameterIndex, float newValue);

    // These functions are deprecated: your audio processor can inform the host
    // on its bus and channel layouts and names using the AudioChannelSet and various bus classes.
    // The channel-count getters below simply forward to the modern total-channel accessors,
    // and the speaker-arrangement getters return the cached strings kept up to date elsewhere.
    [[deprecated]] int getNumInputChannels() const noexcept                   { return getTotalNumInputChannels(); }
    [[deprecated]] int getNumOutputChannels() const noexcept                  { return getTotalNumOutputChannels(); }
    [[deprecated]] const String getInputSpeakerArrangement() const noexcept   { return cachedInputSpeakerArrString; }
    [[deprecated]] const String getOutputSpeakerArrangement() const noexcept  { return cachedOutputSpeakerArrString; }
    [[deprecated]] virtual const String getInputChannelName (int channelIndex) const;
    [[deprecated]] virtual const String getOutputChannelName (int channelIndex) const;
    [[deprecated]] virtual bool isInputChannelStereoPair (int index) const;
    [[deprecated]] virtual bool isOutputChannelStereoPair (int index) const;
   #endif
private:
    //==============================================================================
    /** A simple (inputChannelCount, outputChannelCount) value pair, used to
        represent one entry of a channel-configuration list
        (see layoutListToArray()). */
    struct InOutChannelPair
    {
        InOutChannelPair() = default;

        InOutChannelPair (int16 inCh, int16 outCh) noexcept
            : inChannels (inCh), outChannels (outCh) {}

        /** Constructs a pair from a two-element { ins, outs } array, as found in
            a static channel-configuration table. */
        InOutChannelPair (const int16 (&config)[2]) noexcept
            : inChannels (config[0]), outChannels (config[1]) {}

        bool operator== (const InOutChannelPair& rhs) const noexcept
        {
            return inChannels == rhs.inChannels
                && outChannels == rhs.outChannels;
        }

        int16 inChannels = 0, outChannels = 0;
    };
template <size_t numLayouts> | |
static Array<InOutChannelPair> layoutListToArray (const short (&configuration) [numLayouts][2]) | |
{ | |
Array<InOutChannelPair> layouts; | |
for (size_t i = 0; i < numLayouts; ++i) | |
layouts.add (InOutChannelPair (configuration[(int) i])); | |
return layouts; | |
} | |
static Array<InOutChannelPair> layoutListToArray (const std::initializer_list<const short[2]>& configuration) | |
{ | |
Array<InOutChannelPair> layouts; | |
for (auto&& i : configuration) | |
layouts.add (InOutChannelPair (i)); | |
return layouts; | |
} | |
template <typename This> | |
static auto getBusImpl (This& t, bool isInput, int busIndex) -> decltype (t.getBus (isInput, busIndex)) | |
{ | |
return (isInput ? t.inputBuses : t.outputBuses)[busIndex]; | |
} | |
    //==============================================================================
    // Helpers for working with channel-configuration lists (see InOutChannelPair).
    static BusesProperties busesPropertiesFromLayoutArray (const Array<InOutChannelPair>&);
    BusesLayout getNextBestLayoutInList (const BusesLayout&, const Array<InOutChannelPair>&) const;
    static bool containsLayout (const BusesLayout&, const Array<InOutChannelPair>&);

    //==============================================================================
    // Creates a new bus from the given properties and appends it to the
    // appropriate input/output bus array.
    void createBus (bool isInput, const BusProperties&);
    //==============================================================================
    // Registered change listeners.
    Array<AudioProcessorListener*> listeners;
    // The editor currently attached to this processor, if any (auto-nulling pointer).
    Component::SafePointer<AudioProcessorEditor> activeEditor;

    // Current playback configuration.
    double currentSampleRate = 0;
    int blockSize = 0, latencySamples = 0;
    bool suspended = false;
    std::atomic<bool> nonRealtime { false };
    ProcessingPrecision processingPrecision = singlePrecision;

    // Locks protecting the processing callback, the listener list, and the
    // active editor pointer respectively.
    CriticalSection callbackLock, listenerLock, activeEditorLock;

    friend class Bus;
    mutable OwnedArray<Bus> inputBuses, outputBuses;

    // Cached values derived from the current bus arrangement.
    String cachedInputSpeakerArrString, cachedOutputSpeakerArrString;
    int cachedTotalIns = 0, cachedTotalOuts = 0;

    // The parameter hierarchy, plus a flattened view of it for indexed access.
    AudioProcessorParameterGroup parameterTree;
    Array<AudioProcessorParameter*> flatParameterList;

    AudioProcessorParameter* getParamChecked (int) const;

   #if JUCE_DEBUG
    // Debug-only bookkeeping used to catch parameter-API misuse.
    #if ! JUCE_DISABLE_AUDIOPROCESSOR_BEGIN_END_GESTURE_CHECKING
     BigInteger changingParams;
    #endif

    bool textRecursionCheck = false;
    std::unordered_set<String> paramIDs, groupIDs;
    #if ! JUCE_DISABLE_CAUTIOUS_PARAMETER_ID_CHECKING
     std::unordered_set<String> trimmedParamIDs;
    #endif
   #endif

    // Validation helpers for newly added parameters and groups.
    void checkForDuplicateTrimmedParamID (AudioProcessorParameter*);
    void validateParameter (AudioProcessorParameter*);
    void checkForDuplicateParamID (AudioProcessorParameter*);
    void checkForDuplicateGroupIDs (const AudioProcessorParameterGroup&);

    AudioProcessorListener* getListenerLocked (int) const noexcept;
    void updateSpeakerFormatStrings();
    void audioIOChanged (bool busNumberChanged, bool channelNumChanged);
    void getNextBestLayout (const BusesLayout&, BusesLayout&) const;

    template <typename floatType>
    void processBypassed (AudioBuffer<floatType>&, MidiBuffer&);

    friend class AudioProcessorParameter;
    friend class LADSPAPluginInstance;
    // Deprecated no-op kept only so that existing overrides still compile and
    // trigger a deprecation warning pointing users at its removal.
    [[deprecated ("This method is no longer used - you can delete it from your AudioProcessor classes.")]]
    virtual bool silenceInProducesSilenceOut() const { return false; }
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioProcessor) | |
}; | |
} // namespace juce |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.