Created
October 14, 2021 23:53
-
-
Save EricRabil/931a60bafd075e8c2271b0a9a42b8beb to your computer and use it in GitHub Desktop.
A minimal derivative of the Objective-C runtime's internal (objc4) headers, for interacting with runtime data structures directly.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include <atomic> | |
#include <cstddef> // for nullptr_t | |
#include <stdint.h> | |
#include <assert.h> | |
#include <iterator> | |
#include <functional> | |
// MARK: - Defines

#if __LP64__
typedef uint32_t mask_t;  // x86_64 & arm64 asm are less efficient with 16-bits
#else
typedef uint16_t mask_t;
#endif

#define ASSERT(x) assert(x)

#if __arm64__
// ARM64 uses a new tagged pointer scheme where normal tags are in
// the low bits, extended tags are in the high bits, and half of the
// extended tag space is reserved for unobfuscated payloads.
#   define OBJC_SPLIT_TAGGED_POINTERS 1
#else
#   define OBJC_SPLIT_TAGGED_POINTERS 0
#endif

#if (TARGET_OS_OSX || TARGET_OS_MACCATALYST) && __x86_64__
// 64-bit Mac - tag bit is LSB
#   define OBJC_MSB_TAGGED_POINTERS 0
#else
// Everything else - tag bit is MSB
#   define OBJC_MSB_TAGGED_POINTERS 1
#endif

#define _OBJC_TAG_INDEX_MASK 0x7UL

#if OBJC_SPLIT_TAGGED_POINTERS
#define _OBJC_TAG_SLOT_COUNT 8
#define _OBJC_TAG_SLOT_MASK 0x7UL
#else
// array slot includes the tag bit itself
#define _OBJC_TAG_SLOT_COUNT 16
#define _OBJC_TAG_SLOT_MASK 0xfUL
#endif

#define _OBJC_TAG_EXT_INDEX_MASK 0xff
// array slot has no extra bits
#define _OBJC_TAG_EXT_SLOT_COUNT 256
#define _OBJC_TAG_EXT_SLOT_MASK 0xff

#if OBJC_SPLIT_TAGGED_POINTERS
#   define _OBJC_TAG_MASK (1UL<<63)
#   define _OBJC_TAG_INDEX_SHIFT 0
#   define _OBJC_TAG_SLOT_SHIFT 0
#   define _OBJC_TAG_PAYLOAD_LSHIFT 1
#   define _OBJC_TAG_PAYLOAD_RSHIFT 4
#   define _OBJC_TAG_EXT_MASK (_OBJC_TAG_MASK | 0x7UL)
#   define _OBJC_TAG_NO_OBFUSCATION_MASK ((1UL<<62) | _OBJC_TAG_EXT_MASK)
#   define _OBJC_TAG_CONSTANT_POINTER_MASK \
        ~(_OBJC_TAG_EXT_MASK | ((uintptr_t)_OBJC_TAG_EXT_SLOT_MASK << _OBJC_TAG_EXT_SLOT_SHIFT))
#   define _OBJC_TAG_EXT_INDEX_SHIFT 55
#   define _OBJC_TAG_EXT_SLOT_SHIFT 55
#   define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 9
#   define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#elif OBJC_MSB_TAGGED_POINTERS
#   define _OBJC_TAG_MASK (1UL<<63)
#   define _OBJC_TAG_INDEX_SHIFT 60
#   define _OBJC_TAG_SLOT_SHIFT 60
#   define _OBJC_TAG_PAYLOAD_LSHIFT 4
#   define _OBJC_TAG_PAYLOAD_RSHIFT 4
#   define _OBJC_TAG_EXT_MASK (0xfUL<<60)
#   define _OBJC_TAG_EXT_INDEX_SHIFT 52
#   define _OBJC_TAG_EXT_SLOT_SHIFT 52
#   define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 12
#   define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#else
#   define _OBJC_TAG_MASK 1UL
#   define _OBJC_TAG_INDEX_SHIFT 1
#   define _OBJC_TAG_SLOT_SHIFT 0
#   define _OBJC_TAG_PAYLOAD_LSHIFT 0
#   define _OBJC_TAG_PAYLOAD_RSHIFT 4
#   define _OBJC_TAG_EXT_MASK 0xfUL
#   define _OBJC_TAG_EXT_INDEX_SHIFT 4
#   define _OBJC_TAG_EXT_SLOT_SHIFT 4
#   define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 0
#   define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12
#endif
// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags)
// The extra bits are optimized for the retain/release and alloc/dealloc paths.

// Values for class_ro_t->flags
// These are emitted by the compiler and are part of the ABI.
// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang

// class is a metaclass
#define RO_META               (1<<0)
// class is a root class
#define RO_ROOT               (1<<1)
// class has .cxx_construct/destruct implementations
#define RO_HAS_CXX_STRUCTORS  (1<<2)
// class has +load implementation
// #define RO_HAS_LOAD_METHOD (1<<3)
// class has visibility=hidden set
#define RO_HIDDEN             (1<<4)
// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak
#define RO_EXCEPTION          (1<<5)
// class has ro field for Swift metadata initializer callback
#define RO_HAS_SWIFT_INITIALIZER (1<<6)
// class compiled with ARC
#define RO_IS_ARC             (1<<7)
// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS)
#define RO_HAS_CXX_DTOR_ONLY  (1<<8)
// class is not ARC but has ARC-style weak ivar layout
#define RO_HAS_WEAK_WITHOUT_ARC (1<<9)
// class does not allow associated objects on instances
#define RO_FORBIDS_ASSOCIATED_OBJECTS (1<<10)

// class is in an unloadable bundle - must never be set by compiler
#define RO_FROM_BUNDLE        (1<<29)
// class is unrealized future class - must never be set by compiler
#define RO_FUTURE             (1<<30)
// class is realized - must never be set by compiler
#define RO_REALIZED           (1<<31)

// Values for class_rw_t->flags
// These are not emitted by the compiler and are never used in class_ro_t.
// Their presence should be considered in future ABI versions.

// class_t->data is class_rw_t, not class_ro_t
#define RW_REALIZED           (1<<31)
// class is unresolved future class
#define RW_FUTURE             (1<<30)
// class is initialized
#define RW_INITIALIZED        (1<<29)
// class is initializing
#define RW_INITIALIZING       (1<<28)
// class_rw_t->ro is heap copy of class_ro_t
#define RW_COPIED_RO          (1<<27)
// class allocated but not yet registered
#define RW_CONSTRUCTING       (1<<26)
// class allocated and registered
#define RW_CONSTRUCTED        (1<<25)
// available for use; was RW_FINALIZE_ON_MAIN_THREAD
// #define RW_24              (1<<24)
// class +load has been called
#define RW_LOADED             (1<<23)
#if !SUPPORT_NONPOINTER_ISA
// class instances may have associative references
#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22)
#endif
// class has instance-specific GC layout
#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21)
// class does not allow associated objects on its instances
#define RW_FORBIDS_ASSOCIATED_OBJECTS   (1<<20)
// class has started realizing but not yet completed it
#define RW_REALIZING          (1<<19)

#if CONFIG_USE_PREOPT_CACHES
// this class and its descendants can't have preopt caches with inlined sels
#define RW_NOPREOPT_SELS      (1<<2)
// this class and its descendants can't have preopt caches
#define RW_NOPREOPT_CACHE     (1<<1)
#endif

// class is a metaclass (copied from ro)
#define RW_META               RO_META // (1<<0)

// NOTE: MORE RW_ FLAGS DEFINED BELOW
// Values for class_rw_t->flags (RW_*), cache_t->_flags (FAST_CACHE_*),
// or class_t->bits (FAST_*).
//
// FAST_* and FAST_CACHE_* are stored on the class, reducing pointer indirection.

#if __LP64__
// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY    (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE    (1UL<<1)
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define FAST_HAS_DEFAULT_RR     (1UL<<2)
// data pointer
#define FAST_DATA_MASK          0x00007ffffffffff8UL

#if __arm64__
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is the first bit so that setting it in
//   isa_t::has_cxx_dtor is a single bfi
#define FAST_CACHE_HAS_CXX_DTOR       (1<<0)
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<2)
#else
// Denormalized RO_META to avoid an indirection
#define FAST_CACHE_META               (1<<0)
// class or superclass has .cxx_construct/.cxx_destruct implementation
//   FAST_CACHE_HAS_CXX_DTOR is chosen to alias with isa_t::has_cxx_dtor
#define FAST_CACHE_HAS_CXX_CTOR       (1<<1)
#define FAST_CACHE_HAS_CXX_DTOR       (1<<2)
#endif

// Fast Alloc fields:
//   This stores the word-aligned size of instances + "ALLOC_DELTA16",
//   or 0 if the instance size doesn't fit.
//
//   These bits occupy the same bits than in the instance size, so that
//   the size can be extracted with a simple mask operation.
//
//   FAST_CACHE_ALLOC_MASK16 allows to extract the instance size rounded
//   rounded up to the next 16 byte boundary, which is a fastpath for
//   _objc_rootAllocWithZone()
#define FAST_CACHE_ALLOC_MASK         0x1ff8
#define FAST_CACHE_ALLOC_MASK16       0x1ff0
#define FAST_CACHE_ALLOC_DELTA16      0x0008

// class's instances requires raw isa
#define FAST_CACHE_REQUIRES_RAW_ISA   (1<<13)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is is stored in the metaclass.
#define FAST_CACHE_HAS_DEFAULT_AWZ    (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define FAST_CACHE_HAS_DEFAULT_CORE   (1<<15)

#else

// class or superclass has .cxx_construct implementation
#define RW_HAS_CXX_CTOR       (1<<18)
// class or superclass has .cxx_destruct implementation
#define RW_HAS_CXX_DTOR       (1<<17)
// class or superclass has default alloc/allocWithZone: implementation
// Note this is is stored in the metaclass.
#define RW_HAS_DEFAULT_AWZ    (1<<16)
// class's instances requires raw isa
#if SUPPORT_NONPOINTER_ISA
#define RW_REQUIRES_RAW_ISA   (1<<15)
#endif
// class or superclass has default retain/release/autorelease/retainCount/
//   _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference
#define RW_HAS_DEFAULT_RR     (1<<14)
// class or superclass has default new/self/class/respondsToSelector/isKindOfClass
#define RW_HAS_DEFAULT_CORE   (1<<13)

// class is a Swift class from the pre-stable Swift ABI
#define FAST_IS_SWIFT_LEGACY  (1UL<<0)
// class is a Swift class from the stable Swift ABI
#define FAST_IS_SWIFT_STABLE  (1UL<<1)
// data pointer
#define FAST_DATA_MASK        0xfffffffcUL

#endif // __LP64__
#if !(TARGET_OS_OSX && __i386__) | |
typedef Class _Nullable | |
(*_objc_swiftMetadataInitializer)(Class _Nonnull cls, void * _Nullable arg); | |
#endif | |
// On some architectures, method lists and method caches store signed IMPs.
// fixme simply include ptrauth.h once all build trains have it
#if __has_include (<ptrauth.h>)
#include <ptrauth.h>
#else
// No-op stand-ins so code can use the ptrauth intrinsics unconditionally.
#define ptrauth_strip(__value, __key) __value
#define ptrauth_blend_discriminator(__pointer, __integer) ((uintptr_t)0)
#define ptrauth_sign_constant(__value, __key, __data) __value
#define ptrauth_sign_unauthenticated(__value, __key, __data) __value
#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, __new_data) __value
#define ptrauth_auth_function(__value, __old_key, __old_data) __value
#define ptrauth_auth_data(__value, __old_key, __old_data) __value
#define ptrauth_string_discriminator(__string) ((int)0)
#define ptrauth_sign_generic_data(__value, __data) ((ptrauth_generic_signature_t)0)

#define __ptrauth_function_pointer
#define __ptrauth_return_address
#define __ptrauth_block_invocation_pointer
#define __ptrauth_block_copy_helper
#define __ptrauth_block_destroy_helper
#define __ptrauth_block_byref_copy_helper
#define __ptrauth_block_byref_destroy_helper
#define __ptrauth_objc_method_list_imp
#define __ptrauth_cxx_vtable_pointer
#define __ptrauth_cxx_vtt_vtable_pointer
#define __ptrauth_swift_heap_object_destructor
#define __ptrauth_cxx_virtual_function_pointer(__declkey)
#define __ptrauth_swift_function_pointer(__typekey)
#define __ptrauth_swift_class_method_pointer(__declkey)
#define __ptrauth_swift_protocol_witness_function_pointer(__declkey)
#define __ptrauth_swift_value_witness_function_pointer(__key)
#endif

// Workaround <rdar://problem/64531063> Definitions of ptrauth_sign_unauthenticated
// and friends generate unused variables warnings
#if __has_feature(ptrauth_calls)
#define UNUSED_WITHOUT_PTRAUTH
#else
#define UNUSED_WITHOUT_PTRAUTH __unused
#endif

// NOTE(review): the gist defined this alias twice, with the first copy
// appearing before the __ptrauth_objc_method_list_imp fallback above was
// defined. Defined once here, after the compat shims.
#if __has_feature(ptrauth_calls)
#if !__arm64__
#error ptrauth other than arm64e is unimplemented
#endif
// Method lists use process-independent signature for compatibility.
using MethodListIMP = IMP __ptrauth_objc_method_list_imp;
#else
using MethodListIMP = IMP;
#endif

// Branch-prediction hints.
#define fastpath(x) (__builtin_expect(bool(x), 1))
#define slowpath(x) (__builtin_expect(bool(x), 0))
// A struct that wraps a pointer using the provided template. | |
// The provided Auth parameter is used to sign and authenticate | |
// the pointer as it is read and written. | |
template<typename T, typename Auth> | |
struct WrappedPtr { | |
private: | |
T *ptr; | |
public: | |
WrappedPtr(T *p) { | |
*this = p; | |
} | |
WrappedPtr(const WrappedPtr<T, Auth> &p) { | |
*this = p; | |
} | |
WrappedPtr<T, Auth> &operator =(T *p) { | |
ptr = p; | |
return *this; | |
} | |
WrappedPtr<T, Auth> &operator =(const WrappedPtr<T, Auth> &p) { | |
*this = (T *)p; | |
return *this; | |
} | |
operator T*() const { return get(); } | |
T *operator->() const { return get(); } | |
T *get() const { return ptr; } | |
// When asserts are enabled, ensure that we can read a byte from | |
// the underlying pointer. This can be used to catch ptrauth | |
// errors early for easier debugging. | |
void validate() const { | |
#if !NDEBUG | |
char *p = (char *)get(); | |
char dummy; | |
memset_s(&dummy, 1, *p, 1); | |
ASSERT(dummy == *p); | |
#endif | |
} | |
}; | |
// A "ptrauth" struct that just passes pointers through unchanged. | |
struct PtrauthRaw { | |
template <typename T> | |
static T *sign(T *ptr, __unused const void *address) { | |
return ptr; | |
} | |
template <typename T> | |
static T *auth(T *ptr, __unused const void *address) { | |
return ptr; | |
} | |
}; | |
// A ptrauth struct that stores pointers raw, and strips ptrauth | |
// when reading. | |
struct PtrauthStrip { | |
template <typename T> | |
static T *sign(T *ptr, __unused const void *address) { | |
return ptr; | |
} | |
template <typename T> | |
static T *auth(T *ptr, __unused const void *address) { | |
return ptrauth_strip(ptr, ptrauth_key_process_dependent_data); | |
} | |
}; | |
// A ptrauth struct that signs and authenticates pointers using the | |
// DB key with the given discriminator and address diversification. | |
template <unsigned discriminator> | |
struct Ptrauth { | |
template <typename T> | |
static T *sign(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) { | |
if (!ptr) | |
return nullptr; | |
return ptrauth_sign_unauthenticated(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator)); | |
} | |
template <typename T> | |
static T *auth(T *ptr, UNUSED_WITHOUT_PTRAUTH const void *address) { | |
if (!ptr) | |
return nullptr; | |
return ptrauth_auth_data(ptr, ptrauth_key_process_dependent_data, ptrauth_blend_discriminator(address, discriminator)); | |
} | |
}; | |
// A template that produces a WrappedPtr to the given type using a
// plain unauthenticated pointer.
template <typename T> using RawPtr = WrappedPtr<T, PtrauthRaw>;

#if __has_feature(ptrauth_calls)
// Get a ptrauth type that uses a string discriminator.
#if __BUILDING_OBJCDT__
#define PTRAUTH_STR(name) PtrauthStrip
#else
#define PTRAUTH_STR(name) Ptrauth<ptrauth_string_discriminator(#name)>
#endif

// When ptrauth is available, declare a template that wraps a type
// in a WrappedPtr that uses an authenticated pointer using the
// process-dependent data key, address diversification, and a
// discriminator based on the name passed in.
//
// When ptrauth is not available, equivalent to RawPtr.
#define DECLARE_AUTHED_PTR_TEMPLATE(name)                   \
    template <typename T> using name ## _authed_ptr         \
        = WrappedPtr<T, PTRAUTH_STR(name)>;
#else
#define PTRAUTH_STR(name) PtrauthRaw
#define DECLARE_AUTHED_PTR_TEMPLATE(name)                   \
    template <typename T> using name ## _authed_ptr = RawPtr<T>;
#endif
// MARK: - Templates

// Mix-in base that deletes copy construction and copy assignment for
// any deriving class. Construction and destruction are protected so it
// can only ever be used as a base.
class nocopy_t {
  private:
    nocopy_t(const nocopy_t&) = delete;
    const nocopy_t& operator=(const nocopy_t&) = delete;
  protected:
    constexpr nocopy_t() = default;
    ~nocopy_t() = default;
};
// Version of std::atomic that forbids implicit conversion to/from the
// wrapped type and forces callers to spell out a memory order on every
// load() and store().
template <typename T>
struct explicit_atomic : public std::atomic<T> {
    explicit explicit_atomic(T initial) noexcept : std::atomic<T>(std::move(initial)) {}
    operator T() const = delete;

    // Load with an explicit, mandatory memory order.
    T load(std::memory_order order) const noexcept {
        return std::atomic<T>::load(order);
    }

    // Store with an explicit, mandatory memory order.
    void store(T desired, std::memory_order order) noexcept {
        std::atomic<T>::store(desired, order);
    }

    // Convert a normal pointer to an atomic pointer. This is a
    // somewhat dodgy thing to do, but if the atomic type is lock
    // free and the same size as the non-atomic type, we know the
    // representations are the same, and the compiler generates good
    // code.
    static explicit_atomic<T> *from_pointer(T *ptr) {
        static_assert(sizeof(explicit_atomic<T> *) == sizeof(T *),
                      "Size of atomic must match size of original");
        explicit_atomic<T> *wrapped = (explicit_atomic<T> *)ptr;
        ASSERT(wrapped->is_lock_free());
        return wrapped;
    }
};
// A pointer modifier that does nothing to the pointer. | |
struct PointerModifierNop { | |
template <typename ListType, typename T> | |
static T *modify(__unused const ListType &list, T *ptr) { return ptr; } | |
}; | |
/*********************************************************************** | |
* entsize_list_tt<Element, List, FlagMask, PointerModifier> | |
* Generic implementation of an array of non-fragile structs. | |
* | |
* Element is the struct type (e.g. method_t) | |
* List is the specialization of entsize_list_tt (e.g. method_list_t) | |
* FlagMask is used to stash extra bits in the entsize field | |
* (e.g. method list fixup markers) | |
* PointerModifier is applied to the element pointers retrieved from | |
* the array. | |
**********************************************************************/ | |
template <typename Element, typename List, uint32_t FlagMask, typename PointerModifier = PointerModifierNop> | |
struct entsize_list_tt { | |
uint32_t entsizeAndFlags; | |
uint32_t count; | |
uint32_t entsize() const { | |
return entsizeAndFlags & ~FlagMask; | |
} | |
uint32_t flags() const { | |
return entsizeAndFlags & FlagMask; | |
} | |
Element& getOrEnd(uint32_t i) const { | |
ASSERT(i <= count); | |
return *PointerModifier::modify(*this, (Element *)((uint8_t *)this + sizeof(*this) + i*entsize())); | |
} | |
Element& get(uint32_t i) const { | |
ASSERT(i < count); | |
return getOrEnd(i); | |
} | |
size_t byteSize() const { | |
return byteSize(entsize(), count); | |
} | |
static size_t byteSize(uint32_t entsize, uint32_t count) { | |
return sizeof(entsize_list_tt) + count*entsize; | |
} | |
struct iterator; | |
const iterator begin() const { | |
return iterator(*static_cast<const List*>(this), 0); | |
} | |
iterator begin() { | |
return iterator(*static_cast<const List*>(this), 0); | |
} | |
const iterator end() const { | |
return iterator(*static_cast<const List*>(this), count); | |
} | |
iterator end() { | |
return iterator(*static_cast<const List*>(this), count); | |
} | |
struct iterator { | |
uint32_t entsize; | |
uint32_t index; // keeping track of this saves a divide in operator- | |
Element* element; | |
typedef std::random_access_iterator_tag iterator_category; | |
typedef Element value_type; | |
typedef ptrdiff_t difference_type; | |
typedef Element* pointer; | |
typedef Element& reference; | |
iterator() { } | |
iterator(const List& list, uint32_t start = 0) | |
: entsize(list.entsize()) | |
, index(start) | |
, element(&list.getOrEnd(start)) | |
{ } | |
const iterator& operator += (ptrdiff_t delta) { | |
element = (Element*)((uint8_t *)element + delta*entsize); | |
index += (int32_t)delta; | |
return *this; | |
} | |
const iterator& operator -= (ptrdiff_t delta) { | |
element = (Element*)((uint8_t *)element - delta*entsize); | |
index -= (int32_t)delta; | |
return *this; | |
} | |
const iterator operator + (ptrdiff_t delta) const { | |
return iterator(*this) += delta; | |
} | |
const iterator operator - (ptrdiff_t delta) const { | |
return iterator(*this) -= delta; | |
} | |
iterator& operator ++ () { *this += 1; return *this; } | |
iterator& operator -- () { *this -= 1; return *this; } | |
iterator operator ++ (int) { | |
iterator result(*this); *this += 1; return result; | |
} | |
iterator operator -- (int) { | |
iterator result(*this); *this -= 1; return result; | |
} | |
ptrdiff_t operator - (const iterator& rhs) const { | |
return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index; | |
} | |
Element& operator * () const { return *element; } | |
Element* operator -> () const { return element; } | |
operator Element& () const { return *element; } | |
bool operator == (const iterator& rhs) const { | |
return this->element == rhs.element; | |
} | |
bool operator != (const iterator& rhs) const { | |
return this->element != rhs.element; | |
} | |
bool operator < (const iterator& rhs) const { | |
return this->element < rhs.element; | |
} | |
bool operator > (const iterator& rhs) const { | |
return this->element > rhs.element; | |
} | |
}; | |
}; | |
/*********************************************************************** | |
* RelativePointer<T> | |
* A pointer stored as an offset from the address of that offset. | |
* | |
* The target address is computed by taking the address of this struct | |
* and adding the offset stored within it. This is a 32-bit signed | |
* offset giving ±2GB of range. | |
**********************************************************************/ | |
template <typename T> | |
struct RelativePointer: nocopy_t { | |
int32_t offset; | |
T get() const { | |
if (offset == 0) | |
return nullptr; | |
uintptr_t base = (uintptr_t)&offset; | |
uintptr_t signExtendedOffset = (uintptr_t)(intptr_t)offset; | |
uintptr_t pointer = base + signExtendedOffset; | |
return (T)pointer; | |
} | |
}; | |
/*********************************************************************** | |
* list_array_tt<Element, List, Ptr> | |
* Generic implementation for metadata that can be augmented by categories. | |
* | |
* Element is the underlying metadata type (e.g. method_t) | |
* List is the metadata's list type (e.g. method_list_t) | |
* List is a template applied to Element to make Element*. Useful for | |
* applying qualifiers to the pointer type. | |
* | |
* A list_array_tt has one of three values: | |
* - empty | |
* - a pointer to a single list | |
* - an array of pointers to lists | |
* | |
* countLists/beginLists/endLists iterate the metadata lists | |
* count/begin/end iterate the underlying metadata elements | |
**********************************************************************/ | |
template <typename Element, typename List, template<typename> class Ptr> | |
class list_array_tt { | |
struct array_t { | |
uint32_t count; | |
Ptr<List> lists[0]; | |
static size_t byteSize(uint32_t count) { | |
return sizeof(array_t) + count*sizeof(lists[0]); | |
} | |
size_t byteSize() { | |
return byteSize(count); | |
} | |
}; | |
protected: | |
class iterator { | |
const Ptr<List> *lists; | |
const Ptr<List> *listsEnd; | |
typename List::iterator m, mEnd; | |
public: | |
iterator(const Ptr<List> *begin, const Ptr<List> *end) | |
: lists(begin), listsEnd(end) | |
{ | |
if (begin != end) { | |
m = (*begin)->begin(); | |
mEnd = (*begin)->end(); | |
} | |
} | |
const Element& operator * () const { | |
return *m; | |
} | |
Element& operator * () { | |
return *m; | |
} | |
bool operator != (const iterator& rhs) const { | |
if (lists != rhs.lists) return true; | |
if (lists == listsEnd) return false; // m is undefined | |
if (m != rhs.m) return true; | |
return false; | |
} | |
const iterator& operator ++ () { | |
ASSERT(m != mEnd); | |
m++; | |
if (m == mEnd) { | |
ASSERT(lists != listsEnd); | |
lists++; | |
if (lists != listsEnd) { | |
m = (*lists)->begin(); | |
mEnd = (*lists)->end(); | |
} | |
} | |
return *this; | |
} | |
}; | |
private: | |
union { | |
Ptr<List> list; | |
uintptr_t arrayAndFlag; | |
}; | |
bool hasArray() const { | |
return arrayAndFlag & 1; | |
} | |
array_t *array() const { | |
return (array_t *)(arrayAndFlag & ~1); | |
} | |
void setArray(array_t *array) { | |
arrayAndFlag = (uintptr_t)array | 1; | |
} | |
void validate() { | |
for (auto cursor = beginLists(), end = endLists(); cursor != end; cursor++) | |
cursor->validate(); | |
} | |
public: | |
list_array_tt() : list(nullptr) { } | |
list_array_tt(List *l) : list(l) { } | |
list_array_tt(const list_array_tt &other) { | |
*this = other; | |
} | |
list_array_tt &operator =(const list_array_tt &other) { | |
if (other.hasArray()) { | |
arrayAndFlag = other.arrayAndFlag; | |
} else { | |
list = other.list; | |
} | |
return *this; | |
} | |
uint32_t count() const { | |
uint32_t result = 0; | |
for (auto lists = beginLists(), end = endLists(); | |
lists != end; | |
++lists) | |
{ | |
result += (*lists)->count; | |
} | |
return result; | |
} | |
iterator begin() const { | |
return iterator(beginLists(), endLists()); | |
} | |
iterator end() const { | |
auto e = endLists(); | |
return iterator(e, e); | |
} | |
inline uint32_t countLists(const std::function<const array_t * (const array_t *)> & peek) const { | |
if (hasArray()) { | |
return peek(array())->count; | |
} else if (list) { | |
return 1; | |
} else { | |
return 0; | |
} | |
} | |
uint32_t countLists() { | |
return countLists([](array_t *x) { return x; }); | |
} | |
const Ptr<List>* beginLists() const { | |
if (hasArray()) { | |
return array()->lists; | |
} else { | |
return &list; | |
} | |
} | |
const Ptr<List>* endLists() const { | |
if (hasArray()) { | |
return array()->lists + array()->count; | |
} else if (list) { | |
return &list + 1; | |
} else { | |
return &list; | |
} | |
} | |
void attachLists(List* const * addedLists, uint32_t addedCount) { | |
if (addedCount == 0) return; | |
if (hasArray()) { | |
// many lists -> many lists | |
uint32_t oldCount = array()->count; | |
uint32_t newCount = oldCount + addedCount; | |
array_t *newArray = (array_t *)malloc(array_t::byteSize(newCount)); | |
newArray->count = newCount; | |
array()->count = newCount; | |
for (int i = oldCount - 1; i >= 0; i--) | |
newArray->lists[i + addedCount] = array()->lists[i]; | |
for (unsigned i = 0; i < addedCount; i++) | |
newArray->lists[i] = addedLists[i]; | |
free(array()); | |
setArray(newArray); | |
validate(); | |
} | |
else if (!list && addedCount == 1) { | |
// 0 lists -> 1 list | |
list = addedLists[0]; | |
validate(); | |
} | |
else { | |
// 1 list -> many lists | |
Ptr<List> oldList = list; | |
uint32_t oldCount = oldList ? 1 : 0; | |
uint32_t newCount = oldCount + addedCount; | |
setArray((array_t *)malloc(array_t::byteSize(newCount))); | |
array()->count = newCount; | |
if (oldList) array()->lists[addedCount] = oldList; | |
for (unsigned i = 0; i < addedCount; i++) | |
array()->lists[i] = addedLists[i]; | |
validate(); | |
} | |
} | |
void tryFree() { | |
if (hasArray()) { | |
for (uint32_t i = 0; i < array()->count; i++) { | |
try_free(array()->lists[i]); | |
} | |
try_free(array()); | |
} | |
else if (list) { | |
try_free(list); | |
} | |
} | |
template<typename Other> | |
void duplicateInto(Other &other) { | |
if (hasArray()) { | |
array_t *a = array(); | |
other.setArray((array_t *)memdup(a, a->byteSize())); | |
for (uint32_t i = 0; i < a->count; i++) { | |
other.array()->lists[i] = a->lists[i]->duplicate(); | |
} | |
} else if (list) { | |
other.list = list->duplicate(); | |
} else { | |
other.list = nil; | |
} | |
} | |
}; | |
// Identity wrapper so nested selectors can be unwrapped uniformly.
template <typename T> struct PointerUnionTypeSelectorReturn {
    using Return = T;
};

/// Get a type based on whether two types are the same or not.
///
/// For:
///
/// \code
/// using Ret = typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return;
/// \endcode
///
/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector {
    // Primary template: types differ.
    using Return = typename PointerUnionTypeSelectorReturn<RET_NE>::Return;
};

// Partial specialization: types match.
template <typename T, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
    using Return = typename PointerUnionTypeSelectorReturn<RET_EQ>::Return;
};

// Unwrap a nested selector so selectors can be chained.
template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
struct PointerUnionTypeSelectorReturn<
    PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
    using Return =
        typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return;
};
template <class T1, class T2> | |
class PointerUnion { | |
uintptr_t _value; | |
uintptr_t tag; | |
static_assert(alignof(T1) >= 2, "alignment requirement"); | |
static_assert(alignof(T2) >= 2, "alignment requirement"); | |
struct IsPT1 { | |
static const uintptr_t Num = 0; | |
}; | |
struct IsPT2 { | |
static const uintptr_t Num = 1; | |
}; | |
template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {}; | |
uintptr_t getPointer() const { | |
return _value; | |
} | |
uintptr_t getTag() const { | |
return tag; | |
} | |
public: | |
explicit PointerUnion(const std::atomic<uintptr_t> &raw) | |
: _value(raw.load(std::memory_order_relaxed)) | |
{ } | |
PointerUnion(T1 *t, const void *address) { | |
_value = t; | |
tag = IsPT1::Num; | |
} | |
PointerUnion(T2 *t, const void *address) { | |
_value = t; | |
tag = IsPT2::Num; | |
} | |
void storeAt(std::atomic<uintptr_t> &raw, std::memory_order order) const { | |
raw.store(_value, order); | |
} | |
template <typename T> | |
bool is() const { | |
using Ty = typename PointerUnionTypeSelector<T1 *, T, IsPT1, | |
PointerUnionTypeSelector<T2 *, T, IsPT2, | |
UNION_DOESNT_CONTAIN_TYPE<T>>>::Return; | |
return getTag() == Ty::Num; | |
} | |
template <typename T> T get(const void *address) const { | |
address = (void*)getPointer(); | |
return (T)address; | |
} | |
template <typename T> T dyn_cast(const void *address) const { | |
if (is<T>()) | |
return get<T>(address); | |
return T(); | |
} | |
}; | |
template <class PT1, class PT2, class PT3, class PT4 = void> | |
class PointerUnion4 { | |
uintptr_t _value; | |
static_assert(alignof(PT1) >= 4, "alignment requirement"); | |
static_assert(alignof(PT2) >= 4, "alignment requirement"); | |
static_assert(alignof(PT3) >= 4, "alignment requirement"); | |
static_assert(alignof(PT4) >= 4, "alignment requirement"); | |
struct IsPT1 { | |
static const uintptr_t Num = 0; | |
}; | |
struct IsPT2 { | |
static const uintptr_t Num = 1; | |
}; | |
struct IsPT3 { | |
static const uintptr_t Num = 2; | |
}; | |
struct IsPT4 { | |
static const uintptr_t Num = 3; | |
}; | |
template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {}; | |
uintptr_t getPointer() const { | |
return _value & ~3; | |
} | |
uintptr_t getTag() const { | |
return _value & 3; | |
} | |
public: | |
explicit PointerUnion4(const std::atomic<uintptr_t> &raw) | |
: _value(raw.load(std::memory_order_relaxed)) | |
{ } | |
PointerUnion4(PT1 t) : _value((uintptr_t)t) { } | |
PointerUnion4(PT2 t) : _value((uintptr_t)t | 1) { } | |
PointerUnion4(PT3 t) : _value((uintptr_t)t | 2) { } | |
PointerUnion4(PT4 t) : _value((uintptr_t)t | 3) { } | |
void storeAt(std::atomic<uintptr_t> &raw, std::memory_order order) const { | |
raw.store(_value, order); | |
} | |
template <typename T> | |
bool is() const { | |
using Ty = typename PointerUnionTypeSelector<PT1, T, IsPT1, | |
PointerUnionTypeSelector<PT2, T, IsPT2, | |
PointerUnionTypeSelector<PT3, T, IsPT3, | |
PointerUnionTypeSelector<PT4, T, IsPT4, | |
UNION_DOESNT_CONTAIN_TYPE<T>>>>>::Return; | |
return getTag() == Ty::Num; | |
} | |
template <typename T> T get() const { | |
ASSERT(is<T>() && "Invalid accessor called"); | |
return reinterpret_cast<T>(getPointer()); | |
} | |
template <typename T> T dyn_cast() const { | |
if (is<T>()) | |
return get<T>(); | |
return T(); | |
} | |
}; | |
// MARK: - Cache | |
/* dyld_shared_cache_builder and obj-C agree on these definitions */
// One entry of a preoptimized (shared-cache) method cache. Both fields
// are 32-bit offsets rather than absolute pointers — presumably
// relative to the shared-cache/image base; verify against the
// dyld_shared_cache_builder sources.
struct preopt_cache_entry_t {
uint32_t sel_offs;  // offset to the selector
uint32_t imp_offs;  // offset to the implementation
};
/* dyld_shared_cache_builder and obj-C agree on these definitions */
// Header of a preoptimized method cache baked into the shared cache,
// followed inline by its entries. Layout is ABI: do not reorder.
struct preopt_cache_t {
int32_t fallback_class_offset;
union {
struct {
// shift/mask parameterize the hash used to index `entries`.
uint16_t shift : 5;
uint16_t mask : 11;
};
uint16_t hash_params;  // both bitfields viewed as one 16-bit word
};
uint16_t occupied : 14;
uint16_t has_inlines : 1;
uint16_t bit_one : 1;
preopt_cache_entry_t entries[];  // flexible array of capacity() slots
inline int capacity() const {
// mask is table-size minus one, so capacity is mask + 1.
return mask + 1;
}
};
// MARK: - Core | |
static inline bool | |
_objc_isTaggedPointer(const void * _Nullable ptr) | |
{ | |
return ((uintptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK; | |
} | |
// MARK: - Properties | |
// A declared Objective-C @property: its name plus the encoded
// attribute string (type/ownership/getter-setter encoding).
struct property_t {
const char *name;
const char *attributes;
};
// Property list header + inline array of property_t (flag mask 0:
// the entsize word carries no flags).
struct property_list_t : entsize_list_tt<property_t, property_list_t, 0> {
};
// Segmented array of property lists attached to a class, stored via
// plain (unauthenticated) pointers.
class property_array_t :
public list_array_tt<property_t, property_list_t, RawPtr>
{
};
// MARK: - Methods | |
// A method entry. objc4 uses two in-memory layouts: "big" (three
// absolute pointers) and "small" (three 32-bit relative offsets).
// Which layout a given method_t* refers to is encoded in the low bit
// of the POINTER (see pointer_modifier below), not in the struct.
struct method_t {
// Set in a method list's flags when its entries are small methods.
static const uint32_t smallMethodListFlag = 0x80000000;
// The representation of a "big" method. This is the traditional
// representation of three pointers storing the selector, types
// and implementation.
struct big {
SEL name;
const char *types;
MethodListIMP imp;
};
// Reinterpret this method_t as its big representation.
// Only valid when isSmall() is false.
big &big() const {
ASSERT(!isSmall());
return *(struct big *)this;
}
// The representation of a "small" method. This stores three
// relative offsets to the name, types, and implementation.
struct small {
// The name field either refers to a selector (in the shared
// cache) or a selref (everywhere else).
RelativePointer<const void *> name;
RelativePointer<const char *> types;
RelativePointer<IMP> imp;
};
// A small method is marked by a set low bit on the method_t*.
bool isSmall() const {
return ((uintptr_t)this & 1) == 1;
}
// Reinterpret this method_t as its small representation, stripping
// the low tag bit first. Only valid when isSmall() is true.
small &small() const {
ASSERT(isSmall());
return *(struct small *)((uintptr_t)this & ~(uintptr_t)1);
}
static const auto bigSize = sizeof(struct big);
static const auto smallSize = sizeof(struct small);
// The pointer modifier used with method lists. When the method
// list contains small methods, set the bottom bit of the pointer.
// We use that bottom bit elsewhere to distinguish between big
// and small methods.
struct pointer_modifier {
template <typename ListType>
static method_t *modify(const ListType &list, method_t *ptr) {
if (list.flags() & smallMethodListFlag)
return (method_t *)((uintptr_t)ptr | 1);
return ptr;
}
};
};
// Method list header + inline method_t array. 0xffff0003 is the flag
// mask handed to entsize_list_tt — presumably flags (including
// smallMethodListFlag) live in those entsize bits; verify against the
// entsize_list_tt definition earlier in this file.
struct method_list_t : entsize_list_tt<method_t, method_list_t, 0xffff0003, method_t::pointer_modifier> {
};
// Declares method_list_t_authed_ptr, the (possibly ptrauth-signed)
// pointer wrapper used by method_array_t below.
DECLARE_AUTHED_PTR_TEMPLATE(method_list_t)
// Segmented array of method lists attached to a class.
class method_array_t :
public list_array_tt<method_t, method_list_t, method_list_t_authed_ptr>
{
};
// MARK: - Protocols | |
// Runtime representation of an Objective-C protocol. protocol_t is
// itself an object (it inherits objc_object's isa word).
struct protocol_t : objc_object {
const char *mangledName;
struct protocol_list_t *protocols;  // protocols this one conforms to
method_list_t *instanceMethods;
method_list_t *classMethods;
method_list_t *optionalInstanceMethods;
method_list_t *optionalClassMethods;
property_list_t *instanceProperties;
uint32_t size; // sizeof(protocol_t)
uint32_t flags;
// Fields below this point are not always present on disk.
// NOTE(review): presumably `size` gates whether they exist — verify
// against the objc4 readers.
const char **_extendedMethodTypes;
const char *_demangledName;
property_list_t *_classProperties;
};
typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped
// A counted, variably-sized list of protocol references.
struct protocol_list_t {
// count is pointer-sized by accident.
uintptr_t count;
protocol_ref_t list[0]; // variable-size
};
// Segmented array of protocol lists attached to a class.
class protocol_array_t :
public list_array_tt<protocol_ref_t, protocol_list_t, RawPtr>
{
};
// MARK: - IVar | |
// Runtime representation of one instance variable.
struct ivar_t {
#if __x86_64__
// *offset was originally 64-bit on some x86_64 platforms.
// We read and write only 32 bits of it.
// Some metadata provides all 64 bits. This is harmless for unsigned
// little-endian values.
// Some code uses all 64 bits. class_addIvar() over-allocates the
// offset for their benefit.
#endif
int32_t *offset;  // points at the ivar's offset variable, not the value itself
const char *name;
const char *type;  // presumably an @encode() type string — verify
// alignment is sometimes -1; use alignment() instead
uint32_t alignment_raw;
uint32_t size;
};
// Ivar list header + inline ivar_t array (no entsize flags).
struct ivar_list_t : entsize_list_tt<ivar_t, ivar_list_t, 0> {
};
// MARK: - Class | |
// Per-class method cache. One word holds the bucket pointer (and on
// some configurations the mask); the union either breaks the second
// word into mask/flags/occupied or views it as a preopt_cache_t*.
// Layout mirrors the runtime ABI — do not reorder fields.
struct cache_t {
private:
explicit_atomic<uintptr_t> _bucketsAndMaybeMask;
union {
struct {
explicit_atomic<mask_t> _maybeMask;
#if __LP64__
uint16_t _flags;
#endif
uint16_t _occupied;
};
explicit_atomic<preopt_cache_t *> _originalPreoptCache;
};
};
typedef objc_class* Class;
// Compiler-emitted, read-only class metadata (the "ro" data).
struct class_ro_t {
uint32_t flags;
uint32_t instanceStart;
uint32_t instanceSize;
#ifdef __LP64__
uint32_t reserved;
#endif
const uint8_t * ivarLayout;
// NOTE(review): upstream objc4 types this as a pointer-union over
// the (possibly swift-mangled) name — here reduced to void*.
void * name;
// With ptrauth, this is signed if it points to a small list, but
// may be unsigned if it points to a big list.
void *baseMethodList;
protocol_list_t * baseProtocols;
const ivar_list_t * ivars;
const uint8_t * weakIvarLayout;
property_list_t *baseProperties;
// This field exists only when RO_HAS_SWIFT_INITIALIZER is set.
_objc_swiftMetadataInitializer __ptrauth_objc_method_list_imp _swiftMetadataInitializer_NEVER_USE[0];
};
// Extension of class_rw_t holding the mutable copies of the
// method/property/protocol lists plus a cached demangled name.
struct class_rw_ext_t {
const class_ro_t ro;
method_array_t methods;
property_array_t properties;
protocol_array_t protocols;
char *demangledName;
uint32_t version;
};
// Runtime-allocated, read-write class metadata.
struct class_rw_t {
// Be warned that Symbolication knows the layout of this structure.
uint32_t flags;
uint16_t witness;
#if SUPPORT_INDEXED_ISA
uint16_t index;
#endif
class_rw_ext_t* ext;  // NOTE(review): upstream stores this atomically as a ro/rw_ext pointer union
Class firstSubclass;
Class nextSiblingClass;
};
// The `bits` word of a class: a data pointer with FAST_ flags packed
// into its spare bits; FAST_DATA_MASK extracts the pointer part.
struct class_data_bits_t {
friend objc_class;
// Values are the FAST_ flags above.
uintptr_t bits;
// NOTE(review): upstream objc4 returns class_rw_t* here; this
// minimal header types it as class_ro_t* — confirm intended use.
class_ro_t* data() const {
return (class_ro_t *)(bits & FAST_DATA_MASK);
}
};
// Class object layout; the leading isa word comes from objc_object.
struct objc_class : objc_object {
Class superclass;
cache_t cache; // formerly cache pointer and vtable
class_ro_t* bits; // class_rw_t * plus custom rr/alloc flags
// Read `superclass`, honoring pointer authentication when enabled:
// the stored pointer is signed with ISA_SIGNING_KEY blended with
// the address of the superclass field itself.
Class getSuperclass() const {
#if __has_feature(ptrauth_calls)
# if ISA_SIGNING_AUTH_MODE == ISA_SIGNING_AUTH
if (superclass == Nil)
return Nil;
#if SUPERCLASS_SIGNING_TREAT_UNSIGNED_AS_NIL
// If the stored value carries no signature (stripping changes
// nothing) yet signing it would produce different bits, report
// Nil rather than trapping in ptrauth_auth_data below.
void *stripped = ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
if ((void *)superclass == stripped) {
void *resigned = ptrauth_sign_unauthenticated(stripped, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
if ((void *)superclass != resigned)
return Nil;
}
#endif
// Authenticate; a forged pointer traps here.
void *result = ptrauth_auth_data((void *)superclass, ISA_SIGNING_KEY, ptrauth_blend_discriminator(&superclass, ISA_SIGNING_DISCRIMINATOR_CLASS_SUPERCLASS));
return (Class)result;
# else
// Strip-only auth mode: just remove the signature bits.
return (Class)ptrauth_strip((void *)superclass, ISA_SIGNING_KEY);
# endif
#else
// No ptrauth on this target: plain load.
return superclass;
#endif
}
};
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment