diff --git a/CMakeLists.txt b/CMakeLists.txt
index b7f771c29..2bef26395 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,6 +40,20 @@ int main(int argc, char *argv[]) {
   if(DISPATCH_HAVE_EXTENDED_SLPI_20348)
     add_compile_definitions(DISPATCH_HAVE_EXTENDED_SLPI_20348)
   endif()
+
+  check_c_source_compiles([=[
+#include <windows.h>
+int main(int argc, char *argv[]) {
+  switch ((LOGICAL_PROCESSOR_RELATIONSHIP)0) {
+    case RelationProcessorModule:
+      return 0;
+  }
+  return 0;
+}
+]=] DISPATCH_HAVE_EXTENDED_SLPI_22000)
+  if(DISPATCH_HAVE_EXTENDED_SLPI_22000)
+    add_compile_definitions(DISPATCH_HAVE_EXTENDED_SLPI_22000)
+  endif()
 endif()
 
 set(CMAKE_C_STANDARD 11)
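Editor's note: a hedged sketch, not part of this patch, of the kind of consumer code this probe enables; the helper function is hypothetical. RelationProcessorModule only exists in newer Windows SDKs, which is exactly what the check_c_source_compiles() test detects at configure time.

    #include <windows.h>

    /* Sketch: handle the newer SLPI relationship only when the SDK declares it. */
    static void example_handle_relationship(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *slpi)
    {
        switch (slpi->Relationship) {
        case RelationProcessorCore:
            /* per-core handling elided */
            break;
    #if defined(DISPATCH_HAVE_EXTENDED_SLPI_22000)
        case RelationProcessorModule:
            /* compiled only when the configure probe succeeded */
            break;
    #endif
        default:
            break;
        }
    }
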
diff --git a/PATCHES b/PATCHES
index b4483135a..42b01784b 100644
--- a/PATCHES
+++ b/PATCHES
@@ -434,3 +434,113 @@ github commits starting with 29bdc2f from
 [3975b58] APPLIED rdar://44568645
 [81dc900] APPLIED rdar://44568645
 [6162a1d] APPLIED rdar://44568645
+[c55ff6f] APPLIED rdar://54572081
+[c4a7149] APPLIED rdar://54572081
+[edce1fe] APPLIED rdar://54572081
+[ac525a4] APPLIED rdar://54572081
+[0710b29] APPLIED rdar://54572081
+[e99de71] APPLIED rdar://54572081
+[6d83ad5] APPLIED rdar://54572081
+[3ed78b5] APPLIED rdar://54572081
+[f6376cb] APPLIED rdar://54572081
+[9acbab3] APPLIED rdar://54572081
+[ca08b5f] APPLIED rdar://54572081
+[775f9f2] APPLIED rdar://54572081
+[db37bbc] APPLIED rdar://54572081
+[9852dcb] APPLIED rdar://54572081
+[9ec95bf] APPLIED rdar://54572081
+[bd2367c] APPLIED rdar://54572081
+[a736ea7] APPLIED rdar://54572081
+[3e4ea66] APPLIED rdar://54572081
+[c85c0d8] APPLIED rdar://54572081
+[7187ea2] APPLIED rdar://54572081
+[30eeb14] APPLIED rdar://54572081
+[6a5c6d8] APPLIED rdar://54572081
+[64a12c6] APPLIED rdar://54572081
+[09ec354] APPLIED rdar://54572081
+[5bcd598] APPLIED rdar://54572081
+[7874a92] APPLIED rdar://54572081
+[619775e] APPLIED rdar://54572081
+[e3ae79b] APPLIED rdar://54572081
+[fb368f6] APPLIED rdar://54572081
+[afa6cc3] APPLIED rdar://54572081
+[e6df818] APPLIED rdar://54572081
+[7144ee3] APPLIED rdar://54572081
+[60ffcc2] APPLIED rdar://54572081
+[618b070] APPLIED rdar://54572081
+[dde5892] APPLIED rdar://54572081
+[81c9bf6] APPLIED rdar://54572081
+[4b85ca6] APPLIED rdar://54572081
+[ff3bf51] APPLIED rdar://54572081
+[bc00e13] APPLIED rdar://54572081
+[d44acc0] APPLIED rdar://54572081
+[4659503] APPLIED rdar://54572081
+[60fdf80] APPLIED rdar://54572081
+[7a74af4] APPLIED rdar://54572081
+[f20349f] APPLIED rdar://54572081
+[ef9364c] APPLIED rdar://54572081
+[9d485ca] APPLIED rdar://54572081
+[cbd70d1] APPLIED rdar://54572081
+[6e1825a] APPLIED rdar://54572081
+[319bd33] APPLIED rdar://54572081
+[6c5b3ba] APPLIED rdar://54572081
+[7e7677b] APPLIED rdar://54572081
+[9002f70] APPLIED rdar://54572081
+[cc04868] APPLIED rdar://54572081
+[dc0dd64] APPLIED rdar://54572081
+[a5f5a92] APPLIED rdar://54572081
+[e5ba042] APPLIED rdar://54572081
+[a3bff44] APPLIED rdar://54572081
+[2e3d5c0] APPLIED rdar://54572081
+[1482ec9] APPLIED rdar://54572081
+[6bf6cb1] APPLIED rdar://54572081
+[aa13cad] APPLIED rdar://54572081
+[b073d89] APPLIED rdar://54572081
+[7784917] APPLIED rdar://54572081
+[717b3f7] APPLIED rdar://54572081
+[37010f0] APPLIED rdar://54572081
+[251dba4] APPLIED rdar://54572081
+[a18aa1f] APPLIED rdar://54572081
+[e8d020e] APPLIED rdar://54572081
+[90a84a1] APPLIED rdar://54572081
+[7721660] APPLIED rdar://54572081
+[c5af10f] APPLIED rdar://54572081
+[f01432d] APPLIED rdar://54572081
+[d0394bf] APPLIED rdar://54572081
+[2b14a98] APPLIED rdar://54572081
+[d32596b] APPLIED rdar://54572081
+[52bc6b2] APPLIED rdar://54572081
+[4169c8d] APPLIED rdar://54572081
+[318f6e5] APPLIED rdar://54572081
+[6a36af8] APPLIED rdar://54572081
+[d11d565] APPLIED rdar://54572081
+[d9740c2] APPLIED rdar://54572081
+[fc917b4] APPLIED rdar://54572081
+[f911a44] APPLIED rdar://54572081
+[6d32c4d] APPLIED rdar://54572081
+[9005cb4] APPLIED rdar://54572081
+[68875cb] APPLIED rdar://54572081
+[fc73866] APPLIED rdar://54572081
+[3cf1bf3] APPLIED rdar://54572081
+[3da29dd] APPLIED rdar://81276248
+[90a45ce] APPLIED rdar://81276248
+[37c8c28] APPLIED rdar://81276248
+[c023edd] APPLIED rdar://81276248
+[ab8a151] APPLIED rdar://81276248
+[c66cb25] APPLIED rdar://81276248
+[289e552] APPLIED rdar://81276248
+[afd6b6d] APPLIED rdar://81276248
+[4c91d20] APPLIED rdar://81276248
+[2accb0b] APPLIED rdar://81276248
+[b0b314c] APPLIED rdar://81276248
+[c992dac] APPLIED rdar://81276248
+[80b1772] APPLIED rdar://81276248
+[1986f39] APPLIED rdar://81276248
+[598ce42] APPLIED rdar://81276248
+[feb4421] APPLIED rdar://81276248
+[f152471] APPLIED rdar://81276248
+[457b110] APPLIED rdar://81276248
+[f13ea5d] APPLIED rdar://81276248
+[1c303fa] APPLIED rdar://81276248
+[34f383d] APPLIED rdar://81276248
+[7870521] APPLIED rdar://81276248
diff --git a/config/config.h b/config/config.h
index 79fc5b2cc..c1ef8aaeb 100644
--- a/config/config.h
+++ b/config/config.h
@@ -61,6 +61,10 @@
    you don't. */
 #define HAVE_DECL_VQ_VERYLOWDISK 1
 
+/* Define to 1 if you have the declaration of `VQ_SERVEREVENT', and to 0 if
+   you don't. */
+#define HAVE_DECL_VQ_SERVEREVENT 1
+
 /* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if you
    don't. */
 #define HAVE_DECL_VQ_QUOTA 1
@@ -73,10 +77,6 @@
    you don't. */
 #define HAVE_DECL_VQ_DESIRED_DISK 1
 
-/* Define to 1 if you have the declaration of `VQ_FREE_SPACE_CHANGE', and to 0 if
-   you don't. */
-#define HAVE_DECL_VQ_FREE_SPACE_CHANGE 1
-
 /* Define to 1 if you have the <dlfcn.h> header file. */
 #define HAVE_DLFCN_H 1
 
@@ -125,6 +125,12 @@
 /* Define if you have the Objective-C runtime */
 #define HAVE_OBJC 1
 
+/* Define to 1 if you have the `posix_fadvise' function. */
+#define HAVE_POSIX_FADVISE 0
+
+/* Define to 1 if you have the `posix_spawnp' function. */
+#define HAVE_POSIX_SPAWNP 1
+
 /* Define to 1 if you have the `pthread_key_init_np' function. */
 #define HAVE_PTHREAD_KEY_INIT_NP 1
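Editor's note: for context, a hedged sketch of how a HAVE_* result from config.h is typically consumed; the helper is hypothetical and not part of libdispatch.

    #include <spawn.h>
    #include <sys/types.h>

    extern char **environ;

    /* Sketch: prefer posix_spawnp() when the configure-time probe found it. */
    static int example_spawn(pid_t *pid, char *const argv[])
    {
    #if HAVE_POSIX_SPAWNP
        return posix_spawnp(pid, argv[0], NULL, NULL, argv, environ);
    #else
        return -1; /* fork()/execvp() fallback elided */
    #endif
    }
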
diff --git a/dispatch/base.h b/dispatch/base.h
index 7123f83ad..0a2370bd8 100644
--- a/dispatch/base.h
+++ b/dispatch/base.h
@@ -264,13 +264,35 @@
 #endif
 #endif
 
-#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) || defined(_WIN32)
+#if __has_attribute(enum_extensibility)
+#define __DISPATCH_ENUM_ATTR __attribute__((__enum_extensibility__(open)))
+#define __DISPATCH_ENUM_ATTR_CLOSED __attribute__((__enum_extensibility__(closed)))
+#else
+#define __DISPATCH_ENUM_ATTR
+#define __DISPATCH_ENUM_ATTR_CLOSED
+#endif // __has_attribute(enum_extensibility)
+
+#if __has_attribute(flag_enum)
+#define __DISPATCH_OPTIONS_ATTR __attribute__((__flag_enum__))
+#else
+#define __DISPATCH_OPTIONS_ATTR
+#endif // __has_attribute(flag_enum)
+
+
+#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) || \
+        __has_extension(cxx_fixed_enum) || defined(_WIN32)
 #define DISPATCH_ENUM(name, type, ...) \
-        typedef enum : type { __VA_ARGS__ } name##_t
+        typedef enum : type { __VA_ARGS__ } __DISPATCH_ENUM_ATTR name##_t
+#define DISPATCH_OPTIONS(name, type, ...) \
+        typedef enum : type { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR name##_t
 #else
 #define DISPATCH_ENUM(name, type, ...) \
-        enum { __VA_ARGS__ }; typedef type name##_t
-#endif
+        enum { __VA_ARGS__ } __DISPATCH_ENUM_ATTR; typedef type name##_t
+#define DISPATCH_OPTIONS(name, type, ...) \
+        enum { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR; typedef type name##_t
+#endif // __has_feature(objc_fixed_enum) ...
+
+
 #if __has_feature(enumerator_attributes)
 #define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__)
@@ -283,12 +305,11 @@
 #define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...)
 #endif
 
-#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \
-        SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#ifdef __swift__
 #define DISPATCH_SWIFT3_OVERLAY 1
-#else
+#else // __swift__
 #define DISPATCH_SWIFT3_OVERLAY 0
-#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#endif // __swift__
 
 #if __has_feature(attribute_availability_swift)
 #define DISPATCH_SWIFT_UNAVAILABLE(_msg) \
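Editor's note: an illustrative declaration, not from this patch, of what the new DISPATCH_OPTIONS macro produces; the dispatch/block.h hunk below applies it to dispatch_block_flags. With enum_extensibility(open) plus flag_enum support, the compiler knows values of the type are meant to combine bitwise.

    /* Sketch: a hypothetical options type declared with DISPATCH_OPTIONS. */
    DISPATCH_OPTIONS(example_options, unsigned long,
        EXAMPLE_OPTION_A = 0x1,
        EXAMPLE_OPTION_B = 0x2,
    );

    /* -Wflag-enum style diagnostics now accept OR'd combinations. */
    example_options_t opts = EXAMPLE_OPTION_A | EXAMPLE_OPTION_B;
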
diff --git a/dispatch/block.h b/dispatch/block.h
index e6bf4f864..6aa3c8f2d 100644
--- a/dispatch/block.h
+++ b/dispatch/block.h
@@ -100,7 +100,7 @@ __BEGIN_DECLS
 * for synchronous execution or when the dispatch block object is invoked
 * directly.
 */
-DISPATCH_ENUM(dispatch_block_flags, unsigned long,
+DISPATCH_OPTIONS(dispatch_block_flags, unsigned long,
 	DISPATCH_BLOCK_BARRIER
 			DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1,
 	DISPATCH_BLOCK_DETACHED
diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h
index 0c7bdd43a..9b517f36c 100644
--- a/dispatch/dispatch.h
+++ b/dispatch/dispatch.h
@@ -54,7 +54,7 @@
 #endif
 #endif
 
-#define DISPATCH_API_VERSION 20180109
+#define DISPATCH_API_VERSION 20181008
 
 #ifndef __DISPATCH_BUILDING_DISPATCH__
 #ifndef __DISPATCH_INDIRECT__
@@ -62,6 +62,7 @@
 #endif
 
 #include <os/object.h>
+#include <os/workgroup.h>
 #include <dispatch/base.h>
 #include <dispatch/object.h>
 #include <dispatch/queue.h>
@@ -73,6 +74,7 @@
 #include <dispatch/once.h>
 #include <dispatch/data.h>
 #include <dispatch/io.h>
+#include <dispatch/workloop.h>
 
 #undef __DISPATCH_INDIRECT__
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
diff --git a/dispatch/object.h b/dispatch/object.h
index 02815f3f2..8211fbd49 100644
--- a/dispatch/object.h
+++ b/dispatch/object.h
@@ -26,6 +26,10 @@
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
+#if __has_include(<sys/qos.h>)
+#include <sys/qos.h>
+#endif
+
 DISPATCH_ASSUME_NONNULL_BEGIN
 
 /*!
@@ -95,6 +99,7 @@ typedef union {
 	struct dispatch_queue_attr_s *_dqa;
 	struct dispatch_group_s *_dg;
 	struct dispatch_source_s *_ds;
+	struct dispatch_channel_s *_dch;
 	struct dispatch_mach_s *_dm;
 	struct dispatch_mach_msg_s *_dmsg;
 	struct dispatch_semaphore_s *_dsema;
@@ -178,6 +183,16 @@ typedef void (^dispatch_block_t)(void);
 
 __BEGIN_DECLS
 
+/*!
+ * @typedef dispatch_qos_class_t
+ * Alias for qos_class_t type.
+ */
+#if __has_include(<sys/qos.h>)
+typedef qos_class_t dispatch_qos_class_t;
+#else
+typedef unsigned int dispatch_qos_class_t;
+#endif
+
 /*!
  * @function dispatch_retain
 
@@ -374,6 +389,49 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_resume(dispatch_object_t object);
 
+/*!
+ * @function dispatch_set_qos_class_floor
+ *
+ * @abstract
+ * Sets the QOS class floor on a dispatch queue, source or workloop.
+ *
+ * @discussion
+ * The QOS class of workitems submitted to this object asynchronously will be
+ * elevated to at least the specified QOS class floor. The QOS of the workitem
+ * will be used if higher than the floor even when the workitem has been created
+ * without "ENFORCE" semantics.
+ *
+ * Setting the QOS class floor is equivalent to the QOS effects of configuring
+ * a queue whose target queue has a QoS class set to the same value.
+ *
+ * @param object
+ * A dispatch queue, workloop, or source to configure.
+ * The object must be inactive.
+ *
+ * Passing another object type or an object that has been activated is undefined
+ * and will cause the process to be terminated.
+ *
+ * @param qos_class
+ * A QOS class value:
+ *  - QOS_CLASS_USER_INTERACTIVE
+ *  - QOS_CLASS_USER_INITIATED
+ *  - QOS_CLASS_DEFAULT
+ *  - QOS_CLASS_UTILITY
+ *  - QOS_CLASS_BACKGROUND
+ * Passing any other value is undefined.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_set_qos_class_floor(dispatch_object_t object,
+        dispatch_qos_class_t qos_class, int relative_priority);
+
 #ifdef __BLOCKS__
 /*!
  * @function dispatch_wait
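Editor's note: a usage sketch, not part of this patch. Since dispatch_set_qos_class_floor() requires an inactive object, the sketch pairs it with dispatch_queue_attr_make_initially_inactive(); the queue label is illustrative.

    #include <dispatch/dispatch.h>

    /* Sketch: configure a QOS floor on an initially-inactive queue, then
     * activate it. Workitems submitted afterwards run at QOS_CLASS_UTILITY
     * or higher; workitems with a higher QOS keep their own class. */
    dispatch_queue_t q = dispatch_queue_create("com.example.floor",
            dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL));
    dispatch_set_qos_class_floor(q, QOS_CLASS_UTILITY, 0);
    dispatch_activate(q);
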
diff --git a/dispatch/queue.h b/dispatch/queue.h
index 969dc880a..c4820b6c4 100644
--- a/dispatch/queue.h
+++ b/dispatch/queue.h
@@ -26,10 +26,6 @@
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
-#if __has_include(<sys/qos.h>)
-#include <sys/qos.h>
-#endif
-
 DISPATCH_ASSUME_NONNULL_BEGIN
 
 /*!
@@ -336,6 +332,102 @@ void
 dispatch_sync_f(dispatch_queue_t queue,
         void *_Nullable context, dispatch_function_t work);
 
+/*!
+ * @function dispatch_async_and_wait
+ *
+ * @abstract
+ * Submits a block for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a workitem to a dispatch queue like dispatch_async(), however
+ * dispatch_async_and_wait() will not return until the workitem has finished.
+ *
+ * Like functions of the dispatch_sync family, dispatch_async_and_wait() is
+ * subject to deadlock (See dispatch_sync() for details).
+ *
+ * However, dispatch_async_and_wait() differs from functions of the
+ * dispatch_sync family in two fundamental ways: how it respects queue
+ * attributes and how it chooses the execution context invoking the workitem.
+ *
+ * <b>Differences with dispatch_sync()</b>
+ *
+ * Work items submitted to a queue with dispatch_async_and_wait() observe all
+ * queue attributes of that queue when invoked (including autorelease frequency
+ * or QOS class).
+ *
+ * When the runtime has brought up a thread to invoke the asynchronous workitems
+ * already submitted to the specified queue, that servicing thread will also be
+ * used to execute synchronous work submitted to the queue with
+ * dispatch_async_and_wait().
+ *
+ * However, if the runtime has not brought up a thread to service the specified
+ * queue (because it has no workitems enqueued, or only synchronous workitems),
+ * then dispatch_async_and_wait() will invoke the workitem on the calling thread,
+ * similar to the behaviour of functions in the dispatch_sync family.
+ *
+ * As an exception, if the queue the work is submitted to doesn't target
+ * a global concurrent queue (for example because it targets the main queue),
+ * then the workitem will never be invoked by the thread calling
+ * dispatch_async_and_wait().
+ *
+ * In other words, dispatch_async_and_wait() is similar to submitting
+ * a dispatch_block_create()d workitem to a queue and then waiting on it, as
+ * shown in the code example below. However, dispatch_async_and_wait() is
+ * significantly more efficient when a new thread is not required to execute
+ * the workitem (as it will use the stack of the submitting thread instead of
+ * requiring heap allocations).
+ *
+ * <code>
+ *     dispatch_block_t b = dispatch_block_create(0, block);
+ *     dispatch_async(queue, b);
+ *     dispatch_block_wait(b, DISPATCH_TIME_FOREVER);
+ *     Block_release(b);
+ * </code>
+ *
+ * @param queue
+ * The target dispatch queue to which the block is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The block to be invoked on the target dispatch queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_async_and_wait(dispatch_queue_t queue,
+        DISPATCH_NOESCAPE dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_async_and_wait_f
+ *
+ * @abstract
+ * Submits a function for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * See dispatch_async_and_wait() for details.
+ *
+ * @param queue
+ * The target dispatch queue to which the function is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_async_and_wait_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_async_and_wait_f(dispatch_queue_t queue,
+        void *_Nullable context, dispatch_function_t work);
+
 #if defined(__APPLE__) && \
         (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
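Editor's note: an illustrative call site, not from this patch. Unlike dispatch_sync(), the workitem observes the queue's own attributes and may run on the queue's servicing thread rather than the caller's; names are illustrative.

    #include <dispatch/dispatch.h>

    dispatch_queue_t state_q = dispatch_queue_create("com.example.state",
            DISPATCH_QUEUE_SERIAL);
    __block int counter = 0;

    /* Returns only once the increment has run on state_q. */
    dispatch_async_and_wait(state_q, ^{
        counter++;
    });
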
@@ -404,7 +496,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_apply(size_t iterations,
         dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue,
-        DISPATCH_NOESCAPE void (^block)(size_t));
+        DISPATCH_NOESCAPE void (^block)(size_t iteration));
 #endif
 
 /*!
@@ -439,7 +531,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_apply_f(size_t iterations,
         dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue,
-        void *_Nullable context, void (*work)(void *_Nullable, size_t));
+        void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration));
 
 /*!
  * @function dispatch_get_current_queue
@@ -549,16 +641,6 @@ dispatch_get_main_queue(void)
 
 typedef long dispatch_queue_priority_t;
 
-/*!
- * @typedef dispatch_qos_class_t
- * Alias for qos_class_t type.
- */
-#if __has_include(<sys/qos.h>)
-typedef qos_class_t dispatch_qos_class_t;
-#else
-typedef unsigned int dispatch_qos_class_t;
-#endif
-
 /*!
  * @function dispatch_get_global_queue
@@ -1214,7 +1296,8 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue,
 * Submits a block to a dispatch queue like dispatch_async(), but marks that
 * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues).
 *
- * See dispatch_async() for details.
+ * See dispatch_async() for details and "Dispatch Barrier API" for a description
+ * of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the block is submitted.
@@ -1245,7 +1328,8 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block);
 * that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
 * queues).
 *
- * See dispatch_async_f() for details.
+ * See dispatch_async_f() for details and "Dispatch Barrier API" for a
+ * description of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the function is submitted.
@@ -1278,7 +1362,8 @@ dispatch_barrier_async_f(dispatch_queue_t queue,
 * Submits a block to a dispatch queue like dispatch_sync(), but marks that
 * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues).
 *
- * See dispatch_sync() for details.
+ * See dispatch_sync() for details and "Dispatch Barrier API" for a description
+ * of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the block is submitted.
@@ -1327,6 +1412,67 @@ void
 dispatch_barrier_sync_f(dispatch_queue_t queue,
         void *_Nullable context, dispatch_function_t work);
 
+/*!
+ * @function dispatch_barrier_async_and_wait
+ *
+ * @abstract
+ * Submits a block for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks
+ * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
+ * queues).
+ *
+ * See "Dispatch Barrier API" for a description of the barrier semantics.
+ *
+ * @param queue
+ * The target dispatch queue to which the block is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The application-defined block to invoke on the target queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_barrier_async_and_wait(dispatch_queue_t queue,
+        DISPATCH_NOESCAPE dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_barrier_async_and_wait_f
+ *
+ * @abstract
+ * Submits a function for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but
+ * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
+ * queues).
+ *
+ * See "Dispatch Barrier API" for a description of the barrier semantics.
+ *
+ * @param queue
+ * The target dispatch queue to which the function is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_barrier_async_and_wait_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_barrier_async_and_wait_f(dispatch_queue_t queue,
+        void *_Nullable context, dispatch_function_t work);
+
 /*!
 * @functiongroup Dispatch queue-specific contexts
 * This API allows different subsystems to associate context to a shared queue
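Editor's note: a usage sketch of the new barrier variant, not from the patch; names are illustrative.

    /* Sketch: concurrent readers, exclusive synchronous writer. */
    dispatch_queue_t rw_q = dispatch_queue_create("com.example.rw",
            DISPATCH_QUEUE_CONCURRENT);

    dispatch_async(rw_q, ^{ /* reader: runs concurrently */ });
    dispatch_async(rw_q, ^{ /* reader: runs concurrently */ });

    /* Waits for in-flight readers, runs alone, then returns to the caller. */
    dispatch_barrier_async_and_wait(rw_q, ^{
        /* writer: mutate shared state */
    });
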
@@ -1511,9 +1657,9 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue);
 * Verifies that the current block is not executing on a given dispatch queue.
 *
 * @discussion
- * This function is the equivalent of dispatch_queue_assert() with the test for
+ * This function is the equivalent of dispatch_assert_queue() with the test for
 * equality inverted. That means that it will terminate the application when
- * dispatch_queue_assert() would return, and vice-versa. See discussion there.
+ * dispatch_assert_queue() would return, and vice-versa. See discussion there.
 *
 * The variant dispatch_assert_queue_not_debug() is compiled out when the
 * preprocessor macro NDEBUG is defined. (See also assert(3)).
diff --git a/dispatch/source.h b/dispatch/source.h
index 597d23a4f..5ce826022 100644
--- a/dispatch/source.h
+++ b/dispatch/source.h
@@ -123,7 +123,8 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send);
 * @const DISPATCH_SOURCE_TYPE_MACH_RECV
 * @discussion A dispatch source that monitors a Mach port for pending messages.
 * The handle is a Mach port with a receive right (mach_port_t).
- * The mask is unused (pass zero for now).
+ * The mask is a mask of desired events from dispatch_source_mach_recv_flags_t,
+ * but no flags are currently defined (pass zero for now).
 */
 #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv)
 API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
@@ -218,6 +219,12 @@ DISPATCH_SOURCE_TYPE_DECL(write);
 typedef unsigned long dispatch_source_mach_send_flags_t;
 
+/*!
+ * @typedef dispatch_source_mach_recv_flags_t
+ * Type of dispatch_source_mach_recv flags
+ */
+typedef unsigned long dispatch_source_mach_recv_flags_t;
+
 /*!
 * @typedef dispatch_source_memorypressure_flags_t
 * Type of dispatch_source_memorypressure flags
@@ -582,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source);
 *  DISPATCH_SOURCE_TYPE_DATA_OR:         n/a
 *  DISPATCH_SOURCE_TYPE_DATA_REPLACE:    n/a
 *  DISPATCH_SOURCE_TYPE_MACH_SEND:       dispatch_source_mach_send_flags_t
- *  DISPATCH_SOURCE_TYPE_MACH_RECV:       n/a
+ *  DISPATCH_SOURCE_TYPE_MACH_RECV:       dispatch_source_mach_recv_flags_t
 *  DISPATCH_SOURCE_TYPE_MEMORYPRESSURE   dispatch_source_memorypressure_flags_t
 *  DISPATCH_SOURCE_TYPE_PROC:            dispatch_source_proc_flags_t
 *  DISPATCH_SOURCE_TYPE_READ:            n/a
@@ -619,7 +626,7 @@ dispatch_source_get_mask(dispatch_source_t source);
 *  DISPATCH_SOURCE_TYPE_DATA_OR:         application defined data
 *  DISPATCH_SOURCE_TYPE_DATA_REPLACE:    application defined data
 *  DISPATCH_SOURCE_TYPE_MACH_SEND:       dispatch_source_mach_send_flags_t
- *  DISPATCH_SOURCE_TYPE_MACH_RECV:       n/a
+ *  DISPATCH_SOURCE_TYPE_MACH_RECV:       dispatch_source_mach_recv_flags_t
 *  DISPATCH_SOURCE_TYPE_MEMORYPRESSURE   dispatch_source_memorypressure_flags_t
 *  DISPATCH_SOURCE_TYPE_PROC:            dispatch_source_proc_flags_t
 *  DISPATCH_SOURCE_TYPE_READ:            estimated bytes available to read
diff --git a/dispatch/workloop.h b/dispatch/workloop.h
new file mode 100644
index 000000000..98c4f8a41
--- /dev/null
+++ b/dispatch/workloop.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_WORKLOOP__
+#define __DISPATCH_WORKLOOP__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+DISPATCH_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+/*!
+ * @typedef dispatch_workloop_t
+ *
+ * @abstract
+ * Dispatch workloops invoke workitems submitted to them in priority order.
+ *
+ * @discussion
+ * A dispatch workloop is a flavor of dispatch_queue_t that is a priority
+ * ordered queue (using the QOS class of the submitted workitems as the
+ * ordering).
+ *
+ * Between each workitem invocation, the workloop will evaluate whether higher
+ * priority workitems have since been submitted, either directly to the
+ * workloop or to any queues that target the workloop, and execute these first.
+ *
+ * Serial queues targeting a workloop maintain FIFO execution of their
+ * workitems. However, the workloop may reorder workitems submitted to
+ * independent serial queues targeting it with respect to each other,
+ * based on their priorities, while preserving FIFO execution with respect to
+ * each serial queue.
+ *
+ * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed
+ * to all APIs accepting a dispatch queue, except for functions from the
+ * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop
+ * objects. Functions from the dispatch_sync() family on queues targeting
+ * a workloop are still permitted but discouraged for performance reasons.
+ */
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct dispatch_workloop_s *dispatch_workloop_t;
+#else
+DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue);
+#endif
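Editor's note: an illustrative sketch, not part of the header; it relies on dispatch_block_create_with_qos_class() from dispatch/block.h to give the workitems distinct QOS classes. Labels and block bodies are placeholders.

    #include <dispatch/dispatch.h>
    #include <Block.h>

    dispatch_workloop_t wl = dispatch_workloop_create("com.example.workloop");

    /* Pending workitems are invoked highest-QOS first, not FIFO. */
    dispatch_block_t bg = dispatch_block_create_with_qos_class(0,
            QOS_CLASS_UTILITY, 0, ^{ /* lower priority */ });
    dispatch_block_t ui = dispatch_block_create_with_qos_class(0,
            QOS_CLASS_USER_INITIATED, 0, ^{ /* tends to run first */ });
    dispatch_async(wl, bg);
    dispatch_async(wl, ui);
    Block_release(bg);
    Block_release(ui);

    /* dispatch_sync() is disallowed on workloops; use the waiting async form. */
    dispatch_async_and_wait(wl, ^{ /* runs once pending higher-QOS work is done */ });
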
+
+/*!
+ * @function dispatch_workloop_create
+ *
+ * @abstract
+ * Creates a new dispatch workloop to which workitems may be submitted.
+ *
+ * @param label
+ * A string label to attach to the workloop.
+ *
+ * @result
+ * The newly created dispatch workloop.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_workloop_t
+dispatch_workloop_create(const char *_Nullable label);
+
+/*!
+ * @function dispatch_workloop_create_inactive
+ *
+ * @abstract
+ * Creates a new inactive dispatch workloop that can be setup and then
+ * activated.
+ *
+ * @discussion
+ * Creating an inactive workloop allows for it to receive further configuration
+ * before it is activated, at which point it can start executing workitems.
+ *
+ * Submitting workitems to an inactive workloop is undefined and will cause the
+ * process to be terminated.
+ *
+ * @param label
+ * A string label to attach to the workloop.
+ *
+ * @result
+ * The newly created dispatch workloop.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_workloop_t
+dispatch_workloop_create_inactive(const char *_Nullable label);
+
+/*!
+ * @function dispatch_workloop_set_autorelease_frequency
+ *
+ * @abstract
+ * Sets the autorelease frequency of the workloop.
+ *
+ * @discussion
+ * See dispatch_queue_attr_make_with_autorelease_frequency().
+ * The default policy for a workloop is
+ * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM.
+ *
+ * @param workloop
+ * The dispatch workloop to modify.
+ *
+ * This workloop must be inactive; passing an activated object is undefined
+ * and will cause the process to be terminated.
+ *
+ * @param frequency
+ * The requested autorelease frequency.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop,
+        dispatch_autorelease_frequency_t frequency);
+
+/*!
+ * @function dispatch_workloop_set_os_workgroup + * + * @abstract + * Associates an os_workgroup_t with the specified dispatch workloop. + * + * The worker thread will be a member of the specified os_workgroup_t while executing + * work items submitted to the workloop. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + * + * @param workgroup + * The workgroup to associate with this workloop. + * + * The workgroup specified is retained and the previously associated workgroup + * (if any) is released. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_os_workgroup(dispatch_workloop_t workloop, + os_workgroup_t workgroup); + +__END_DECLS + +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 5d58c56ca..c3cce9b81 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -122,14 +122,12 @@ 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; - 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; - 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; @@ -155,7 +153,6 @@ 6E9C6CAA20F9848D00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAB20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; - 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; @@ -165,16 +162,16 @@ 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; - 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; - 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; @@ -204,7 +201,6 @@ 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 
6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; @@ -226,28 +222,55 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9B3713F623D24594001C5C88 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + 9B404D6C255A191A0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DAA255A1E6F0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DC0255A1E7D0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77223A8550B00E4F685 /* eventlink.c in 
Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 
9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; + B609581F221DFA4B00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; B68358901FA77F5B00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; - C00B0DF41C5AEBBE000330B3 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; - C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; - C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - C00B0E001C5AEBBE000330B3 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; - C00B0E011C5AEBBE000330B3 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 
96032E4A0F5CC8C700241C5F /* time.c */; }; - C00B0E021C5AEBBE000330B3 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; - C00B0E031C5AEBBE000330B3 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - C00B0E041C5AEBBE000330B3 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; - C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; C01866A61C5973210040FC07 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; C01866A71C5973210040FC07 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; C01866A81C5973210040FC07 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; @@ -274,6 +297,30 @@ C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6167143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C9C5F80E143C1771006DC718 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A5E26EAF16600362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A5F26EAF16700362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6226EAF1B000362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6326EAF25500362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6426EAF25600362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6526EAF27A00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6626EAF27B00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6726EAF2A000362F72 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E4053A6826EAF2A700362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E4053A6926EAF2A800362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 
6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + E4053A6D26EAF55000362F72 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + E4053A6E26EAF55000362F72 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + E4053A7026EAF55000362F72 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E4053A7126EAF55000362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A7226EAF67D00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -290,6 +337,99 @@ E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43B88322241F19000215272 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88332241F19000215272 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88342241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B994F0F3E85C30006BC96 /* object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88352241F19000215272 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E43B88362241F19000215272 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88372241F19000215272 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88382241F19000215272 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88392241F19000215272 /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h 
*/; }; + E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; + E43B883B2241F19000215272 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883C2241F19000215272 /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883D2241F19000215272 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E43B883E2241F19000215272 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883F2241F19000215272 /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88402241F19000215272 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; + E43B88412241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88422241F19000215272 /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E43B88432241F19000215272 /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88442241F19000215272 /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + E43B88452241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = 96032E4C0F5CC8D100241C5F /* time.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88462241F19000215272 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88472241F19000215272 /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88482241F19000215272 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88492241F19000215272 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884A2241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884B2241F19000215272 /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884C2241F19000215272 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E43B884D2241F19000215272 /* benchmark.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B99350F3E83980006BC96 /* benchmark.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884E2241F19000215272 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; + E43B884F2241F19000215272 /* workloop_private.h in Headers 
*/ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88502241F19000215272 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E43B88512241F19000215272 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E43B88522241F19000215272 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E43B88532241F19000215272 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E43B88542241F19000215272 /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E43B88552241F19000215272 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; + E43B88562241F19000215272 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E43B88572241F19000215272 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E43B88582241F19000215272 /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + E43B88592241F19000215272 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E43B885A2241F19000215272 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E43B885B2241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E43B885C2241F19000215272 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + E43B885E2241F19000215272 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E43B885F2241F19000215272 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88602241F19000215272 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E43B88612241F19000215272 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E43B88622241F19000215272 /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + E43B88632241F19000215272 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + E43B88642241F19000215272 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E43B88652241F19000215272 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E43B88662241F19000215272 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 
+	E43B88682241F19000215272 /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; };
+	E43B88692241F19000215272 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; };
+	E43B886A2241F19000215272 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+	E43B886B2241F19000215272 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; };
+	E43B886C2241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; };
+	E43B886D2241F19000215272 /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+	E43B886E2241F19000215272 /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+	E43B886F2241F19000215272 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; };
+	E43B88702241F19000215272 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; };
+	E43B88722241F19000215272 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; };
+	E43B88732241F19000215272 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; };
+	E43B88742241F19000215272 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; };
+	E43B88752241F19000215272 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; };
+	E43B88762241F19000215272 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
+	E43B88772241F19000215272 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; };
+	E43B88782241F19000215272 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; };
+	E43B88792241F19000215272 /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; };
+	E43B887A2241F19000215272 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; };
+	E43B887B2241F19000215272 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; };
+	E43B887C2241F19000215272 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; };
+	E43B887D2241F19000215272 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; };
+	E43B887E2241F19000215272 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; };
+	E43B887F2241F19000215272 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; };
+	E43B88802241F19000215272 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; };
+	E43B88812241F19000215272 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; };
+	E43B88822241F19000215272 /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; };
+	E43B88832241F19000215272 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; };
+	E43B88842241F19000215272 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; };
+	E43B88852241F19000215272 /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; };
+	E43B88862241F19000215272 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
+	E43B88872241F19000215272 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; };
+	E43B88882241F19000215272 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; };
+	E43B88892241F19000215272 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; };
+	E43B888A2241F19000215272 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; };
+	E43B888B2241F19000215272 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; };
+	E43B888C2241F19000215272 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; };
+	E43B888D2241F19000215272 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; };
+	E43B888E2241F19000215272 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; };
+	E43B888F2241F19000215272 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
 	E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
 	E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
 	E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; };
@@ -324,13 +464,12 @@
 	E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; };
 	E44F9DBE1654405B001DCD38 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; };
 	E44F9DBF165440EF001DCD38 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; };
-	E44F9DC016544115001DCD38 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; };
-	E44F9DC116544115001DCD38 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; };
 	E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; };
 	E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; };
 	E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; };
 	E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; };
 	E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; };
+	E4834144225D27F600954FC6 /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; };
 	E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
 	E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
 	E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; };
@@ -366,7 +505,6 @@
 	E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; };
 	E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; };
 	E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
-	E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; };
 	E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
 	E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; };
 	E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; };
@@ -563,13 +701,6 @@
 	remoteGlobalIDString = FCFA5A9F10D1AE050074F59A;
 	remoteInfo = ddt;
 	};
-	C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = {
-	isa = PBXContainerItemProxy;
-	containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
-	proxyType = 1;
-	remoteGlobalIDString = C00B0DF01C5AEBBE000330B3;
-	remoteInfo = "libdispatch dyld stub";
-	};
 	C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */ = {
 	isa = PBXContainerItemProxy;
 	containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
@@ -598,6 +729,20 @@
 	remoteGlobalIDString = D2AAC045055464E500DB518D;
 	remoteInfo = libdispatch;
 	};
+	E43B882A2241F19000215272 /* PBXContainerItemProxy */ = {
+	isa = PBXContainerItemProxy;
+	containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+	proxyType = 1;
+	remoteGlobalIDString = E4EC121612514715000DDBD1;
+	remoteInfo = "libdispatch mp resolved";
+	};
+	E43B882C2241F19000215272 /* PBXContainerItemProxy */ = {
+	isa = PBXContainerItemProxy;
+	containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
+	proxyType = 1;
+	remoteGlobalIDString = E49BB6CE1E70748100868613;
+	remoteInfo = "libdispatch armv81 resolved";
+	};
 	E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = {
 	isa = PBXContainerItemProxy;
 	containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */;
@@ -637,8 +782,8 @@
 	5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
 	5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = "<group>"; };
 	5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
-	5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = "<group>"; tabWidth = 8; };
-	5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = "<group>"; tabWidth = 8; };
+	5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = "<group>"; };
+	5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = "<group>"; };
 	6E040C631C499B1B00411A2E /* libfirehose_kernel.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_kernel.a; sourceTree = BUILT_PRODUCTS_DIR; };
 	6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose_kernel.xcconfig; sourceTree = "<group>"; };
 	6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = "<group>"; };
@@ -655,7 +800,6 @@
 	6E326ABB1C229895002A6505 /* dispatch_read2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read2.c; sourceTree = "<group>"; };
 	6E326ABD1C22A577002A6505 /* dispatch_io_net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io_net.c; sourceTree = "<group>"; };
 	6E326ABE1C22A577002A6505 /* dispatch_io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io.c; sourceTree = "<group>"; };
-	6E326AD81C233209002A6505 /* dispatch_sync_gc.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_sync_gc.m; sourceTree = "<group>"; };
 	6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sync_on_main.c; sourceTree = "<group>"; };
 	6E326ADC1C234396002A6505 /* dispatch_readsync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_readsync.c; sourceTree = "<group>"; };
 	6E326ADE1C23451A002A6505 /* dispatch_concur.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_concur.c; sourceTree = "<group>"; };
@@ -710,9 +854,10 @@
 	6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = "<group>"; };
 	6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = "<group>"; };
 	6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = "<group>"; };
-	6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = "<group>"; };
 	6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = "<group>"; };
 	6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = "<group>"; };
+	6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_channel.c; sourceTree = "<group>"; };
+	6EC8DC261E3E84610044B652 /* channel_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_private.h; sourceTree = "<group>"; };
 	6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = "<group>"; };
 	6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = "<group>"; };
 	6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = "<group>"; };
@@ -754,7 +899,23 @@
 	96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = "<group>"; };
 	96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = "<group>"; };
 	96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = "<group>"; xcLanguageSpecificationIdentifier = xcode.lang.c; };
+	9B2A588023A412B400A7BB27 /* eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventlink.c; sourceTree = "<group>"; };
+	9B3713F123D24594001C5C88 /* clock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = "<group>"; };
+	9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_base.h; sourceTree = "<group>"; };
+	9B404D6B255A191A0014912B /* apply_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = apply_private.h; sourceTree = "<group>"; };
 	9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = "<group>"; };
+	9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = workgroup_object_private.h; sourceTree = "<group>"; };
+	9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup.h; sourceTree = "<group>"; };
+	9B815576234AFC9800DB5CA3 /* workgroup.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = workgroup.c; path = src/workgroup.c; sourceTree = SOURCE_ROOT; };
+	9B8ED5782350C79100507521 /* workgroup_object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_object.h; sourceTree = "<group>"; };
+	9B9DB6F4234ECE92003F962B /* workgroup_interval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval.h; sourceTree = "<group>"; };
+	9BA656DF236BB55000D13FAE /* workgroup_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_private.h; sourceTree = "<group>"; };
+	9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval_private.h; sourceTree = "<group>"; };
+	9BA7221023E293CB0058472E /* workgroup_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_parallel.h; sourceTree = "<group>"; };
+	9BCAF76A23A8540A00E4F685 /* eventlink_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_private.h; sourceTree = "<group>"; };
+	9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_internal.h; sourceTree = "<group>"; };
+	9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_internal.h; sourceTree = "<group>"; };
+	B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = "<group>"; };
 	B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = "<group>"; };
 	B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = "<group>"; };
 	B683588A1FA77F4900AA0D58 /* time_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time_private.h; sourceTree = "<group>"; };
@@ -765,14 +926,12 @@
 	B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = "<group>"; };
 	B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = "<group>"; };
 	B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = "<group>"; };
-	C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; };
-	C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = "<group>"; };
 	C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; };
 	C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = "<group>"; };
 	C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = "<group>"; };
 	C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
 	C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = "<group>"; };
-	C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = "<group>"; tabWidth = 8; };
+	C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = "<group>"; };
 	C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = "<group>"; };
 	C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = "<group>"; };
 	C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = "<group>"; };
@@ -788,6 +947,7 @@
 	E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = "<group>"; };
 	E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = "<group>"; };
 	E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = "<group>"; };
+	E43B889A2241F19000215272 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
 	E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = "<group>"; };
 	E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = "<group>"; };
 	E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = "<group>"; };
@@ -799,10 +959,10 @@
 	E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = "<group>"; };
 	E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = "<group>"; };
 	E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = "<group>"; };
-	E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = "<group>"; };
+	E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = "<group>"; };
 	E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = "<group>"; };
 	E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = "<group>"; };
-	E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; };
+	E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_private.h; sourceTree = "<group>"; };
 	E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = "<group>"; };
 	E49BB6F21E70748100868613 /* libdispatch_armv81.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_armv81.a; sourceTree = BUILT_PRODUCTS_DIR; };
 	E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -819,6 +979,20 @@
 	E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = "<group>"; };
 	E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = "<group>"; };
 	E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = "<group>"; };
+	E4C97EFF263868F800628947 /* dispatch_once.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_once.c; sourceTree = "<group>"; };
+	E4C97F04263868F800628947 /* dispatch_async_and_wait.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_async_and_wait.c; sourceTree = "<group>"; };
+	E4C97F05263868F800628947 /* os_workgroup_multilang.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_multilang.c; sourceTree = "<group>"; };
+	E4C97F06263868F800628947 /* os_eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink.c; sourceTree = "<group>"; };
+	E4C97F07263868F800628947 /* os_workgroup_basic.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_basic.c; sourceTree = "<group>"; };
+	E4C97F08263868F800628947 /* dispatch_qos_cf.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_qos_cf.c; sourceTree = "<group>"; };
+	E4C97F09263868F800628947 /* os_workgroup_empty2.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty2.c; sourceTree = "<group>"; };
+	E4C97F0A263868F800628947 /* os_workgroup_entitled.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_entitled.c; sourceTree = "<group>"; };
+	E4C97F0B263868F800628947 /* dispatch_plusplus.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = dispatch_plusplus.cpp; sourceTree = "<group>"; };
+	E4C97F0C263868F800628947 /* os_eventlink_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink_empty.c; sourceTree = "<group>"; };
+	E4C97F0D263868F800628947 /* os_workgroup_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty.c; sourceTree = "<group>"; };
+	E4C97F0E263868F800628947 /* dispatch_mach.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_mach.c; sourceTree = "<group>"; };
+	E4C97F0F263868F800628947 /* dispatch_workloop.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_workloop.c; sourceTree = "<group>"; };
+	E4C97F10263868F800628947 /* dispatch_cooperative.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_cooperative.c; sourceTree = "<group>"; };
 	E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = "<group>"; };
 	E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = "<group>"; };
 	E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = "<group>"; };
@@ -919,6 +1093,7 @@
 	E44DB71E11D2FF080074F2AD /* Build Support */,
 	6E9B6AE21BB39793009E324D /* OS Public Headers */,
 	E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */,
+	9BCAF77023A8544100E4F685 /* OS Project Headers */,
 	FC7BEDAA0E83625200161930 /* Dispatch Public Headers */,
 	FC7BEDAF0E83626100161930 /* Dispatch Private Headers */,
 	FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */,
@@ -946,6 +1121,7 @@
 	E43A724F1AF85BBC00BAA921 /* block.cpp */,
 	5AAB45BF10D30B79004407EA /* data.c */,
 	E420866F16027AE500EEE210 /* data.m */,
+	9B2A588023A412B400A7BB27 /* eventlink.c */,
 	E44EBE3B1251659900645D88 /* init.c */,
 	E4B515DC164B32E000E003AF /* introspection.c */,
 	5A27262510F26F1900751FBC /* io.c */,
@@ -961,6 +1137,7 @@
 	C9C5F80D143C1771006DC718 /* transform.c */,
 	6E9955CE1C3B218E0071D40C /* venture.c */,
 	E44A8E6A1805C3E0009FFDB6 /* voucher.c */,
+	9B815576234AFC9800DB5CA3 /* workgroup.c */,
 	6E9C6CA220F9848000EA81C0 /* yield.c */,
 	6EA283D01CAB93270041B2E0 /* libdispatch.codes */,
 	6E29394C1FB9526E00FDAC90 /* libdispatch.plist */,
@@ -982,9 +1159,9 @@
 	E4EC122D12514715000DDBD1 /* libdispatch_mp.a */,
 	E49BB6F21E70748100868613 /* libdispatch_armv81.a */,
 	C01866BD1C5973210040FC07 /* libdispatch.a */,
-	C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */,
 	6E040C631C499B1B00411A2E /* libfirehose_kernel.a */,
 	6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */,
+	E43B889A2241F19000215272 /* libdispatch.dylib */,
 	);
 	name = Products;
 	sourceTree = "<group>";
@@ -1024,7 +1201,13 @@
 	6E9B6AE21BB39793009E324D /* OS Public Headers */ = {
 	isa = PBXGroup;
 	children = (
+	9B38A012234C6D0400E6B90F /* workgroup_base.h */,
+	9BA7221023E293CB0058472E /* workgroup_parallel.h */,
+	9B81556E234AF0D200DB5CA3 /* workgroup.h */,
+	9B9DB6F4234ECE92003F962B /* workgroup_interval.h */,
+	9B8ED5782350C79100507521 /* workgroup_object.h */,
 	E4EB4A2614C35ECE00AA0FA9 /* object.h */,
+	9B3713F123D24594001C5C88 /* clock.h */,
 	);
 	name = "OS Public Headers";
 	path = os;
@@ -1061,12 +1244,15 @@
 	6E8E4EC31C1A57760004F5CC /* dispatch_after.c */,
 	92F3FE8F1BEC686300025962 /* dispatch_api.c */,
 	6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */,
+	E4C97F04263868F800628947 /* dispatch_async_and_wait.c */,
 	6E9926711D01295F000CB89A /* dispatch_block.c */,
 	924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */,
 	6E326AB11C224830002A6505 /* dispatch_cascade.c */,
 	6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */,
+	6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */,
 	6E326ADE1C23451A002A6505 /* dispatch_concur.c */,
 	6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */,
+	E4C97F10263868F800628947 /* dispatch_cooperative.c */,
 	6E8E4EC71C1A61680004F5CC /* dispatch_data.m */,
 	6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */,
 	6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */,
@@ -1075,15 +1261,18 @@
 	6E326ABD1C22A577002A6505 /* dispatch_io_net.c */,
 	6E326ABE1C22A577002A6505 /* dispatch_io.c */,
 	6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */,
-	6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */,
+	E4C97F0E263868F800628947 /* dispatch_mach.c */,
 	B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */,
 	C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */,
+	E4C97EFF263868F800628947 /* dispatch_once.c */,
 	6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */,
 	6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */,
+	E4C97F0B263868F800628947 /* dispatch_plusplus.cpp */,
 	6E326B441C239B61002A6505 /* dispatch_priority.c */,
 	6E326AB51C225477002A6505 /* dispatch_proc.c */,
 	B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */,
 	6E326AB31C224870002A6505 /* dispatch_qos.c */,
+	E4C97F08263868F800628947 /* dispatch_qos_cf.c */,
 	B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */,
 	6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */,
 	6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */,
@@ -1097,7 +1286,6 @@
 	6E326AE01C234780002A6505 /* dispatch_starfish.c */,
 	6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */,
 	6E326B121C239431002A6505 /* dispatch_suspend_timer.c */,
-	6E326AD81C233209002A6505 /* dispatch_sync_gc.m */,
 	6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */,
 	6E326B131C239431002A6505 /* dispatch_timer_bit.c */,
 	6E326B151C239431002A6505 /* dispatch_timer_set_time.c */,
@@ -1109,8 +1297,16 @@
 	6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */,
 	6E326AB71C225FCA002A6505 /* dispatch_vnode.c */,
 	B68330BC1EBCF6080003E71C /* dispatch_wl.c */,
+	E4C97F0F263868F800628947 /* dispatch_workloop.c */,
 	6E67D9171C17BA7200FC98AC /* nsoperation.m */,
+	E4C97F0C263868F800628947 /* os_eventlink_empty.c */,
+	E4C97F06263868F800628947 /* os_eventlink.c */,
 	6E4FC9D11C84123600520351 /* os_venture_basic.c */,
+	E4C97F07263868F800628947 /* os_workgroup_basic.c */,
+	E4C97F0D263868F800628947 /* os_workgroup_empty.c */,
+	E4C97F09263868F800628947 /* os_workgroup_empty2.c */,
+	E4C97F0A263868F800628947 /* os_workgroup_entitled.c */,
+	E4C97F05263868F800628947 /* os_workgroup_multilang.c */,
 	B6AE9A561D7F53C100AC007F /* perf_async_bench.m */,
 	B6AE9A581D7F53CB00AC007F /* perf_bench.m */,
 	6EC670C71E37E201004F10D6 /* perf_mach_async.c */,
@@ -1132,6 +1328,16 @@
 	name = Products;
 	sourceTree = "<group>";
 	};
+	9BCAF77023A8544100E4F685 /* OS Project Headers */ = {
+	isa = PBXGroup;
+	children = (
+	9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */,
+	9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */,
+	);
+	name = "OS Project Headers";
+	path = src;
+	sourceTree = "<group>";
+	};
 	C6A0FF2B0290797F04C91782 /* Documentation */ = {
 	isa = PBXGroup;
 	children = (
@@ -1172,7 +1378,6 @@
 	E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */,
 	E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */,
 	C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */,
-	C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */,
 	E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */,
 	6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */,
 	6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */,
@@ -1233,9 +1438,13 @@
 	isa = PBXGroup;
 	children = (
 	E454569214746F1B00106147 /* object_private.h */,
+	9BCAF76A23A8540A00E4F685 /* eventlink_private.h */,
 	6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */,
 	72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */,
 	6E9955571C3AF7710071D40C /* venture_private.h */,
+	9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */,
+	9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */,
+	9BA656DF236BB55000D13FAE /* workgroup_private.h */,
 	E44A8E711805C473009FFDB6 /* voucher_private.h */,
 	E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */,
 	);
@@ -1330,6 +1539,7 @@
 	721F5C5C0F15520500FF03A6 /* semaphore.h */,
 	FC7BED8D0E8361E600161930 /* source.h */,
 	96032E4C0F5CC8D100241C5F /* time.h */,
+	B6095819221DFA2A00F39D1F /* workloop.h */,
 	E421E5F81716ADA10090DC9B /* introspection.h */,
 	);
 	name = "Dispatch Public Headers";
@@ -1339,18 +1549,20 @@
 	FC7BEDAF0E83626100161930 /* Dispatch Private Headers */ = {
 	isa = PBXGroup;
 	children = (
-	FC7BED930E8361E600161930 /* private.h */,
+	9B404D6B255A191A0014912B /* apply_private.h */,
+	961B99350F3E83980006BC96 /* benchmark.h */,
+	6EC8DC261E3E84610044B652 /* channel_private.h */,
 	C913AC0E143BD34800B78976 /* data_private.h */,
 	E48AF55916E70FD9004105FF /* io_private.h */,
+	2BE17C6318EA305E002CA4E8 /* layout_private.h */,
+	E4ECBAA415253C25002C313C /* mach_private.h */,
+	C90144641C73A845002638FC /* module.modulemap */,
+	FC7BED930E8361E600161930 /* private.h */,
 	96BC39BC0F3EBAB100C59689 /* queue_private.h */,
-	6E70181C1F4EB51B0077C1DC /* workloop_private.h */,
 	FCEF047F0F5661960067401F /* source_private.h */,
-	E4ECBAA415253C25002C313C /* mach_private.h */,
 	B683588A1FA77F4900AA0D58 /* time_private.h */,
-	C90144641C73A845002638FC /* module.modulemap */,
-	961B99350F3E83980006BC96 /* benchmark.h */,
+	6E70181C1F4EB51B0077C1DC /* workloop_private.h */,
 	E4B515D7164B2DFB00E003AF /* introspection_private.h */,
-	2BE17C6318EA305E002CA4E8 /* layout_private.h */,
 	);
 	name = "Dispatch Private Headers";
 	path = private;
@@ -1360,20 +1572,20 @@
 	isa = PBXGroup;
 	children = (
 	2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */,
-	FC7BED8F0E8361E600161930 /* internal.h */,
-	E44757D917F4572600B82CA1 /* inline_internal.h */,
 	E4C1ED6E1263E714000D3C8B /* data_internal.h */,
+	E44757D917F4572600B82CA1 /* inline_internal.h */,
+	FC7BED8F0E8361E600161930 /* internal.h */,
 	5A0095A110F274B0000E2A31 /* io_internal.h */,
 	6E4BACC91D48A89500B562AE /* mach_internal.h */,
 	965ECC200F3EAB71004DDD89 /* object_internal.h */,
 	96929D950F3EA2170041FF5D /* queue_internal.h */,
 	5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */,
+	96929D830F3EA1020041FF5D /* shims.h */,
 	FC0B34780FA2851C0080FFA0 /* source_internal.h */,
+	E422A0D412A557B5005E5BDB /* trace.h */,
 	6E9956061C3B21AA0071D40C /* venture_internal.h */,
 	E44A8E7418066276009FFDB6 /* voucher_internal.h */,
-	E422A0D412A557B5005E5BDB /* trace.h */,
 	E44F9DA816543F79001DCD38 /* introspection_internal.h */,
-	96929D830F3EA1020041FF5D /* shims.h */,
 	6E5ACCAE1D3BF27F007DA2B4 /* event */,
 	6EF0B2661BA8C43D007FA4F6 /* firehose */,
 	FC1832A0109923B3003403D5 /* shims */,
@@ -1410,41 +1622,57 @@
 	isa = PBXHeadersBuildPhase;
 	buildActionMask = 2147483647;
 	files = (
+	9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */,
+	9B8ED5792350C79100507521 /* workgroup_object.h in Headers */,
 	FC7BEDA50E8361E600161930 /* dispatch.h in Headers */,
 	72CC94300ECCD8750031B751 /* base.h in Headers */,
 	961B99500F3E85C30006BC96 /* object.h in Headers */,
-	E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */,
+	9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */,
+	6EC8DC271E3E84610044B652 /* channel_private.h in Headers */,
 	FC7BED9A0E8361E600161930 /* queue.h in Headers */,
+	9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */,
 	FC7BED9C0E8361E600161930 /* source.h in Headers */,
 	6E9955581C3AF7710071D40C /* venture_private.h in Headers */,
 	E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */,
 	721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */,
 	FC5C9C1E0EADABE3006E462D /* group.h in Headers */,
-	6EFBDA4B1D61A0D600282887 /* priority.h in Headers */,
+	9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */,
 	96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */,
 	5AAB45C410D30CC7004407EA /* io.h in Headers */,
-	E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
 	C90144651C73A8A3002638FC /* module.modulemap in Headers */,
-	E4630253176162D400E11F4C /* atomic_sfb.h in Headers */,
 	5AAB45C610D30D0C004407EA /* data.h in Headers */,
-	6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */,
+	E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */,
 	96032E4D0F5CC8D100241C5F /* time.h in Headers */,
 	FC7BEDA20E8361E600161930 /* private.h in Headers */,
 	E4D76A9318E325D200B1F98B /* block.h in Headers */,
+	9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */,
 	C913AC0F143BD34800B78976 /* data_private.h in Headers */,
 	96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */,
+	9B404D6C255A191A0014912B /* apply_private.h in Headers */,
 	C90144661C73A9F6002638FC /* module.modulemap in Headers */,
 	FCEF04800F5661960067401F /* source_private.h in Headers */,
-	F7DC045B2060BBBE00C90737 /* target.h in Headers */,
+	9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */,
 	961B99360F3E83980006BC96 /* benchmark.h in Headers */,
-	FC7BED9E0E8361E600161930 /* internal.h in Headers */,
+	E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */,
+	B609581E221DFA2A00F39D1F /* workloop.h in Headers */,
 	6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */,
+	9B3713F623D24594001C5C88 /* clock.h in Headers */,
+	E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */,
+	E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */,
+	E454569314746F1B00106147 /* object_private.h in Headers */,
 	965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */,
+	FC7BED9E0E8361E600161930 /* internal.h in Headers */,
+	E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */,
 	96929D960F3EA2170041FF5D /* queue_internal.h in Headers */,
+	E4630253176162D400E11F4C /* atomic_sfb.h in Headers */,
+	E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */,
 	FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */,
+	9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */,
+	6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */,
+	6EFBDA4B1D61A0D600282887 /* priority.h in Headers */,
+	F7DC045B2060BBBE00C90737 /* target.h in Headers */,
 	5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */,
 	E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */,
-	E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */,
 	5A0095A210F274B0000E2A31 /* io_internal.h in Headers */,
 	FC1832A8109923C7003403D5 /* tsd.h in Headers */,
 	6EA793891D458A5800929B1B /* event_config.h in Headers */,
@@ -1464,10 +1692,9 @@
 	6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */,
 	6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */,
 	E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */,
-	E454569314746F1B00106147 /* object_private.h in Headers */,
+	9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */,
 	B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */,
 	6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */,
-	E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */,
 	E48AF55A16E70FD9004105FF /* io_private.h in Headers */,
 	E4ECBAA515253C25002C313C /* mach_private.h in Headers */,
 	2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */,
@@ -1475,14 +1702,108 @@
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	};
+	E43B88312241F19000215272 /* Headers */ = {
+	isa = PBXHeadersBuildPhase;
+	buildActionMask = 2147483647;
+	files = (
+	9BA7221723E294140058472E /* workgroup_parallel.h in Headers */,
+	E43B88322241F19000215272 /* dispatch.h in Headers */,
+	E43B88332241F19000215272 /* base.h in Headers */,
+	E43B88342241F19000215272 /* object.h in Headers */,
+	E43B88352241F19000215272 /* inline_internal.h in Headers */,
+	E43B88362241F19000215272 /* channel_private.h in Headers */,
+	E43B88372241F19000215272 /* queue.h in Headers */,
+	E43B88382241F19000215272 /* source.h in Headers */,
+	E43B88392241F19000215272 /* venture_private.h in Headers */,
+	E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */,
+	E43B883B2241F19000215272 /* semaphore.h in Headers */,
+	9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */,
+	9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */,
+	9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */,
+	9BE52545238747D30041C2A0 /* workgroup.h in Headers */,
+	E43B883C2241F19000215272 /* group.h in Headers */,
+	E43B883D2241F19000215272 /* priority.h in Headers */,
+	E43B883E2241F19000215272 /* once.h in Headers */,
+	E43B883F2241F19000215272 /* io.h in Headers */,
+	9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */,
+	E43B88402241F19000215272 /* voucher_internal.h in Headers */,
+	E43B88412241F19000215272 /* module.modulemap in Headers */,
+	E4053A6226EAF1B000362F72 /* workgroup_internal.h in Headers */,
+	E43B88422241F19000215272 /* atomic_sfb.h in Headers */,
+	E43B88432241F19000215272 /* data.h in Headers */,
+	E43B88442241F19000215272 /* firehose_internal.h in Headers */,
+	E43B88452241F19000215272 /* time.h in Headers */,
+	E43B88462241F19000215272 /* private.h in Headers */,
+	E43B88472241F19000215272 /* block.h in Headers */,
+	E43B88482241F19000215272 /* data_private.h in Headers */,
+	9B404DC0255A1E7D0014912B /* apply_private.h in Headers */,
+	E43B88492241F19000215272 /* queue_private.h in Headers */,
+	E43B884A2241F19000215272 /* module.modulemap in Headers */,
+	9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */,
+	9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */,
+	E43B884B2241F19000215272 /* source_private.h in Headers */,
+	9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */,
+	E43B884C2241F19000215272 /* target.h in Headers */,
+	E43B884D2241F19000215272 /* benchmark.h in Headers */,
+	E43B884E2241F19000215272 /* internal.h in Headers */,
+	E4834144225D27F600954FC6 /* workloop.h in Headers */,
+	E43B884F2241F19000215272 /* workloop_private.h in Headers */,
+	E43B88502241F19000215272 /* object_internal.h in Headers */,
+	E43B88512241F19000215272 /* queue_internal.h in Headers */,
+	E43B88522241F19000215272 /* source_internal.h in Headers */,
+	E43B88532241F19000215272 /* semaphore_internal.h in Headers */,
+	E43B88542241F19000215272 /* data_internal.h in Headers */,
+	E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */,
+	E43B88552241F19000215272 /* voucher_private.h in Headers */,
+	E43B88562241F19000215272 /* io_internal.h in Headers */,
+	E43B88572241F19000215272 /* tsd.h in Headers */,
+	E43B88582241F19000215272 /* event_config.h in Headers */,
+	E43B88592241F19000215272 /* atomic.h in Headers */,
+	E43B885A2241F19000215272 /* shims.h in Headers */,
+	E43B885B2241F19000215272 /* time.h in Headers */,
+	E43B885C2241F19000215272 /* mach_internal.h in Headers */,
+	E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */,
+	E43B885E2241F19000215272 /* yield.h in Headers */,
+	E43B885F2241F19000215272 /* layout_private.h in Headers */,
+	E43B88602241F19000215272 /* perfmon.h in Headers */,
+	E43B88612241F19000215272 /* config.h in Headers */,
+	E43B88622241F19000215272 /* venture_internal.h in Headers */,
+	E43B88632241F19000215272 /* lock.h in Headers */,
+	E43B88642241F19000215272 /* trace.h in Headers */,
+	E43B88652241F19000215272 /* getprogname.h in Headers */,
+	E43B88662241F19000215272 /* event_internal.h in Headers */,
+	E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */,
+	E43B88682241F19000215272 /* hw_config.h in Headers */,
+	E43B88692241F19000215272 /* object_private.h in Headers */,
+	E43B886A2241F19000215272 /* time_private.h in Headers */,
+	E43B886B2241F19000215272 /* workqueue_internal.h in Headers */,
+	E43B886C2241F19000215272 /* object.h in Headers */,
+	E43B886D2241F19000215272 /* io_private.h in Headers */,
+	E43B886E2241F19000215272 /* mach_private.h in Headers */,
+	E4053A5F26EAF16700362F72 /* clock.h in Headers */,
+	E43B886F2241F19000215272 /* allocator_internal.h in Headers */,
+	E43B88702241F19000215272 /* introspection_internal.h in Headers */,
+	);
+	runOnlyForDeploymentPostprocessing = 0;
+	};
 	E49F24AA125D57FA0057C971 /* Headers */ = {
 	isa = PBXHeadersBuildPhase;
 	buildActionMask = 2147483647;
 	files = (
+	9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */,
+	6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */,
 	E49F24AB125D57FA0057C971 /* dispatch.h in Headers */,
 	E49F24AC125D57FA0057C971 /* base.h in Headers */,
+	E4053A6326EAF25500362F72 /* mach_internal.h in Headers */,
 	6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */,
 	6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */,
+	9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */,
+	9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */,
+	E4053A6526EAF27A00362F72 /* target.h in Headers */,
+	9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */,
+	E4053A5E26EAF16600362F72 /* clock.h in Headers */,
+	9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */,
+	E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */,
 	E49F24AD125D57FA0057C971 /* object.h in Headers */,
 	E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */,
 	E49F24AE125D57FA0057C971 /* queue.h in Headers */,
@@ -1491,17 +1812,23 @@
 	E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */,
 	E49F24B0125D57FA0057C971 /* semaphore.h in Headers */,
 	E49F24B1125D57FA0057C971 /* group.h in Headers */,
+	9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */,
 	E49F24B2125D57FA0057C971 /* once.h in Headers */,
 	E49F24B3125D57FA0057C971 /* io.h in Headers */,
 	6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */,
+	E4053A7226EAF67D00362F72 /* module.modulemap in Headers */,
 	E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */,
 	E4630252176162D300E11F4C /* atomic_sfb.h in Headers */,
 	E49F24B4125D57FA0057C971 /* data.h in Headers */,
 	E49F24B5125D57FA0057C971 /* time.h in Headers */,
+	9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */,
 	E49F24B6125D57FA0057C971 /* private.h in Headers */,
+	9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */,
+	9B404DAA255A1E6F0014912B /* apply_private.h in Headers */,
 	E4D76A9418E325D200B1F98B /* block.h in Headers */,
 	E49F24B7125D57FA0057C971 /* queue_private.h in Headers */,
 	E49F24B8125D57FA0057C971 /* source_private.h in Headers */,
+	9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */,
 	E49F24B9125D57FA0057C971 /* benchmark.h in Headers */,
 	E49F24BA125D57FA0057C971 /* internal.h in Headers */,
 	E49F24BC125D57FA0057C971 /* object_internal.h in Headers */,
@@ -1513,6 +1840,7 @@
 	6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */,
 	E49F24BF125D57FA0057C971 /* io_internal.h in Headers */,
 	E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */,
+	B609581F221DFA4B00F39D1F /* workloop.h in Headers */,
 	E49F24C1125D57FA0057C971 /* tsd.h in Headers */,
 	E49F24C2125D57FA0057C971 /* atomic.h in Headers */,
 	E49F24C3125D57FA0057C971 /* shims.h in Headers */,
@@ -1525,6 +1853,7 @@
 	E49F24C6125D57FA0057C971 /* config.h in Headers */,
 	E422A0D612A557B5005E5BDB /* trace.h in Headers */,
 	6E9956091C3B21B40071D40C /* venture_internal.h in Headers */,
+	E4053A6926EAF2A800362F72 /* priority.h in Headers */,
 	6EF2CAB41C889D65001ABE83 /* lock.h in Headers */,
 	E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */,
 	E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */,
@@ -1533,6 +1862,8 @@
 	E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */,
 	E4ECBAA615253D17002C313C /* mach_private.h in Headers */,
 	E48AF55B16E72D44004105FF /* io_private.h in Headers */,
+	E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */,
+	E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */,
 	2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */,
 	E43A710615783F7E0012D38D /* data_private.h in Headers */,
 	E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers */,
@@ -1545,6 +1876,13 @@
 	files = (
 	E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */,
 	E44F9DAF16544026001DCD38 /* internal.h in Headers */,
+	E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */,
+	E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */,
+	E4053A6D26EAF55000362F72 /* workgroup.h in Headers */,
+	E4053A6E26EAF55000362F72 /* workgroup_interval.h in Headers */,
+	E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */,
+	E4053A7026EAF55000362F72 /* object.h in Headers */,
+	E4053A7126EAF55000362F72 /* clock.h in Headers */,
 	E421E5F91716ADA10090DC9B /* introspection.h in Headers */,
 	6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */,
 	E44F9DB216544032001DCD38 /* object_internal.h in Headers */,
@@ -1552,9 +1890,11 @@
 	6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */,
 	E44F9DB51654403F001DCD38 /* source_internal.h in Headers */,
 	E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */,
+	E4053A6726EAF2A000362F72 /* time.h in Headers */,
 	E44F9DB01654402B001DCD38 /* data_internal.h in Headers */,
 	6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */,
 	6E9956081C3B21B30071D40C /* venture_internal.h in Headers */,
+	E4053A6826EAF2A700362F72 /* priority.h in Headers */,
 	E44F9DB11654402E001DCD38 /* io_internal.h in Headers */,
 	E4630251176162D200E11F4C /* atomic_sfb.h in Headers */,
 	E44F9DBE1654405B001DCD38 /* tsd.h in Headers */,
@@ -1568,12 +1908,12 @@
 	E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */,
 	E44F9DBF165440EF001DCD38 /* config.h in Headers */,
 	E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */,
+	E4053A6426EAF25600362F72 /* mach_internal.h in Headers */,
 	E44F9DB616544043001DCD38 /* trace.h in Headers */,
 	E44F9DB916544056001DCD38 /* getprogname.h in Headers */,
 	E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */,
 	E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */,
-	E44F9DC116544115001DCD38 /* object_private.h in Headers */,
-	E44F9DC016544115001DCD38 /* object.h in Headers */,
+	E4053A6626EAF27B00362F72 /* target.h in Headers */,
 	E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */,
 	E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */,
 	);
@@ -1584,7 +1924,7 @@
 /* Begin PBXLegacyTarget section */
 	92F3FECA1BEC69E500025962 /* darwintests */ = {
 	isa = PBXLegacyTarget;
-	buildArgumentsString = "$(ACTION)";
+	buildArgumentsString = "-j -k $(ACTION)";
 	buildConfigurationList = 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */;
 	buildPhases = (
 	);
@@ -1632,28 +1972,11 @@
 	productReference = 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */;
 	productType = "com.apple.product-type.library.static";
 	};
-	C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */ = {
-	isa = PBXNativeTarget;
-	buildConfigurationList = C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */;
-	buildPhases = (
-	C00B0DF11C5AEBBE000330B3 /* Sources */,
-	C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */,
-	);
-	buildRules = (
-	);
-	dependencies = (
-	);
-	name = "libdispatch dyld stub";
-	productName = libdispatch;
-	productReference = C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */;
-	productType = "com.apple.product-type.library.static";
-	};
 	C01866A41C5973210040FC07 /* libdispatch mp static */ = {
 	isa = PBXNativeTarget;
 	buildConfigurationList = C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */;
 	buildPhases = (
 	C01866A51C5973210040FC07 /* Sources */,
-	C01866C01C59777B0040FC07 /* Symlink to the loaderd path */,
 	);
 	buildRules = (
 	);
@@ -1684,13 +2007,32 @@
 	E47D6ECD125FEBA10070D91C /* PBXTargetDependency */,
 	E49BB6F81E7074C100868613 /* PBXTargetDependency */,
 	C01866C21C597AEA0040FC07 /* PBXTargetDependency */,
-	C00B0E141C5AEED6000330B3 /* PBXTargetDependency */,
 	);
 	name = libdispatch;
 	productName = libdispatch;
 	productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */;
 	productType = "com.apple.product-type.library.dynamic";
 	};
+	E43B88262241F19000215272 /* libdispatch_driverkit */ = {
+	isa = PBXNativeTarget;
+	buildConfigurationList = E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */;
+	buildPhases = (
+	E43B88312241F19000215272 /* Headers */,
+	E43B88712241F19000215272 /* Sources */,
+	E43B88922241F19000215272 /* Install Headers */,
+	9BE52551238748C60041C2A0 /* ShellScript */,
+	);
+	buildRules = (
+	);
+	dependencies = (
+	E43B88292241F19000215272 /* PBXTargetDependency */,
+	E43B882B2241F19000215272 /* PBXTargetDependency */,
+	);
+	name = libdispatch_driverkit;
+	productName = libdispatch;
+	productReference = E43B889A2241F19000215272 /* libdispatch.dylib */;
+	productType = "com.apple.product-type.library.dynamic";
+	};
 	E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */ = {
 	isa = PBXNativeTarget;
 	buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */;
@@ -1768,9 +2110,8 @@
 	08FB7793FE84155DC02AAC07 /* Project object */ = {
 	isa = PBXProject;
 	attributes = {
-	BuildIndependentTargetsInParallel = YES;
 	DefaultBuildSystemTypeForWorkspace = Latest;
-	LastUpgradeCheck = 1010;
+	LastUpgradeCheck = 1100;
 	TargetAttributes = {
 	3F3C9326128E637B0042B1F7 = {
 	ProvisioningStyle = Manual;
 	};
@@ -1801,9 +2142,6 @@
 	CreatedOnToolsVersion = 9.3;
 	ProvisioningStyle = Automatic;
 	};
-	C00B0DF01C5AEBBE000330B3 = {
-	ProvisioningStyle = Manual;
-	};
 	C01866A41C5973210040FC07 = {
 	ProvisioningStyle = Manual;
 	};
@@ -1857,8 +2195,8 @@
 	E4EC121612514715000DDBD1 /* libdispatch mp resolved */,
 	E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */,
 	E4B51595164B2DA300E003AF /* libdispatch introspection */,
+	E43B88262241F19000215272 /* libdispatch_driverkit */,
 	C01866A41C5973210040FC07 /* libdispatch mp static */,
-	C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */,
 	6E43553E215B5D9D00C13177 /* libdispatch_introspection */,
 	6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */,
 	3F3C9326128E637B0042B1F7 /* libdispatch_Sim */,
@@ -1954,38 +2292,25 @@
 	shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\"\n";
 	showEnvVarsInLog = 0;
 	};
-	C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = {
+	9BE52551238748C60041C2A0 /* ShellScript */ = {
 	isa = PBXShellScriptBuildPhase;
-	buildActionMask = 2147483647;
+	buildActionMask = 8;
 	files = (
 	);
-	inputPaths = (
-	"$(SRCROOT)/xcodescripts/run-on-install.sh",
-	);
-	name = "Symlink libdispatch.a -> libdispatch_dyld_target.a";
-	outputPaths = (
-	"${DSTROOT}${INSTALL_PATH}/libdispatch.a",
-	);
-	runOnlyForDeploymentPostprocessing = 0;
-	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ${PRODUCT_NAME}.a ${SCRIPT_OUTPUT_FILE_0}";
-	showEnvVarsInLog = 0;
-	};
-	C01866C01C59777B0040FC07 /* Symlink to the loaderd path */ = {
-	isa = PBXShellScriptBuildPhase;
-	buildActionMask = 2147483647;
-	files = (
+	inputFileListPaths = (
 	);
 	inputPaths = (
-	"$(SRCROOT)/xcodescripts/run-on-install.sh",
+	"$(SRCROOT)/xcodescripts/postprocess-headers.sh",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h",
+	);
+	outputFileListPaths = (
 	);
-	name = "Symlink to the loaderd path";
 	outputPaths = (
-	"${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a",
 	);
-	runOnlyForDeploymentPostprocessing = 0;
+	runOnlyForDeploymentPostprocessing = 1;
 	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/${PRODUCT_NAME}.a ${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E4128EB213B9612700ABB2CB /* Postprocess Headers */ = {
@@ -1995,13 +2320,16 @@
 	);
 	inputPaths = (
 	"$(SRCROOT)/xcodescripts/postprocess-headers.sh",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h",
+	"$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h",
	);
 	name = "Postprocess Headers";
 	outputPaths = (
 	);
 	runOnlyForDeploymentPostprocessing = 1;
 	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = {
@@ -2022,6 +2350,37 @@
 	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
+	E43B88922241F19000215272 /* Install Headers */ = {
+	isa = PBXShellScriptBuildPhase;
+	buildActionMask = 2147483647;
+	files = (
+	);
+	inputPaths = (
+	"$(SRCROOT)/xcodescripts/install-headers.sh",
+	"$(SRCROOT)/os/object.h",
+	"$(SRCROOT)/os/workgroup.h",
+	"$(SRCROOT)/os/workgroup_base.h",
+	"$(SRCROOT)/os/workgroup_interval.h",
+	"$(SRCROOT)/os/workgroup_object.h",
+	"$(SRCROOT)/os/workgroup_parallel.h",
+	"$(SRCROOT)/os/clock.h",
+	"$(SRCROOT)/os/object_private.h",
+	"$(SRCROOT)/os/venture_private.h",
+	"$(SRCROOT)/os/voucher_private.h",
+	"$(SRCROOT)/os/voucher_activity_private.h",
+	"$(SRCROOT)/os/workgroup_private.h",
+	"$(SRCROOT)/os/workgroup_object_private.h",
+	"$(SRCROOT)/os/workgroup_interval_private.h",
+	"$(SRCROOT)/os/eventlink_private.h",
+	);
+	name = "Install Headers";
+	outputPaths = (
+	);
+	runOnlyForDeploymentPostprocessing = 0;
+	shellPath = "/bin/bash -e";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
+	showEnvVarsInLog = 0;
+	};
 	E482F1C512DBAA110030614D /* Postprocess Headers */ = {
 	isa = PBXShellScriptBuildPhase;
 	buildActionMask = 8;
@@ -2029,6 +2388,10 @@
 	);
 	inputPaths = (
 	"$(SRCROOT)/xcodescripts/postprocess-headers.sh",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h",
+	"$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h",
+	"$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_parallel.h",
 	);
 	name = "Postprocess Headers";
 	outputPaths = (
@@ -2076,7 +2439,7 @@
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	shellPath = "/bin/bash -e";
-	shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"";
+	shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E49F24D7125D57FA0057C971 /* Install Manpages */ = {
@@ -2103,22 +2466,27 @@
 	inputPaths = (
 	"$(SRCROOT)/xcodescripts/install-headers.sh",
 	"$(SRCROOT)/os/object.h",
+	"$(SRCROOT)/os/workgroup.h",
+	"$(SRCROOT)/os/workgroup_base.h",
+	"$(SRCROOT)/os/workgroup_interval.h",
+	"$(SRCROOT)/os/workgroup_object.h",
+	"$(SRCROOT)/os/workgroup_parallel.h",
+	"$(SRCROOT)/os/clock.h",
 	"$(SRCROOT)/os/object_private.h",
 	"$(SRCROOT)/os/venture_private.h",
 	"$(SRCROOT)/os/voucher_private.h",
 	"$(SRCROOT)/os/voucher_activity_private.h",
+	"$(SRCROOT)/os/workgroup_private.h",
+	"$(SRCROOT)/os/workgroup_interval_private.h",
+	"$(SRCROOT)/os/workgroup_object_private.h",
+	"$(SRCROOT)/os/eventlink_private.h",
 	);
 	name = "Install Headers";
 	outputPaths = (
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h",
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E4EB4A3014C3A14000AA0FA9 /* Install Headers */ = {
@@ -2129,22 +2497,28 @@
 	inputPaths = (
 	"$(SRCROOT)/xcodescripts/install-headers.sh",
 	"$(SRCROOT)/os/object.h",
+	"$(SRCROOT)/os/workgroup.h",
+	"$(SRCROOT)/os/workgroup_base.h",
+	"$(SRCROOT)/os/workgroup_interval.h",
+	"$(SRCROOT)/os/workgroup_object.h",
+	"$(SRCROOT)/os/workgroup_parallel.h",
+	"$(SRCROOT)/os/clock.h",
 	"$(SRCROOT)/os/object_private.h",
 	"$(SRCROOT)/os/venture_private.h",
 	"$(SRCROOT)/os/voucher_private.h",
 	"$(SRCROOT)/os/voucher_activity_private.h",
+	"$(SRCROOT)/os/workgroup_interval_private.h",
+	"$(SRCROOT)/os/workgroup_private.h",
+	"$(SRCROOT)/os/workgroup_interval_private.h",
+	"$(SRCROOT)/os/eventlink_private.h",
+	"$(SRCROOT)/os/workgroup_object_private.h",
 	);
 	name = "Install Headers";
 	outputPaths = (
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h",
-	"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h",
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E4EC121712514715000DDBD1 /* Mig Headers */ = {
@@ -2169,7 +2543,7 @@
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	shellPath = "/bin/bash -e";
-	shellScript = ". \"${SCRIPT_INPUT_FILE_3}\"";
+	shellScript = ". \"${SCRIPT_INPUT_FILE_3}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E4EC122512514715000DDBD1 /* Symlink normal variant */ = {
@@ -2185,7 +2559,7 @@
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	shellPath = "/bin/bash -e";
-	shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"";
+	shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n";
 	showEnvVarsInLog = 0;
 	};
 	E4FB8E8F218CD68A004B7A25 /* Install Plists */ = {
@@ -2231,44 +2605,12 @@
 	);
 	runOnlyForDeploymentPostprocessing = 0;
 	};
-	C00B0DF11C5AEBBE000330B3 /* Sources */ = {
-	isa = PBXSourcesBuildPhase;
-	buildActionMask = 2147483647;
-	files = (
-	6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */,
-	C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */,
-	C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */,
-	C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */,
-	C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */,
-	C00B0DF41C5AEBBE000330B3 /* init.c in Sources */,
-	C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */,
-	C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */,
-	6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */,
-	C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */,
-	C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */,
-	C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */,
-	C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */,
-	C00B0E001C5AEBBE000330B3 /* source.c in Sources */,
-	6E4BACC81D48A42400B562AE /* mach.c in Sources */,
-	6EA9629E1D48622C00759D53 /* event.c in Sources */,
-	6EA962A61D48625500759D53 /* event_kevent.c in Sources */,
-	6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */,
-	C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */,
-	C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */,
-	C00B0E031C5AEBBE000330B3 /* io.c in Sources */,
-	C00B0E021C5AEBBE000330B3 /* data.c in Sources */,
-	C00B0E041C5AEBBE000330B3 /* transform.c in
Sources */, - C00B0E011C5AEBBE000330B3 /* time.c in Sources */, - C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, - E49BB70A1E70A3B000868613 /* venture.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; C01866A51C5973210040FC07 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */, + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */, 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */, C01866A61C5973210040FC07 /* protocol.defs in Sources */, C01866AB1C5973210040FC07 /* firehose.defs in Sources */, @@ -2315,9 +2657,11 @@ 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */, 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */, 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, 6EA962971D48622600759D53 /* event.c in Sources */, @@ -2336,10 +2680,51 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E43B88712241F19000215272 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */, + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */, + E43B88722241F19000215272 /* provider.d in Sources */, + E43B88732241F19000215272 /* protocol.defs in Sources */, + E43B88742241F19000215272 /* firehose.defs in Sources */, + E43B88752241F19000215272 /* firehose_reply.defs in Sources */, + E43B88762241F19000215272 /* resolver.c in Sources */, + E43B88772241F19000215272 /* init.c in Sources */, + E43B88782241F19000215272 /* object.c in Sources */, + E43B88792241F19000215272 /* object.m in Sources */, + E43B887A2241F19000215272 /* block.cpp in Sources */, + E43B887B2241F19000215272 /* lock.c in Sources */, + E43B887C2241F19000215272 /* semaphore.c in Sources */, + E43B887D2241F19000215272 /* once.c in Sources */, + E43B887E2241F19000215272 /* queue.c in Sources */, + E43B887F2241F19000215272 /* apply.c in Sources */, + E43B88802241F19000215272 /* source.c in Sources */, + E43B88812241F19000215272 /* yield.c in Sources */, + E43B88822241F19000215272 /* mach.c in Sources */, + E43B88832241F19000215272 /* event.c in Sources */, + E43B88842241F19000215272 /* event_kevent.c in Sources */, + E43B88852241F19000215272 /* event_epoll.c in Sources */, + E43B88862241F19000215272 /* voucher.c in Sources */, + E43B88872241F19000215272 /* firehose_buffer.c in Sources */, + E43B88882241F19000215272 /* io.c in Sources */, + E43B88892241F19000215272 /* data.c in Sources */, + E43B888A2241F19000215272 /* data.m in Sources */, + E43B888B2241F19000215272 /* transform.c in Sources */, + E43B888C2241F19000215272 /* time.c in Sources */, + E43B888D2241F19000215272 /* allocator.c in Sources */, + E43B888E2241F19000215272 /* benchmark.c in Sources */, + E43B888F2241F19000215272 /* venture.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; E49BB6D01E70748100868613 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */, + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */, E49BB6D11E70748100868613 /* provider.d in 
Sources */, E49BB6D21E70748100868613 /* protocol.defs in Sources */, E49BB6D41E70748100868613 /* firehose.defs in Sources */, @@ -2377,6 +2762,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */, + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */, E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, @@ -2414,6 +2801,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */, + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */, E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, @@ -2452,6 +2841,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */, + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */, E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, @@ -2528,11 +2919,6 @@ name = ddt; targetProxy = 9BEBA57720127D4400E6FD0D /* PBXContainerItemProxy */; }; - C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; - targetProxy = C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */; - }; C01866C21C597AEA0040FC07 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = C01866A41C5973210040FC07 /* libdispatch mp static */; @@ -2548,6 +2934,16 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; + E43B88292241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; + targetProxy = E43B882A2241F19000215272 /* PBXContainerItemProxy */; + }; + E43B882B2241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */; + targetProxy = E43B882C2241F19000215272 /* PBXContainerItemProxy */; + }; E47D6ECD125FEBA10070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; @@ -2713,43 +3109,53 @@ }; name = Debug; }; - C00B0E081C5AEBBE000330B3 /* Release */ = { + C01866BB1C5973210040FC07 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Release; }; - C00B0E091C5AEBBE000330B3 /* Debug */ = { + C01866BC1C5973210040FC07 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Debug; }; - C01866BB1C5973210040FC07 /* Release */ = { + C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Release; }; - C01866BC1C5973210040FC07 /* Debug */ = { + C927F35C10FD7F0600C5AB8B /* Debug */ = { isa = 
XCBuildConfiguration; - baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Debug; }; - C927F35B10FD7F0600C5AB8B /* Release */ = { + E43B88982241F19000215272 /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + DRIVERKIT = 1; + DRIVERKITROOT = /System/DriverKit; + SDKROOT = driverkit.internal; + SUPPORTED_PLATFORMS = macosx; }; name = Release; }; - C927F35C10FD7F0600C5AB8B /* Debug */ = { + E43B88992241F19000215272 /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + DRIVERKIT = 1; + DRIVERKITROOT = /System/DriverKit; + SDKROOT = driverkit.internal; + SUPPORTED_PLATFORMS = macosx; }; name = Debug; }; @@ -2945,15 +3351,6 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - C00B0E081C5AEBBE000330B3 /* Release */, - C00B0E091C5AEBBE000330B3 /* Debug */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -2972,6 +3369,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E43B88982241F19000215272 /* Release */, + E43B88992241F19000215272 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/man/dispatch.3 b/man/dispatch.3 index 6e5cfed48..b1c4309d4 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -13,14 +13,16 @@ concurrent execution via the core functions described in .Xr dispatch_async 3 and .Xr dispatch_apply 3 . .Pp -Dispatch queues are the basic units of organization of blocks. Several queues -are created by default, and applications may create additional queues for their -own use. See +Dispatch queues are the basic units of organization of blocks. +Several queues are created by default, and applications may create additional +queues for their own use. +See .Xr dispatch_queue_create 3 for more information. .Pp Dispatch groups allow applications to track the progress of blocks submitted to -queues and take action when the blocks complete. See +queues and take action when the blocks complete. +See .Xr dispatch_group_create 3 for more information. .Pp diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 index db34af0e3..7463d1c5f 100644 --- a/man/dispatch_after.3 +++ b/man/dispatch_after.3 @@ -40,6 +40,15 @@ and the time at which the function is called, with the leeway capped to at least .Pp For a more detailed description about submitting blocks to queues, see .Xr dispatch_async 3 . +.Sh FUNDAMENTALS +The +.Fn dispatch_after +function is a wrapper around +.Fn dispatch_after_f . 
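Since the dispatch_after.3 FUNDAMENTALS note above documents dispatch_after as a wrapper around dispatch_after_f, a minimal usage sketch may help; the two-second delay and the message are illustrative only.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
    // Illustrative: run a block on the main queue about two
    // seconds from now; dispatch_time computes the target time.
    dispatch_time_t when = dispatch_time(DISPATCH_TIME_NOW,
        2 * NSEC_PER_SEC);
    dispatch_after(when, dispatch_get_main_queue(), ^{
        printf("fired after delay\\n");
    });
    dispatch_main(); // never returns; drains the main queue
}
.Ed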
+.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_time 3 .Sh CAVEATS .Fn dispatch_after retains the passed queue. @@ -57,13 +66,3 @@ The result of passing as the .Fa when parameter is undefined. -.Pp -.Sh FUNDAMENTALS -The -.Fn dispatch_after -function is a wrapper around -.Fn dispatch_after_f . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_api.3 b/man/dispatch_api.3 index 912338672..c82425051 100644 --- a/man/dispatch_api.3 +++ b/man/dispatch_api.3 @@ -11,15 +11,17 @@ consider when designing and implementing API in terms of dispatch queues and blocks. .Pp A general recommendation is to allow both a callback block and target dispatch -queue to be specified. This gives the application the greatest flexibility in -handling asynchronous events. +queue to be specified. +This gives the application the greatest flexibility in handling asynchronous +events. .Pp It's also recommended that interfaces take only a single block as the last -parameter. This is both for consistency across projects, as well as the visual -aesthetics of multiline blocks that are declared inline. The dispatch queue to -which the block will be submitted should immediately precede the block argument -(second-to-last argument). For example: -.Pp +parameter. +This is both for consistency across projects, as well as the visual aesthetics +of multiline blocks that are declared inline. +The dispatch queue to which the block will be submitted should immediately +precede the block argument (second-to-last argument). +For example: .Bd -literal -offset indent read_async(file, callback_queue, ^{ printf("received callback.\\n"); @@ -34,10 +36,8 @@ pointer, and a new last parameter is added, which is the function to call. The function based callback should pass the context pointer as the first argument, and the subsequent arguments should be identical to the block based variant (albeit offset by one in order). -.Pp -It is also important to use consistent naming. The dispatch API, for example, -uses the suffix "_f" for function based variants. -.Pp +It is also important to use consistent naming. +The dispatch API, for example, uses the suffix "_f" for function based variants. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 57c99a8a7..7f3651dfd 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -30,14 +30,14 @@ dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) { }); .Ed .Pp -Although any queue can be used, it is strongly recommended to use +Although any queue can be used, it is strongly recommended to use .Vt DISPATCH_APPLY_AUTO -as the -.Vt queue +as the +.Vt queue argument to both .Fn dispatch_apply and -.Fn dispatch_apply_f , +.Fn dispatch_apply_f , as shown in the example above, since this allows the system to automatically use worker threads that match the configuration of the current thread as closely as possible. No assumptions should be made about which global concurrent queue will be used. @@ -75,7 +75,8 @@ for (i = count - (count % STRIDE); i < count; i++) { .Ed .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. In other words, the synchronous function borrows the +on the target queue. 
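The dispatch_api.3 text above describes the naming convention for function-based variants; dispatch_sync_f is a concrete instance of that convention, shown in a minimal sketch below (the queue label and message are illustrative).
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdio.h>

// Per the convention: the context pointer becomes the
// callback's first argument.
static void work(void *context)
{
    printf("%s\\n", (const char *)context);
}

int main(void)
{
    dispatch_queue_t q =
        dispatch_queue_create("com.example.q", DISPATCH_QUEUE_SERIAL);
    dispatch_sync_f(q, (void *)"hello", work);
    dispatch_release(q); // plain C: manual retain/release
    return 0;
}
.Ed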
+In other words, the synchronous function borrows the reference of the calling function (this is valid because the calling function is blocked waiting for the result of the synchronous function, and therefore cannot modify the reference count of the target queue until after the @@ -95,7 +96,7 @@ or .Fn dispatch_async_f will incur more overhead and does not express the desired parallel execution semantics to the system, so may not create an optimal number of worker threads for a parallel workload. -For this reason, prefer to use +For this reason, prefer to use .Fn dispatch_apply or .Fn dispatch_apply_f @@ -105,6 +106,10 @@ The .Fn dispatch_apply function is a wrapper around .Fn dispatch_apply_f . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_queue_create 3 .Sh CAVEATS Unlike .Fn dispatch_async , @@ -112,11 +117,7 @@ a block submitted to .Fn dispatch_apply is expected to be either independent or dependent .Em only -on work already performed in lower-indexed invocations of the block. If -the block's index dependency is non-linear, it is recommended to -use a for-loop around invocations of +on work already performed in lower-indexed invocations of the block. +If the block's index dependency is non-linear, it is recommended to use a +for-loop around invocations of .Fn dispatch_async . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_queue_create 3 diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 99c532d40..bac733139 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -31,20 +31,23 @@ and .Fn dispatch_sync functions schedule blocks for concurrent execution within the .Xr dispatch 3 -framework. Blocks are submitted to a queue which dictates the policy for their -execution. See +framework. +Blocks are submitted to a queue which dictates the policy for their execution. +See .Xr dispatch_queue_create 3 for more information about creating dispatch queues. .Pp These functions support efficient temporal synchronization, background -concurrency and data-level concurrency. These same functions can also be used -for efficient notification of the completion of asynchronous blocks (a.k.a. -callbacks). +concurrency and data-level concurrency. +These same functions can also be used for efficient notification of the +completion of asynchronous blocks (a.k.a. callbacks). .Sh TEMPORAL SYNCHRONIZATION Synchronization is often required when multiple threads of execution access -shared data concurrently. The simplest form of synchronization is -mutual-exclusion (a lock), whereby different subsystems execute concurrently -until a shared critical section is entered. In the +shared data concurrently. +The simplest form of synchronization is mutual-exclusion (a lock), whereby +different subsystems execute concurrently until a shared critical section is +entered. +In the .Xr pthread 3 family of procedures, temporal synchronization is accomplished like so: .Bd -literal -offset indent @@ -60,7 +63,8 @@ assert(r == 0); The .Fn dispatch_sync function may be used with a serial queue to accomplish the same style of -synchronization. For example: +synchronization. +For example: .Bd -literal -offset indent dispatch_sync(my_queue, ^{ // critical section @@ -74,19 +78,21 @@ left without restoring the queue to a reentrant state. The .Fn dispatch_async function may be used to implement deferred critical sections when the result -of the block is not needed locally. 
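For the dispatch_apply.3 caveat above, a sketch of the recommended for-loop of dispatch_async; using a group to await completion is an assumption of this sketch, not something the page mandates.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void)
{
    dispatch_queue_t q =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_group_t g = dispatch_group_create();
    for (size_t i = 0; i < 10; i++) {
        // Each block captures its own copy of i.
        dispatch_group_async(g, q, ^{
            printf("iteration %zu\\n", i);
        });
    }
    dispatch_group_wait(g, DISPATCH_TIME_FOREVER);
    dispatch_release(g);
    return 0;
}
.Ed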
Deferred critical sections have the same -synchronization properties as the above code, but are non-blocking and -therefore more efficient to perform. For example: +of the block is not needed locally. +Deferred critical sections have the same synchronization properties as the above +code, but are non-blocking and therefore more efficient to perform. +For example: .Bd -literal dispatch_async(my_queue, ^{ // critical section }); .Ed .Sh BACKGROUND CONCURRENCY -.The +The .Fn dispatch_async function may be used to execute trivial background tasks on a global concurrent -queue. For example: +queue. +For example: .Bd -literal dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ // background operation @@ -98,8 +104,9 @@ This approach is an efficient replacement for .Sh COMPLETION CALLBACKS Completion callbacks can be accomplished via nested calls to the .Fn dispatch_async -function. It is important to remember to retain the destination queue before the -first call to +function. +It is important to remember to retain the destination queue before the first +call to .Fn dispatch_async , and to release that queue at the end of the completion callback to ensure the destination queue is not deallocated while the completion callback is pending. @@ -130,21 +137,24 @@ async_read(object_t obj, .Sh RECURSIVE LOCKS While .Fn dispatch_sync -can replace a lock, it cannot replace a recursive lock. Unlike locks, queues -support both asynchronous and synchronous operations, and those operations are -ordered by definition. A recursive call to +can replace a lock, it cannot replace a recursive lock. +Unlike locks, queues support both asynchronous and synchronous operations, and +those operations are ordered by definition. +A recursive call to .Fn dispatch_sync causes a simple deadlock as the currently executing block waits for the next block to complete, but the next block will not start until the currently running block completes. .Pp -As the dispatch framework was designed, we studied recursive locks. We found -that the vast majority of recursive locks are deployed retroactively when -ill-defined lock hierarchies are discovered. As a consequence, the adoption of -recursive locks often mutates obvious bugs into obscure ones. This study also -revealed an insight: if reentrancy is unavoidable, then reader/writer locks are -preferable to recursive locks. Disciplined use of reader/writer locks enable -reentrancy only when reentrancy is safe (the "read" side of the lock). +As the dispatch framework was designed, we studied recursive locks. +We found that the vast majority of recursive locks are deployed retroactively +when ill-defined lock hierarchies are discovered. +As a consequence, the adoption of recursive locks often mutates obvious bugs +into obscure ones. +This study also revealed an insight: if reentrancy is unavoidable, then +reader/writer locks are preferable to recursive locks. +Disciplined use of reader/writer locks enable reentrancy only when reentrancy is +safe (the "read" side of the lock). .Pp Nevertheless, if it is absolutely necessary, what follows is an imperfect way of implementing recursive locks using the dispatch framework: @@ -168,17 +178,17 @@ calls .Fn dispatch_sync against queue B which runs on thread Y which recursively calls .Fn dispatch_sync -against queue A, which deadlocks both examples. This is bug-for-bug compatible -with nontrivial pthread usage. 
In fact, nontrivial reentrancy is impossible to -support in recursive locks once the ultimate level of reentrancy is deployed -(IPC or RPC). +against queue A, which deadlocks both examples. +This is bug-for-bug compatible with nontrivial pthread usage. +In fact, nontrivial reentrancy is impossible to support in recursive locks once +the ultimate level of reentrancy is deployed (IPC or RPC). .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. In other words, the synchronous function borrows the -reference of the calling function (this is valid because the calling function -is blocked waiting for the result of the synchronous function, and therefore -cannot modify the reference count of the target queue until after the -synchronous function has returned). +on the target queue. +In other words, the synchronous function borrows the reference of the calling +function (this is valid because the calling function is blocked waiting for the +result of the synchronous function, and therefore cannot modify the reference +count of the target queue until after the synchronous function has returned). For example: .Bd -literal queue = dispatch_queue_create("com.example.queue", NULL); @@ -199,9 +209,11 @@ Conceptually, is a convenient wrapper around .Fn dispatch_async with the addition of a semaphore to wait for completion of the block, and a -wrapper around the block to signal its completion. See +wrapper around the block to signal its completion. +See .Xr dispatch_semaphore_create 3 -for more information about dispatch semaphores. The actual implementation of the +for more information about dispatch semaphores. +The actual implementation of the .Fn dispatch_sync function may be optimized and differ from the above description. .Pp @@ -226,7 +238,6 @@ parameter is passed to the .Fa function when it is invoked on the target .Fa queue . -.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_apply 3 , diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index b3a216e4f..b1a396e47 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -55,10 +55,12 @@ .Vt dispatch_data_t dispatch_data_empty ; .Sh DESCRIPTION Dispatch data objects are opaque containers of bytes that represent one or more -regions of memory. They are created either from memory buffers managed by the -application or the system or from other dispatch data objects. Dispatch data -objects are immutable and the memory regions they represent are required to -remain unchanged for the lifetime of all data objects that reference them. +regions of memory. +They are created either from memory buffers managed by the application or the +system or from other dispatch data objects. +Dispatch data objects are immutable and the memory regions they represent are +required to remain unchanged for the lifetime of all data objects that reference +them. Dispatch data objects avoid copying the represented memory as much as possible. Multiple data objects can represent the same memory regions or subsections thereof. @@ -76,8 +78,8 @@ block will be submitted to the specified when the object reaches the end of its lifecycle, indicating that the system no longer references the .Fa buffer . -This allows the application to deallocate -the associated storage. The +This allows the application to deallocate the associated storage. 
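A minimal sketch of the dispatch_data_create.3 destructor mechanism described above: a heap buffer is handed to a data object and freed only once the system no longer references it (the buffer size and queue choice are illustrative).
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdlib.h>

int main(void)
{
    size_t size = 1024;
    char *buffer = calloc(1, size);
    dispatch_queue_t q =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_data_t data = dispatch_data_create(buffer, size, q, ^{
        free(buffer); // safe: no data object references it anymore
    });
    // ... use the data object ...
    dispatch_release(data); // eventually triggers the destructor
    return 0;
}
.Ed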
+The .Fa queue argument is ignored if one of the following predefined destructors is passed: .Bl -tag -width DISPATCH_DATA_DESTRUCTOR_DEFAULT -compact -offset indent @@ -111,26 +113,29 @@ function creates a new data object by mapping the memory represented by the provided .Fa data object as a single contiguous memory region (moving or copying memory as -necessary). If the +necessary). +If the .Fa buffer_ptr and .Fa size_ptr references are not .Dv NULL , they are filled with the location and extent of the contiguous region, allowing -direct read access to the mapped memory. These values are valid only as long as -the newly created object has not been released. +direct read access to the mapped memory. +These values are valid only as long as the newly created object has not been +released. .Sh ACCESS The .Fn dispatch_data_apply function provides read access to represented memory without requiring it to be -mapped as a single contiguous region. It traverses the memory regions -represented by the +mapped as a single contiguous region. +It traverses the memory regions represented by the .Fa data argument in logical order, invokes the specified .Fa applier block for each region and returns a boolean indicating whether traversal -completed successfully. The +completed successfully. +The .Fa applier block is passed the following arguments for each memory region and returns a boolean indicating whether traversal should continue: @@ -170,7 +175,8 @@ specified by the argument among the regions represented by the provided .Fa data object and returns a newly created copy of the data object representing that -region. The variable specified by the +region. +The variable specified by the .Fa offset_ptr argument is filled with the logical position where the returned object starts in the @@ -198,17 +204,19 @@ Data objects passed as arguments to a dispatch data .Sy create or .Sy copy -function can be released when the function returns. The newly created object -holds implicit references to their constituent memory regions as necessary. +function can be released when the function returns. +The newly created object holds implicit references to their constituent memory +regions as necessary. .Pp The functions .Fn dispatch_data_create_map and .Fn dispatch_data_apply return an interior pointer to represented memory that is only valid as long as -an associated object has not been released. When Objective-C Automated -Reference Counting is enabled, care needs to be taken if that object is held in -a variable with automatic storage. It may need to be annotated with the +an associated object has not been released. +When Objective-C Automated Reference Counting is enabled, care needs to be taken +if that object is held in a variable with automatic storage. +It may need to be annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong @@ -216,5 +224,5 @@ instance variable instead, to ensure that the object is not released prematurely before memory accesses via the interor pointer have been completed. .Sh SEE ALSO .Xr dispatch 3 , -.Xr dispatch_object 3 , -.Xr dispatch_io_read 3 +.Xr dispatch_io_read 3 , +.Xr dispatch_object 3 diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index d82391e82..954df2117 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -68,7 +68,8 @@ has elapsed. If the .Fa group becomes empty within the specified amount of time, the function will return zero -indicating success. Otherwise, a non-zero return code will be returned. 
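A sketch of the traversal described in the dispatch_data_create.3 ACCESS section above; returning false from the applier block would stop the traversal early. The printing is illustrative.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdbool.h>
#include <stdio.h>

void dump_regions(dispatch_data_t data)
{
    dispatch_data_apply(data, ^bool(dispatch_data_t region,
        size_t offset, const void *buffer, size_t size) {
        printf("offset %zu: %zu bytes\\n", offset, size);
        return true; // continue with the next region
    });
}
.Ed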
+indicating success. +Otherwise, a non-zero return code will be returned. When .Va DISPATCH_TIME_FOREVER is passed as the @@ -93,7 +94,8 @@ notification is pending, therefore it is valid to release the .Fa group after setting a notification block. The group will be empty at the time the notification block is submitted to the -target queue. The group may either be released with +target queue. +The group may either be released with .Fn dispatch_release or reused for additional operations. .Pp @@ -141,12 +143,19 @@ functions are wrappers around and .Fn dispatch_group_notify_f respectively. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_object 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_time 3 .Sh CAVEATS In order to ensure deterministic behavior, it is recommended to call .Fn dispatch_group_wait -only once all blocks have been submitted to the group. If it is later -determined that new blocks should be run, it is recommended not to reuse an -already-running group, but to create a new group. +only once all blocks have been submitted to the group. +If it is later determined that new blocks should be run, it is recommended not +to reuse an already-running group, but to create a new group. .Pp .Fn dispatch_group_wait returns as soon as there are exactly zero @@ -155,26 +164,21 @@ blocks associated with a group (more precisely, as soon as every .Fn dispatch_group_enter call has been balanced by a .Fn dispatch_group_leave -call). If one thread waits for a group while another thread submits -new blocks to the group, then the count of associated blocks might -momentarily reach zero before all blocks have been submitted. If this happens, +call). +If one thread waits for a group while another thread submits new blocks to the +group, then the count of associated blocks might momentarily reach zero before +all blocks have been submitted. +If this happens, .Fn dispatch_group_wait will return too early: some blocks associated with the group have finished, but some have not yet been submitted or run. .Pp However, as a special case, a block associated with a group may submit new -blocks associated with its own group. In this case, the behavior is -deterministic: a waiting thread will +blocks associated with its own group. +In this case, the behavior is deterministic: a waiting thread will .Em not wake up until the newly submitted blocks have also finished. .Pp All of the foregoing also applies to -.Fn dispath_group_notify +.Fn dispatch_group_notify as well, with "block to be submitted" substituted for "waiting thread". -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_object 3 , -.Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 83e551401..7e2f99879 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -57,18 +57,22 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. It is an application of the ideas and idioms present in the +operations. +It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. 
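For the enter/leave balance discussed in the dispatch_group_create.3 caveats above, a sketch bridging a callback-based API into a group; start_work and its completion block are assumptions made for illustration.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>

// Hypothetical callback-based API assumed for illustration.
typedef void (^completion_t)(void);
extern void start_work(completion_t completion);

void track_work(dispatch_group_t group)
{
    dispatch_group_enter(group);
    start_work(^{
        // Balance the enter once the work has finished, so
        // dispatch_group_wait observes it.
        dispatch_group_leave(group);
    });
}
.Ed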
Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to create and configure dispatch I/O -channels. Reading from and writing to these channels is covered in the +channels. +Reading from and writing to these channels is covered in the .Xr dispatch_io_read 3 -page. The dispatch I/O framework also provides the convenience functions +page. +The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 @@ -82,16 +86,17 @@ Dispatch I/O channels can have one of the following types: .Bl -tag -width DISPATCH_IO_STREAM -compact -offset indent .It DISPATCH_IO_STREAM channels that represent a stream of bytes and do not support reads and writes -at arbitrary offsets, such as pipes or sockets. Channels of this type perform -read and write operations sequentially at the current file pointer position and -ignore any offset specified. Depending on the underlying file descriptor, read -operations may be performed simultaneously with write operations. +at arbitrary offsets, such as pipes or sockets. +Channels of this type perform read and write operations sequentially at the +current file pointer position and ignore any offset specified. +Depending on the underlying file descriptor, read operations may be performed +simultaneously with write operations. .It DISPATCH_IO_RANDOM -channels that represent random access files on disk. Only supported for -seekable file descriptors and paths. Channels of this type may perform -submitted read and write operations concurrently at the specified offset -(interpreted relative to the position of the file pointer when the channel was -created). +channels that represent random access files on disk. +Only supported for seekable file descriptors and paths. +Channels of this type may perform submitted read and write operations +concurrently at the specified offset (interpreted relative to the position of +the file pointer when the channel was created). .El .Sh CHANNEL OPENING AND CLOSING The @@ -102,13 +107,13 @@ functions create a dispatch I/O channel of provided .Fa type from a file descriptor .Fa fd -or an absolute pathname, respectively. They can be thought of as analogous to -the +or an absolute pathname, respectively. +They can be thought of as analogous to the .Xr fdopen 3 POSIX function and the .Xr fopen 3 -function in the standard C library. For a channel created from a pathname, the -provided +function in the standard C library. +For a channel created from a pathname, the provided .Fa path , .Fa oflag and @@ -122,20 +127,22 @@ The provided block will be submitted to the specified .Fa queue when all I/O operations on the channel have completed and it is closed or -reaches the end of its lifecycle. If an error occurs during channel creation, -the +reaches the end of its lifecycle. +If an error occurs during channel creation, the .Fa cleanup_handler block will be submitted immediately and passed an .Fa error -parameter with the POSIX error encountered. If an invalid +parameter with the POSIX error encountered. 
+If an invalid .Fa type or a non-absolute .Fa path argument is specified, these functions will return NULL and the .Fa cleanup_handler -will not be invoked. After successfully creating a dispatch I/O channel from a -file descriptor, the application must take care not to modify that file -descriptor until the associated +will not be invoked. +After successfully creating a dispatch I/O channel from a file descriptor, the +application must take care not to modify that file descriptor until the +associated .Fa cleanup_handler is invoked, see .Sx "FILEDESCRIPTOR OWNERSHIP" @@ -143,14 +150,15 @@ for details. .Pp The .Fn dispatch_io_close -function closes a dispatch I/O channel to new submissions of I/O operations. If +function closes a dispatch I/O channel to new submissions of I/O operations. +If .Dv DISPATCH_IO_STOP is passed in the .Fa flags parameter, the system will in addition not perform the I/O operations already submitted to the channel that are still pending and will make a best effort to -interrupt any ongoing operations. Handlers for operations so affected will be -passed the +interrupt any ongoing operations. +Handlers for operations so affected will be passed the .Er ECANCELED error code, along with any partial results. .Sh CHANNEL CONFIGURATION @@ -164,8 +172,7 @@ and .Fn dispatch_io_set_low_water functions configure the water mark settings of a .Fa channel . -The system will read -or write at least the number of bytes specified by +The system will read or write at least the number of bytes specified by .Fa low_water before submitting an I/O handler with partial results, and will make a best effort to submit an I/O handler as soon as the number of bytes read or written @@ -176,17 +183,18 @@ The .Fn dispatch_io_set_interval function configures the time .Fa interval -at which I/O handlers are submitted (measured in nanoseconds). If +at which I/O handlers are submitted (measured in nanoseconds). +If .Dv DISPATCH_IO_STRICT_INTERVAL is passed in the .Fa flags parameter, the interval will be strictly observed even if there is an insufficient amount of data to deliver; otherwise delivery will be skipped for intervals where the amount of available data is inferior to the channel's -low-water mark. Note that the system may defer enqueueing interval I/O handlers +low-water mark. +Note that the system may defer enqueueing interval I/O handlers by a small unspecified amount of leeway in order to align with other system activity for improved system performance or power consumption. -.Pp .Sh DATA DELIVERY The size of data objects passed to I/O handlers for a channel will never be larger than the high-water mark set on the channel; it will also never be @@ -202,53 +210,57 @@ the channel has an interval with the flag set .El Bear in mind that dispatch I/O channels will typically deliver amounts of data -significantly higher than the low-water mark. The default value for the -low-water mark is unspecified, but must be assumed to allow intermediate -handler invocations. The default value for the high-water mark is -unlimited (i.e.\& +significantly higher than the low-water mark. +The default value for the low-water mark is unspecified, but must be assumed to +allow intermediate handler invocations. +The default value for the high-water mark is unlimited (i.e.\& .Dv SIZE_MAX ) . Channels that require intermediate results of fixed size should have both the -low-water and the high-water mark set to that size. 
Channels that do not wish -to receive any intermediate results should have the low-water mark set to +low-water and the high-water mark set to that size. +Channels that do not wish to receive any intermediate results should have the +low-water mark set to .Dv SIZE_MAX . -.Pp .Sh FILEDESCRIPTOR OWNERSHIP When an application creates a dispatch I/O channel from a file descriptor with the .Fn dispatch_io_create function, the system takes control of that file descriptor until the channel is closed, an error occurs on the file descriptor or all references to the channel -are released. At that time the channel's cleanup handler will be enqueued and -control over the file descriptor relinquished, making it safe for the -application to +are released. +At that time the channel's cleanup handler will be enqueued and control over the +file descriptor relinquished, making it safe for the application to .Xr close 2 -the file descriptor. While a file descriptor is under the control of a dispatch -I/O channel, file descriptor flags such as +the file descriptor. +While a file descriptor is under the control of a dispatch I/O channel, file +descriptor flags such as .Dv O_NONBLOCK -will be modified by the system on behalf of the application. It is an error for -the application to modify a file descriptor directly while it is under the -control of a dispatch I/O channel, but it may create further I/O channels -from that file descriptor or use the +will be modified by the system on behalf of the application. +It is an error for the application to modify a file descriptor directly while it +is under the control of a dispatch I/O channel, but it may create further I/O +channels from that file descriptor or use the .Xr dispatch_read 3 and .Xr dispatch_write 3 -convenience functions with that file descriptor. If multiple I/O channels have +convenience functions with that file descriptor. +If multiple I/O channels have been created from the same file descriptor, all the associated cleanup handlers will be submitted together once the last channel has been closed resp.\& all -references to those channels have been released. If convenience functions have -also been used on that file descriptor, submission of their handlers will be -tied to the submission of the channel cleanup handlers as well. -.Pp +references to those channels have been released. +If convenience functions have also been used on that file descriptor, submission +of their handlers will be tied to the submission of the channel cleanup handlers +as well. .Sh BARRIER OPERATIONS The .Fn dispatch_io_barrier -function schedules a barrier operation on an I/O channel. The specified barrier -block will be run once, after all current I/O operations (such as -.Xr read 2 or +function schedules a barrier operation on an I/O channel. +The specified barrier block will be run once, after all current I/O operations +(such as +.Xr read 2 +or .Xr write 2 ) on the underlying -file descriptor have finished. No new I/O operations will start until the -barrier block finishes. +file descriptor have finished. +No new I/O operations will start until the barrier block finishes. .Pp The barrier block may operate on the underlying file descriptor with functions like @@ -266,17 +278,17 @@ There is no synchronization between a barrier block and any .Xr dispatch_io_read 3 or .Xr dispatch_io_write 3 -handler blocks; they may be running at the same time. The barrier block itself -is responsible for any required synchronization. +handler blocks; they may be running at the same time. 
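A sketch of the barrier mechanism described in the dispatch_io_create.3 BARRIER OPERATIONS section above; fsync here cannot race reads or writes previously submitted to the channel. The channel is assumed to have been created elsewhere.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <unistd.h>

void flush_channel(dispatch_io_t channel)
{
    dispatch_io_barrier(channel, ^{
        // Runs after all in-flight operations on the underlying
        // descriptor have finished.
        fsync(dispatch_io_get_descriptor(channel));
    });
}
.Ed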
+The barrier block itself is responsible for any required synchronization. .Sh MEMORY MODEL Dispatch I/O channel objects are retained and released via calls to .Fn dispatch_retain and .Fn dispatch_release . .Sh SEE ALSO +.Xr open 2 , .Xr dispatch 3 , .Xr dispatch_io_read 3 , .Xr dispatch_object 3 , .Xr dispatch_read 3 , -.Xr fopen 3 , -.Xr open 2 +.Xr fopen 3 diff --git a/man/dispatch_io_read.3 b/man/dispatch_io_read.3 index 26a11e894..3cff4faf8 100644 --- a/man/dispatch_io_read.3 +++ b/man/dispatch_io_read.3 @@ -26,30 +26,34 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. It is an application of the ideas and idioms present in the +operations. +It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to read from and write to dispatch I/O -channels. Creation and configuration of these channels is covered in the +channels. +Creation and configuration of these channels is covered in the .Xr dispatch_io_create 3 -page. The dispatch I/O framework also provides the convenience functions +page. +The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 for uses that do not require the full functionality provided by I/O channels. -.Pp .Sh FUNDAMENTALS The .Fn dispatch_io_read and .Fn dispatch_io_write functions are used to perform asynchronous read and write operations on -dispatch I/O channels. They can be thought of as asynchronous versions of the +dispatch I/O channels. +They can be thought of as asynchronous versions of the .Xr fread 3 and .Xr fwrite 3 @@ -68,7 +72,8 @@ been read since the handler's previous invocation. .Pp The .Va offset -parameter indicates where the read operation should begin. For a channel of +parameter indicates where the read operation should begin. +For a channel of .Dv DISPATCH_IO_RANDOM type it is interpreted relative to the position of the file pointer when the channel was created, for a channel of @@ -79,7 +84,8 @@ pointer position. The .Va length parameter indicates the number of bytes that should be read from the I/O -channel. Pass +channel. +Pass .Dv SIZE_MAX to keep reading until EOF is encountered (for a channel created from a disk-based file this happens when reading past the end of the physical file). @@ -97,14 +103,14 @@ remains to be written as part of this I/O operation. .Pp The .Va offset -parameter indicates where the write operation should begin. It is interpreted -as for read operations above. +parameter indicates where the write operation should begin. +It is interpreted as for read operations above. .Pp The .Va data parameter specifies the location and amount of data to be written, encapsulated -as a dispatch data object. 
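A sketch of an asynchronous read per the dispatch_io_read.3 parameters described above; the handler may be invoked several times, with done set on the final call. The 4 KiB length is illustrative.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdbool.h>
#include <stdio.h>

void read_head(dispatch_io_t channel)
{
    dispatch_io_read(channel, 0, 4096, dispatch_get_main_queue(),
        ^(bool done, dispatch_data_t data, int error) {
        if (data)
            printf("got %zu bytes\\n",
                dispatch_data_get_size(data));
        if (done && error)
            fprintf(stderr, "read error: %d\\n", error);
    });
}
.Ed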
The object is retained by the system until the write -operation is complete. +as a dispatch data object. +The object is retained by the system until the write operation is complete. .Sh I/O HANDLER BLOCKS Dispatch I/O handler blocks submitted to a channel via the .Fn dispatch_io_read @@ -113,9 +119,9 @@ or functions will be executed one or more times depending on system load and the channel's configuration settings (see .Xr dispatch_io_create 3 -for details). The handler block need not be reentrant safe, -no new I/O handler instance is submitted until the previously enqueued handler -block has returned. +for details). +The handler block need not be reentrant safe, no new I/O handler instance is +submitted until the previously enqueued handler block has returned. .Pp The dispatch .Va data @@ -129,12 +135,14 @@ for details). Once an I/O handler block is invoked with the .Va done flag set, the associated I/O operation is complete and that handler block will -not be run again. If an unrecoverable error occurs while performing the I/O -operation, the handler block will be submitted with the +not be run again. +If an unrecoverable error occurs while performing the I/O operation, the handler +block will be submitted with the .Va done flag set and the appropriate POSIX error code in the .Va error -parameter. An invocation of a handler block with the +parameter. +An invocation of a handler block with the .Va done flag set, zero .Va error diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index cddcf32aa..03c29b030 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -53,13 +53,13 @@ and respectively. .Pp The dispatch framework does not guarantee that any given client has the last or -only reference to a given object. Objects may be retained internally by the -system. +only reference to a given object. +Objects may be retained internally by the system. .Ss INTEGRATION WITH OBJECTIVE-C .Bd -filled -offset indent When building with an Objective-C or Objective-C++ compiler, dispatch objects -are declared as Objective-C types. This results in the following differences -compared to building as plain C/C++: +are declared as Objective-C types. +This results in the following differences compared to building as plain C/C++: .Bl -dash .It if Objective-C Automated Reference Counting is enabled, dispatch objects are @@ -72,13 +72,15 @@ functions will produce build errors. .Em Note : when ARC is enabled, care needs to be taken with dispatch API returning an interior pointer that is only valid as long as an associated object has not -been released. If that object is held in a variable with automatic storage, it -may need to be annotated with the +been released. +If that object is held in a variable with automatic storage, it may need to be +annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong instance variable instead, to ensure that the object is not prematurely -released. The functions returning interior pointers are +released. +The functions returning interior pointers are .Xr dispatch_data_create_map 3 and .Xr dispatch_data_apply 3 . @@ -116,10 +118,9 @@ preprocessor macro to When building with a plain C/C++ compiler or when integration with Objective-C is disabled, dispatch objects are .Em not -automatically retained and released when captured by a block. 
Therefore, when a -dispatch object is captured by a block that will be executed asynchronously, -the object must be manually retained and released: -.Pp +automatically retained and released when captured by a block. +Therefore, when a dispatch object is captured by a block that will be executed +asynchronously, the object must be manually retained and released: .Bd -literal -offset indent dispatch_retain(object); dispatch_async(queue, ^{ @@ -129,13 +130,15 @@ dispatch_async(queue, ^{ .Ed .Sh ACTIVATION Dispatch objects such as queues and sources may be created in an inactive -state. Objects in this state must be activated before any blocks -associated with them will be invoked. Calling +state. +Objects in this state must be activated before any blocks associated with them +will be invoked. +Calling .Fn dispatch_activate on an active object has no effect. .Pp -Changing attributes such as the target queue or a source handler is no longer permitted -once the object has been activated (see +Changing attributes such as the target queue or a source handler is no longer +permitted once the object has been activated (see .Xr dispatch_set_target_queue 3 , .Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION @@ -144,7 +147,8 @@ or resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Other dispatch objects do not support suspension. +respectively. +Other dispatch objects do not support suspension. .Pp The dispatch framework always checks the suspension status before executing a block, but such changes never affect a block during execution (non-preemptive). @@ -155,18 +159,20 @@ a dispatch source is undefined. .Pp .Em Important : suspension applies to all aspects of the dispatch object life cycle, including -the finalizer function and cancellation handler. Suspending an object causes it -to be retained and resuming an object causes it to be released. Therefore it is -important to balance calls to +the finalizer function and cancellation handler. +Suspending an object causes it to be retained and resuming an object causes it +to be released. +Therefore it is important to balance calls to .Fn dispatch_suspend and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is -released. The result of releasing all references to a dispatch object while in +released. +The result of releasing all references to a dispatch object while in an inactive or suspended state is undefined. .Sh CONTEXT POINTERS -Dispatch objects support supplemental context pointers. The value of the -context pointer may be retrieved and updated with +Dispatch objects support supplemental context pointers. +The value of the context pointer may be retrieved and updated with .Fn dispatch_get_context and .Fn dispatch_set_context @@ -176,8 +182,8 @@ The specifies an optional per-object finalizer function that is invoked asynchronously if the context pointer is not NULL when the last reference to the object is released. -This gives the -application an opportunity to free the context data associated with the object. +This gives the application an opportunity to free the context data associated +with the object. The finalizer will be run on the object's target queue. 
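A sketch of the context pointer and finalizer mechanism just described in dispatch_object.3; the 64-byte allocation is illustrative.
.Bd -literal -offset indent
#include <dispatch/dispatch.h>
#include <stdlib.h>

static void queue_finalizer(void *context)
{
    // Invoked on the queue's target queue after the last release.
    free(context);
}

int main(void)
{
    dispatch_queue_t q =
        dispatch_queue_create("com.example.ctx", DISPATCH_QUEUE_SERIAL);
    dispatch_set_context(q, malloc(64));
    dispatch_set_finalizer_f(q, queue_finalizer);
    dispatch_release(q); // finalizer runs asynchronously afterwards
    return 0;
}
.Ed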
.Sh SEE ALSO .Xr dispatch 3 , diff --git a/man/dispatch_once.3 b/man/dispatch_once.3 index 2118a23bb..0875bc54c 100644 --- a/man/dispatch_once.3 +++ b/man/dispatch_once.3 @@ -36,7 +36,6 @@ FILE *getlogfile(void) return logfile; } .Ed -.Pp .Sh FUNDAMENTALS The .Fn dispatch_once diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 833e564a0..3eeb4d366 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -49,11 +49,13 @@ All blocks submitted to dispatch queues are dequeued in FIFO order. Queues created with the .Dv DISPATCH_QUEUE_SERIAL attribute wait for the previously dequeued block to complete before dequeuing -the next block. A queue with this FIFO completion behavior is usually simply -described as a "serial queue." All memory writes performed by a block dispatched -to a serial queue are guaranteed to be visible to subsequent blocks dispatched -to the same queue. Queues are not bound to any specific thread of execution and -blocks submitted to independent queues may execute concurrently. +the next block. +A queue with this FIFO completion behavior is usually simply described as a +"serial queue". +All memory writes performed by a block dispatched to a serial queue are +guaranteed to be visible to subsequent blocks dispatched to the same queue. +Queues are not bound to any specific thread of execution and blocks submitted to +independent queues may execute concurrently. .Pp Queues created with the .Dv DISPATCH_QUEUE_CONCURRENT @@ -62,15 +64,17 @@ submitted with the dispatch barrier API. .Sh CREATION Queues are created with the .Fn dispatch_queue_create -function. Queues, like all dispatch objects, are reference counted and newly -created queues have a reference count of one. +function. +Queues, like all dispatch objects, are reference counted and newly created +queues have a reference count of one. .Pp The optional .Fa label argument is used to describe the purpose of the queue and is useful during -debugging and performance analysis. If a label is provided, it is copied. -By convention, clients should pass a reverse DNS style label. For example: -.Pp +debugging and performance analysis. +If a label is provided, it is copied. +By convention, clients should pass a reverse DNS style label. +For example: .Bd -literal -offset indent my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", DISPATCH_QUEUE_SERIAL); @@ -98,12 +102,14 @@ Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Suspension is checked prior to block execution and is +respectively. +Suspension is checked prior to block execution and is .Em not preemptive. .Sh MAIN QUEUE The dispatch framework provides a default serial queue for the application to -use. This queue is accessed via the +use. +This queue is accessed via the .Fn dispatch_get_main_queue function. .Pp @@ -111,17 +117,20 @@ Programs must call .Fn dispatch_main at the end of .Fn main -in order to process blocks submitted to the main queue. (See the +in order to process blocks submitted to the main queue. +(See the .Sx COMPATIBILITY -section for exceptions.) The +section for exceptions.) +The .Fn dispatch_main function never returns. .Sh GLOBAL CONCURRENT QUEUES Unlike the main queue or queues allocated with .Fn dispatch_queue_create , the global concurrent queues schedule blocks as soon as threads become -available (non-FIFO completion order). 
Four global concurrent queues are -provided, representing the following priority bands: +available (non-FIFO completion order). +Four global concurrent queues are provided, representing the following priority +bands: .Bl -bullet -compact -offset indent .It DISPATCH_QUEUE_PRIORITY_HIGH @@ -136,33 +145,34 @@ DISPATCH_QUEUE_PRIORITY_BACKGROUND The priority of a global concurrent queue controls the scheduling priority of the threads created by the system to invoke the blocks submitted to that queue. Global queues with lower priority will be scheduled for execution after all -global queues with higher priority have been scheduled. Additionally, items on -the background priority global queue will execute on threads with background -state as described in +global queues with higher priority have been scheduled. +Additionally, items on the background priority global queue will execute on +threads with background state as described in .Xr setpriority 2 (i.e.\& disk I/O is throttled and the thread's scheduling priority is set to lowest value). .Pp Use the .Fn dispatch_get_global_queue -function to obtain the global queue of given priority. The +function to obtain the global queue of given priority. +The .Fa flags -argument is reserved for future use and must be zero. Passing any value other -than zero may result in a NULL return value. +argument is reserved for future use and must be zero. +Passing any value other than zero may result in a NULL return value. .Sh TARGET QUEUE The .Fn dispatch_set_target_queue -function updates the target queue of the given dispatch object. The target -queue of an object is responsible for processing the object. +function updates the target queue of the given dispatch object. +The target queue of an object is responsible for processing the object. .Pp The new target queue is retained by the given object before the previous target -queue is released. The new target queue setting will take effect between block -executions on the object, but not in the middle of any existing block executions -(non-preemptive). +queue is released. +The new target queue setting will take effect between block executions on the +object, but not in the middle of any existing block executions (non-preemptive). .Pp The default target queue of all dispatch objects created by the application is -the default priority global concurrent queue. To reset an object's target queue -to the default, pass the +the default priority global concurrent queue. +To reset an object's target queue to the default, pass the .Dv DISPATCH_TARGET_QUEUE_DEFAULT constant to .Fn dispatch_set_target_queue . @@ -179,12 +189,14 @@ will not be invoked concurrently with blocks submitted to the target queue or to any other queue with that same target queue. .Pp The target queue of a dispatch source specifies where its event handler and -cancellation handler blocks will be submitted. See +cancellation handler blocks will be submitted. +See .Xr dispatch_source_create 3 for more information about dispatch sources. .Pp The target queue of a dispatch I/O channel specifies the priority of the global -queue where its I/O operations are executed. See +queue where its I/O operations are executed. +See .Xr dispatch_io_create 3 for more information about dispatch I/O channels. .Pp @@ -207,24 +219,27 @@ The following functions are deprecated and will be removed in a future release: .El .Pp .Fn dispatch_get_current_queue -always returns a valid queue. 
When called from within a block -submitted to a dispatch queue, that queue will be returned. If this function is -called from the main thread before +always returns a valid queue. +When called from within a block submitted to a dispatch queue, that queue will +be returned. +If this function is called from the main thread before .Fn dispatch_main is called, then the result of .Fn dispatch_get_main_queue -is returned. In all other cases, the default target queue will be returned. +is returned. +In all other cases, the default target queue will be returned. .Pp The use of .Fn dispatch_get_current_queue -is strongly discouraged except for debugging and logging purposes. Code must not -make any assumptions about the queue returned, unless it is one of the global -queues or a queue the code has itself created. The returned queue may have -arbitrary policies that may surprise code that tries to schedule work with the -queue. The list of policies includes, but is not limited to, queue width (i.e. -serial vs. concurrent), scheduling priority, security credential or filesystem -configuration. This function is deprecated and will be removed in a future -release. +is strongly discouraged except for debugging and logging purposes. +Code must not make any assumptions about the queue returned, unless it is one of +the global queues or a queue the code has itself created. +The returned queue may have arbitrary policies that may surprise code that tries +to schedule work with the queue. +The list of policies includes, but is not limited to, queue width (i.e. serial +vs. concurrent), scheduling priority, security credential or filesystem +configuration. +This function is deprecated and will be removed in a future release. .Pp It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by @@ -234,17 +249,21 @@ The result of .Fn dispatch_get_main_queue may or may not equal the result of .Fn dispatch_get_current_queue -when called on the main thread. Comparing the two is not a valid way to test -whether code is executing on the main thread. Foundation/AppKit programs should -use [NSThread isMainThread]. POSIX programs may use +when called on the main thread. +Comparing the two is not a valid way to test whether code is executing on the +main thread. +Foundation/AppKit programs should use [NSThread isMainThread]. +POSIX programs may use .Xr pthread_main_np 3 . .Pp .Fn dispatch_get_current_queue may return a queue owned by a different subsystem which has already had all -external references to it released. While such a queue will continue to exist +external references to it released. +While such a queue will continue to exist until all blocks submitted to it have completed, attempting to retain it is -forbidden and will trigger an assertion. If Objective-C Automatic Reference -Counting is enabled, any use of the object returned by +forbidden and will trigger an assertion. +If Objective-C Automatic Reference Counting is enabled, any use of the object +returned by .Fn dispatch_get_current_queue will cause retain calls to be automatically generated, so the use of .Fn dispatch_get_current_queue @@ -258,17 +277,20 @@ However, blocks submitted to the main queue in applications using .Fn dispatch_main are not guaranteed to execute on the main thread. .Pp -The dispatch framework is a pure C level API. As a result, it does not catch -exceptions generated by higher level languages such as Objective-C or C++. 
+The dispatch framework is a pure C level API. +As a result, it does not catch exceptions generated by higher level languages +such as Objective-C or C++. Applications .Em MUST catch all exceptions before returning from a block submitted to a dispatch queue; otherwise the process will be terminated with an uncaught exception. .Pp The dispatch framework manages the relationship between dispatch queues and -threads of execution. As a result, applications +threads of execution. +As a result, applications .Em MUST NOT -delete or mutate objects that they did not create. The following interfaces +delete or mutate objects that they did not create. +The following interfaces .Em MUST NOT be called by blocks submitted to a dispatch queue: .Bl -bullet -offset indent @@ -323,17 +345,19 @@ invocations of blocks submitted to a dispatch queue: While the result of .Fn pthread_self may change between invocations of blocks, the value will not change during the -execution of any single block. Because the underlying thread may change beteween -block invocations on a single queue, using per-thread data as an out-of-band -return value is error prone. In other words, the result of calling +execution of any single block. +Because the underlying thread may change between block invocations on a single +queue, using per-thread data as an out-of-band return value is error prone. +In other words, the result of calling .Fn pthread_setspecific and .Fn pthread_getspecific -is well defined within a signle block, but not across multiple blocks. Also, -one cannot make any assumptions about when the destructor passed to +is well defined within a single block, but not across multiple blocks. +Also, one cannot make any assumptions about when the destructor passed to .Fn pthread_key_create -is called. The destructor may be called between the invocation of blocks on -the same queue, or during the idle state of a process. +is called. +The destructor may be called between the invocation of blocks on the same queue, +or during the idle state of a process. .Pp The following example code correctly handles per-thread return values: .Bd -literal -offset indent @@ -350,20 +374,19 @@ printf("kill(1,0) returned %d and errno %d\n", r, e); .Ed Note that in the above example .Va errno is a per-thread variable and must be copied out explicitly as the block may be -invoked on different thread of execution than the caller. Another example of -per-thread data that would need to be copied is the use of +invoked on a different thread of execution than the caller. +Another example of per-thread data that would need to be copied is the use of .Fn getpwnam instead of .Fn getpwnam_r . .Pp As an optimization, .Fn dispatch_sync -invokes the block on the current thread when possible. In this case, the thread -specific data such as +invokes the block on the current thread when possible. +In this case, the thread specific data such as .Va errno -may persist from the block until back to the caller. Great care should be taken -not to accidentally rely on this side-effect. -.Pp +may persist from the block until control returns to the caller. +Great care should be taken not to accidentally rely on this side-effect. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_read.3 b/man/dispatch_read.3 index 38e88dea8..42e915f54 100644 --- a/man/dispatch_read.3 +++ b/man/dispatch_read.3 @@ -27,19 +27,19 @@ The .Fn dispatch_read and .Fn dispatch_write -functions asynchronously read from and write to POSIX file descriptors.
They -can be thought of as asynchronous, callback-based versions of the +functions asynchronously read from and write to POSIX file descriptors. +They can be thought of as asynchronous, callback-based versions of the .Fn fread and .Fn fwrite -functions provided by the standard C library. They are convenience functions -based on the +functions provided by the standard C library. +They are convenience functions based on the .Xr dispatch_io_read 3 and .Xr dispatch_io_write 3 -functions, intended for simple one-shot read or write requests. Multiple -request on the same file desciptor are better handled with the full underlying -dispatch I/O channel functions. +functions, intended for simple one-shot read or write requests. +Multiple requests on the same file descriptor are better handled with the full +underlying dispatch I/O channel functions. .Sh BEHAVIOR The .Fn dispatch_read @@ -48,20 +48,21 @@ function schedules an asynchronous read operation on the file descriptor Once the file descriptor is readable, the system will read as much data as is currently available, up to the specified .Va length , -starting at the current file pointer position. The given +starting at the current file pointer position. +The given .Va handler block will be submitted to .Va queue -when the operation completes or an error occurs. The block will be passed a -dispatch +when the operation completes or an error occurs. +The block will be passed a dispatch .Va data -object with the result of the read operation. If an error occurred while -reading from the file descriptor, the +object with the result of the read operation. +If an error occurred while reading from the file descriptor, the .Va error parameter to the block will be set to the appropriate POSIX error code and .Va data -will contain any data that could be read successfully. If the file pointer -position is at end-of-file, emtpy +will contain any data that could be read successfully. +If the file pointer position is at end-of-file, empty .Va data and zero .Va error @@ -75,23 +76,31 @@ The system will attempt to write the entire contents of the provided .Va data object to .Va fd -at the current file pointer position. The given +at the current file pointer position. +The given .Va handler block will be submitted to .Va queue -when the operation completes or an error occurs. If the write operation -completed successfully, the +when the operation completes or an error occurs. +If the write operation completed successfully, the .Va error parameter to the block will be set to zero, otherwise it will be set to the appropriate POSIX error code and the .Va data parameter will contain any data that could not be written. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_data_create 3 , +.Xr dispatch_io_create 3 , +.Xr dispatch_io_read 3 , +.Xr fread 3 .Sh CAVEATS The .Va data object passed to a .Va handler -block is released by the system when the block returns. If +block is released by the system when the block returns. +If .Va data is needed outside of the handler block, it must concatenate, copy, or retain it. @@ -101,7 +110,8 @@ descriptor .Va fd , the system takes control of that file descriptor until the .Va handler -block is executed. During this time the application must not manipulate +block is executed. +During this time the application must not manipulate .Va fd directly, in particular it is only safe to close .Va fd @@ -110,14 +120,9 @@ from the handler block (or after it has returned).
If multiple asynchronous read or write operations are submitted to the same file descriptor, they will be performed in order, but their handlers will only be submitted once all operations have completed and control over the file -descriptor has been relinquished. For details on this and on the interaction -with dispatch I/O channels created from the same file descriptor, see +descriptor has been relinquished. +For details on this and on the interaction with dispatch I/O channels created +from the same file descriptor, see .Sx FILEDESCRIPTOR OWNERSHIP in .Xr dispatch_io_create 3 . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_data_create 3 , -.Xr dispatch_io_create 3 , -.Xr dispatch_io_read 3 , -.Xr fread 3 diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index c0aa45171..7f0a5430a 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -26,8 +26,9 @@ Dispatch semaphores are used to synchronize threads. .Pp The .Fn dispatch_semaphore_wait -function decrements the semaphore. If the resulting value is less than zero, -it waits for a signal from a thread that increments the semaphore by calling +function decrements the semaphore. +If the resulting value is less than zero, it waits for a signal from a thread +that increments the semaphore by calling .Fn dispatch_semaphore_signal before returning. The @@ -36,13 +37,15 @@ parameter is creatable with the .Xr dispatch_time 3 or .Xr dispatch_walltime 3 -functions. If the timeout is reached without a signal being received, the semaphore -is re-incremented before the function returns. +functions. +If the timeout is reached without a signal being received, the semaphore is +re-incremented before the function returns. .Pp The .Fn dispatch_semaphore_signal -function increments the counting semaphore. If the previous value was less than zero, -it wakes one of the threads that are waiting in +function increments the counting semaphore. +If the previous value was less than zero, it wakes one of the threads that are +waiting in .Fn dispatch_semaphore_wait before returning. .Sh COMPLETION SYNCHRONIZATION @@ -98,8 +101,8 @@ Otherwise, zero is returned. .Pp The .Fn dispatch_semaphore_wait -function returns zero upon success and non-zero after the timeout expires. If -the timeout is DISPATCH_TIME_FOREVER, then +function returns zero upon success and non-zero after the timeout expires. +If the timeout is DISPATCH_TIME_FOREVER, then .Fn dispatch_semaphore_wait waits forever and always returns zero. .Sh MEMORY MODEL @@ -107,15 +110,18 @@ Dispatch semaphores are retained and released via calls to .Fn dispatch_retain and .Fn dispatch_release . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_object 3 .Sh CAVEATS Unbalanced dispatch semaphores cannot be released. -For a given semaphore, calls to +For a given semaphore, the count at the time +.Fn dispatch_release +is called must be equal to or larger than the +count the semaphore was created with. +In other words, at the time of releasing the semaphore, there must have been at +least as many .Fn dispatch_semaphore_signal -and +calls as there were successful .Fn dispatch_semaphore_wait -must be balanced before -.Fn dispatch_release -is called on it. -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_object 3 +calls that did not time out.
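The reworded semaphore caveat amounts to a balance requirement at release time. A minimal sketch in plain C with blocks (the work_queue variable is assumed to exist): one signal is consumed by one successful wait, so the count is back at its creation value of zero when the semaphore is released:

	dispatch_semaphore_t sema = dispatch_semaphore_create(0);

	dispatch_async(work_queue, ^{
		/* ... produce a result ... */
		dispatch_semaphore_signal(sema);    /* one signal ... */
	});
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
				/* ... consumed by one successful wait */
	dispatch_release(sema); /* the count equals the creation value (0),
				   so the release is balanced */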
diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 313b6e723..b54d3da8a 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -91,18 +91,20 @@ with calls to .Fn dispatch_retain and .Fn dispatch_release -respectively. The +respectively. +The .Fa queue parameter specifies the target queue of the new source object, it will -be retained by the source object. Pass the +be retained by the source object. +Pass the .Dv DISPATCH_TARGET_QUEUE_DEFAULT constant to use the default target queue (the default priority global concurrent queue). .Pp -Newly created sources are created in a suspended state. After the source has -been configured by setting an event handler, cancellation handler, registration -handler, context, -etc., the source must be activated by a call to +Newly created sources are created in a suspended state. +After the source has been configured by setting an event handler, cancellation +handler, registration handler, context, etc., the source must be activated by a +call to .Fn dispatch_resume before any events will be delivered. .Pp @@ -151,8 +153,8 @@ The .Fn dispatch_source_get_handle function returns the underlying handle to the dispatch source (i.e. file descriptor, -mach port, process identifer, etc.). The result of this function may be cast -directly to the underlying type. +mach port, process identifier, etc.). +The result of this function may be cast directly to the underlying type. .Pp The .Fn dispatch_source_get_mask @@ -174,8 +176,10 @@ function is intended for use with the .Vt DISPATCH_SOURCE_TYPE_DATA_OR and .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE -source types. The result of using this function with any other source type is -undefined. Data merging is performed according to the source type: +source types. +The result of using this function with any other source type is +undefined. +Data merging is performed according to the source type: .Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent .It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD .Vt data @@ -189,7 +193,8 @@ atomically replaces the source's data. .El .Pp If the source data value resulting from the merge operation is 0, the source -handler will not be invoked. This can happen if: +handler will not be invoked. +This can happen if: .Bl -bullet -compact -offset indent .It the atomic addition wraps for sources of type @@ -198,14 +203,14 @@ 0 is merged for sources of type .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE . .El -.Pp .Sh SOURCE EVENT HANDLERS In order to receive events from the dispatch source, an event handler should be specified via .Fn dispatch_source_set_event_handler . The event handler block is submitted to the source's target queue when the state -of the underlying system handle changes, or when an event occurs. If a source -is resumed with no event handler block set, events will be quietly ignored. +of the underlying system handle changes, or when an event occurs. +If a source is resumed with no event handler block set, events will be quietly +ignored. If the event handler block is changed while the source is suspended, or from a block running on a serial queue that is the source's target queue, then the next event handler invocation will use the new block. .Pp @@ -215,8 +220,9 @@ queues using .Fn dispatch_suspend and .Fn dispatch_resume -on the dispatch source directly. The data describing events which occur while a -source is suspended are coalesced and delivered once the source is resumed.
+on the dispatch source directly. +The data describing events which occur while a source is suspended are coalesced +and delivered once the source is resumed. .Pp The .Fa handler @@ -235,11 +241,11 @@ To unset the event handler, call and pass NULL as .Fa function . This unsets the event handler regardless of whether the handler -was a function pointer or a block. Registration and cancellation handlers -(see below) may be unset in the same way, but as noted below, a cancellation -handler may be required. +was a function pointer or a block. +Registration and cancellation handlers (see below) may be unset in the same way, +but as noted below, a cancellation handler may be required. .Sh REGISTRATION -When +When .Fn dispatch_resume is called on a suspended or newly created source, there may be a brief delay before the source is ready to receive events from the underlying system handle. @@ -248,29 +254,33 @@ missed. .Pp Once the dispatch source is registered with the underlying system and is ready to process all events its optional registration handler will be submitted to -its target queue. This registration handler may be specified via +its target queue. +This registration handler may be specified via .Fn dispatch_source_set_registration_handler . .Pp The event handler will not be called until the registration handler finishes. If the source is canceled (see below) before it is registered, its registration handler will not be called. -.Pp .Sh CANCELLATION The .Fn dispatch_source_cancel function asynchronously cancels the dispatch source, preventing any further -invocation of its event handler block. Cancellation does not interrupt a -currently executing handler block (non-preemptive). If a source is canceled -before the first time it is resumed, its event handler will never be called. +invocation of its event handler block. +Cancellation does not interrupt a currently executing handler block +(non-preemptive). +If a source is canceled before the first time it is resumed, its event handler +will never be called. (In this case, note that the source must be resumed before it can be released.) .Pp The .Fn dispatch_source_testcancel function may be used to determine whether the specified source has been -canceled. A non-zero value will be returned if the source is canceled. +canceled. +A non-zero value will be returned if the source is canceled. .Pp When a dispatch source is canceled its optional cancellation handler will be -submitted to its target queue. The cancellation handler may be specified via +submitted to its target queue. +The cancellation handler may be specified via .Fn dispatch_source_set_cancel_handler . This cancellation handler is invoked only once, and only as a direct consequence of calling @@ -278,12 +288,11 @@ of calling .Pp .Em Important: a cancellation handler is required for file descriptor and mach port based -sources in order to safely close the descriptor or destroy the port. Closing the -descriptor or port before the cancellation handler has run may result in a race -condition: if a new descriptor is allocated with the same value as the recently -closed descriptor while the source's event handler is still running, the event -handler may read/write data to the wrong descriptor. -.Pp +sources in order to safely close the descriptor or destroy the port. 
+Closing the descriptor or port before the cancellation handler has run may +result in a race condition: if a new descriptor is allocated with the same value +as the recently closed descriptor while the source's event handler is still +running, the event handler may read/write data to the wrong descriptor. .Sh DISPATCH SOURCE TYPES The following section contains a summary of supported dispatch event types and the interpretation of their parameters and returned data. @@ -297,9 +306,11 @@ handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or atomic bitwise OR, or direct replacement (based on the source's type), and the -event handler block will be submitted to the source's target queue. The +event handler block will be submitted to the source's target queue. +The .Fa data -is application defined. These sources have no +is application defined. +These sources have no .Fa handle or .Fa mask @@ -322,7 +333,8 @@ The data returned by .Fn dispatch_source_get_data is a bitmask that indicates which of the events in the .Fa mask -were observed. Note that because this source type will request notifications on +were observed. +Note that because this source type will request notifications on the provided port, it should not be mixed with the use of .Fn mach_port_request_notification on the same port. @@ -341,9 +353,11 @@ on the mach port is waiting to be received. .Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE .Pp Sources of this type monitor the system memory pressure condition for state -changes. The +changes. +The .Fa handle -is unused and should be zero. The +is unused and should be zero. +The .Fa mask may be one or more of the following: .Bl -tag -width "XXDISPATCH_MEMORYPRESSURE_CRITICAL" -compact -offset indent @@ -412,14 +426,15 @@ is unused and should be zero. .Pp The data returned by .Fn dispatch_source_get_data -is an estimated number of bytes available to be read from the descriptor. This -estimate should be treated as a suggested +is an estimated number of bytes available to be read from the descriptor. +This estimate should be treated as a suggested .Em minimum -read buffer size. There are no guarantees that a complete read of this size -will be performed. +read buffer size. +There are no guarantees that a complete read of this size will be performed. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK @@ -427,7 +442,8 @@ flag on a file descriptor. .Pp .Vt DISPATCH_SOURCE_TYPE_SIGNAL .Pp -Sources of this type monitor signals delivered to the current process. The +Sources of this type monitor signals delivered to the current process. +The .Fa handle is the signal number to monitor (int) and the .Fa mask @@ -445,11 +461,13 @@ of execution; therefore the handler block is not limited to the use of signal safe interfaces defined in .Xr sigaction 2 . Furthermore, multiple observers of a given signal are supported; thus allowing -applications and libraries to cooperate safely. However, a dispatch source +applications and libraries to cooperate safely. +However, a dispatch source .Em does not install a signal handler or otherwise alter the behavior of signal delivery. Therefore, applications must ignore or at least catch any signal that terminates -a process by default. 
For example, near the top of +a process by default. +For example, near the top of .Fn main : .Bd -literal -offset ident signal(SIGTERM, SIG_IGN); @@ -458,7 +476,8 @@ signal(SIGTERM, SIG_IGN); .Vt DISPATCH_SOURCE_TYPE_TIMER .Pp Sources of this type periodically submit the event handler block to the target -queue. The +queue. +The .Fa handle argument is unused and should be zero. .Pp @@ -469,7 +488,8 @@ event handler block. .Pp The timer parameters are configured with the .Fn dispatch_source_set_timer -function. Once this function returns, any pending source data accumulated for +function. +Once this function returns, any pending source data accumulated for the previous timer parameters has been cleared; the next fire of the timer will occur at .Fa start , @@ -478,8 +498,8 @@ and every nanoseconds thereafter until the timer source is canceled. .Pp Any fire of the timer may be delayed by the system in order to improve power -consumption and system performance. The upper limit to the allowable delay may -be configured with the +consumption and system performance. +The upper limit to the allowable delay may be configured with the .Fa leeway argument, the lower limit is under the control of the system. .Pp @@ -487,7 +507,8 @@ For the initial timer fire at .Fa start , the upper limit to the allowable delay is set to .Fa leeway -nanoseconds. For the subsequent timer fires at +nanoseconds. +For the subsequent timer fires at .Fa start .Li "+ N *" .Fa interval , @@ -498,14 +519,16 @@ the upper limit is .Li "/ 2 )" . .Pp The lower limit to the allowable delay may vary with process state such as -visibility of application UI. If the specified timer source was created with a +visibility of application UI. +If the specified timer source was created with a .Fa mask of .Vt DISPATCH_TIMER_STRICT , the system will make a best effort to strictly observe the provided .Fa leeway -value even if it is smaller than the current lower limit. Note that a minimal -amount of delay is to be expected even if this flag is specified. +value even if it is smaller than the current lower limit. +Note that a minimal amount of delay is to be expected even if this flag is +specified. .Pp The .Fa start @@ -575,12 +598,12 @@ is the file descriptor (int) to monitor and the is unused and should be zero. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK flag on a file descriptor. -.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_object 3 , diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 2536e0e9f..635f7d909 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -34,8 +34,9 @@ type is a semi-opaque integer, with only the special values .Vt DISPATCH_WALLTIME_NOW and .Vt DISPATCH_TIME_FOREVER -being externally defined. All other values are represented using an internal -format that is not safe for integer arithmetic or comparison. +being externally defined. +All other values are represented using an internal format that is not safe for +integer arithmetic or comparison. The internal format is subject to change. .Pp The @@ -52,8 +53,8 @@ Otherwise, if .Fa base is .Vt DISPATCH_TIME_NOW , -then the current time of the default host clock is used. On Apple platforms, -the value of the default host clock is obtained from +then the current time of the default host clock is used. 
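Because the dispatch_time_t format is only semi-opaque, as the dispatch_time.3 hunks above emphasize, milestones must be derived through the API rather than computed by hand. A minimal sketch (the queue variable is assumed to exist):

	/* Correct: derive a milestone through dispatch_time(). */
	dispatch_time_t milestone =
	    dispatch_time(DISPATCH_TIME_NOW, (int64_t)(5 * NSEC_PER_SEC));
	dispatch_after(milestone, queue, ^{
		/* runs roughly five seconds from now */
	});

	/* Incorrect: the internal format is not safe for integer arithmetic
	 * or comparison, so expressions such as (milestone + NSEC_PER_SEC)
	 * or (t1 < t2) are meaningless. */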
+On Apple platforms, the value of the default host clock is obtained from .Vt mach_absolute_time() . .Pp The @@ -61,7 +62,8 @@ The function is useful for creating a milestone relative to a fixed point in time using the wall clock, as specified by the optional .Fa base -parameter. If +parameter. +If .Fa base is NULL, then the current time of the wall clock is used. .Vt dispatch_walltime(NULL, offset) @@ -78,7 +80,8 @@ parameter. .Pp Overflow causes .Vt DISPATCH_TIME_FOREVER -to be returned. When +to be returned. +When .Fa base is .Vt DISPATCH_TIME_FOREVER , diff --git a/os/clock.h b/os/clock.h new file mode 100644 index 000000000..665e1d871 --- /dev/null +++ b/os/clock.h @@ -0,0 +1,18 @@ +#ifndef __OS_CLOCK__ +#define __OS_CLOCK__ + +#include +#include + +/* + * @typedef os_clockid_t + * + * @abstract + * Describes the kind of clock that the workgroup timestamp parameters are + * specified in + */ +OS_ENUM(os_clockid, uint32_t, + OS_CLOCK_MACH_ABSOLUTE_TIME = 32, +); + +#endif /* __OS_CLOCK__ */ diff --git a/os/eventlink_private.h b/os/eventlink_private.h new file mode 100644 index 000000000..eb55a745b --- /dev/null +++ b/os/eventlink_private.h @@ -0,0 +1,296 @@ +#ifndef __OS_EVENTLINK__ +#define __OS_EVENTLINK__ + +#include +#include +#include + +__BEGIN_DECLS + +OS_OBJECT_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_eventlink_t + * + * @abstract + * A reference counted os_object representing a directed paired link of "wake" events + * between two designated threads, the link `source` and the link `target`. + * The target thread may optionally inherit properties of the source thread upon + * return from wait (such as membership in a workgroup). + * + * @discussion + * Threads explicitly associate themselves with an eventlink; only one source + * and one target may exist per eventlink. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_eventlink_s *os_eventlink_t; +#else +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_OBJECT_DECL_CLASS(os_eventlink); +#endif + +/*! + * @function os_eventlink_create + * + * @abstract + * Creates an inactive refcounted os_object representing an os_eventlink_t. + * + * This function creates only one endpoint of an eventlink object. The other + * endpoint of the eventlink needs to be created from this eventlink object + * using one of the other creator functions - + * os_eventlink_create_remote_with_eventlink() or + * os_eventlink_create_with_port() + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create(const char *name); + +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) + +/* TODO: API for the future when we make a variant of eventlink that does + * copyin */ + +/*! + * @typedef os_eventlink_shared_data_t + * + * @abstract + * Pointer to an opaque structure identifying the data that is used to + * synchronize between the two endpoints of an eventlink. + * + * It is the client's responsibility to allocate this structure such that both + * threads on the two endpoints of the eventlink can synchronize with it, i.e. if + * the eventlink is between two threads in two processes, os_eventlink_shared_data_t + * should be allocated in shared memory between the two processes. + */ +typedef struct os_eventlink_shared_data_s { + uint64_t local_count; + uint64_t remote_count; +} os_eventlink_shared_data_s, *os_eventlink_shared_data_t; +#define OS_EVENTLINK_SHARED_DATA_INITIALIZER { 0 } + +/*!
+ * @function os_eventlink_set_shared_data + * + * @abstract + * Associates a shared data structure with the os_eventlink. + * + * As a performance enhancement, clients may choose to provide an opaque shared + * data structure in memory visible to both ends of the eventlink based on the + * usage pattern of the eventlink. + * + * Passing in NULL for shared data is recommended if the eventlink is to be used + * for the typical RPC ping-pong case whereby one side of the eventlink is + * always blocked waiting on a signal from the other side. In this case, each + * signal causes a single wakeup. + * + * Passing in shared data is recommended when one side of the eventlink is not + * necessarily always waiting for the other's signal in order to work. Passing + * in the shared data allows for more efficient signalling - potentially without + * any system calls. + */ +int +os_eventlink_set_shared_data(os_eventlink_t eventlink, + os_eventlink_shared_data_t data); + +#endif + +/*! + * @function os_eventlink_activate + * + * @abstract + * Activates the os_eventlink object for use. No further configuration can be + * done on the eventlink object after it has been activated. This API is not + * real-time safe. + * + * If an error is encountered, errno is set and returned. + */ +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_activate(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_extract_remote_port + * + * @abstract + * Returns a reference to a send right representing the remote endpoint of the + * eventlink. This port is to be passed to os_eventlink_create_with_port() to + * create an eventlink object. + * + * Calling this function multiple times on an eventlink object will result in an + * error. + * + * @param eventlink + * An eventlink returned from a previous call to os_eventlink_create(). This + * eventlink must have been activated. + */ +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_extract_remote_port(os_eventlink_t eventlink, mach_port_t *port_out); + +/*! + * @function os_eventlink_create_with_port + * + * @abstract + * Creates an inactive eventlink from a port returned from a previous call to + * os_eventlink_extract_remote_port. This function does not consume a reference + * on the specified send right. + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create_with_port(const char *name, mach_port_t mach_port); + +/*! + * @function os_eventlink_create_remote_with_eventlink + * + * @abstract + * Creates an inactive refcounted os_object representing an os_eventlink_t + * remote endpoint. Each eventlink has exactly one remote endpoint that can be + * created from it. Calling this function more than once on an eventlink object + * returned from os_eventlink_create() will result in an error. + * + * @param eventlink + * An eventlink returned from a previous call to os_eventlink_create(). This + * eventlink must have been activated. + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t eventlink); + +/*! + * @function os_eventlink_associate + * + * @abstract + * Associate a thread with the eventlink endpoint provided. The eventlink + * provided should be activated before this call. This API is not real-time + * safe. + * + * If a thread is already associated with the eventlink, errno is set and + * returned.
+ */ + +OS_ENUM(os_eventlink_associate_options, uint64_t, + OE_ASSOCIATE_CURRENT_THREAD = 0, + OE_ASSOCIATE_ON_WAIT = 0x1, +); + +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_associate(os_eventlink_t eventlink, + os_eventlink_associate_options_t options); + +/*! + * @function os_eventlink_disassociate + * + * @abstract + * Disassociate the current thread from the eventlink endpoint provided. This + * API is not real-time safe. + * + * If the current thread is not associated with the eventlink via a previous + * call to os_eventlink_associate, errno is set and returned. + */ +OS_EXPORT +int +os_eventlink_disassociate(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_wait + * + * @abstract + * Wait on the eventlink endpoint for a signal from the other endpoint. If there + * are outstanding signals, this function will consume them and return + * immediately. + * + * Upon receiving a signal, the function returns the number of signals that have + * been consumed by the waiter in the out parameter if specified. + * + * If the eventlink has not been previously associated via a call to + * os_eventlink_associate or if there is a mismatch between the associated + * thread and the current thread, the process will abort. This API call is + * real-time safe. + */ +OS_EXPORT +int +os_eventlink_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_wait_until + * + * @abstract + * Wait on the eventlink endpoint for a signal or until the timeout specified is + * hit. If there are outstanding signals, this function will consume them and + * return immediately. + * + * Upon success, the function returns the number of signals that have been + * consumed by the waiter in the out parameter, if provided. If the timeout is + * hit, then 0 signals are said to have been consumed by the waiter. This API + * call is real-time safe. + */ +OS_EXPORT +int +os_eventlink_wait_until(os_eventlink_t eventlink, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_signal + * + * @abstract + * Signal the other endpoint of an eventlink. This API call is real-time safe. + * + * If an error is encountered, errno will be set and returned. + */ +OS_EXPORT +int +os_eventlink_signal(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_signal_and_wait + * + * @abstract + * Signals on an eventlink endpoint and then proceeds to wait on it until the + * eventlink is signalled. Returns the number of signals consumed by the waiter + * through the out parameter if provided. This API call is real-time safe. + */ +OS_EXPORT +int +os_eventlink_signal_and_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_signal_and_wait_until + * + * @abstract + * Signals on an eventlink endpoint and then proceeds to wait on it until the + * eventlink is signalled or the timeout is hit. Returns the number of signals + * consumed by the waiter through the out parameter if provided, with 0 + * indicating that a timeout has been hit. This API call is real-time safe. + */ +OS_EXPORT +int +os_eventlink_signal_and_wait_until(os_eventlink_t eventlink, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out); + +/* + * @function os_eventlink_cancel + * + * @abstract + * Invalidates an eventlink.
The only follow-up actions possible on the eventlink + * after it has been invalidated are to disassociate from the eventlink and + * dispose of it. + * + * If the eventlink had a remote endpoint created, the remote side will get an + * ECANCELED when it tries to wait or signal on it. Existing waiters on the + * eventlink will get the same result as well. The only valid follow-up + * actions possible on a remote endpoint are to disassociate from the eventlink + * and dispose of it. + * + * This API is idempotent. It is not required to call this API before dropping + * the last reference to an eventlink. + */ +OS_EXPORT +void +os_eventlink_cancel(os_eventlink_t eventlink); + +OS_OBJECT_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_EVENTLINK__ */ diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index a633bf408..7ee0541ba 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -78,7 +78,7 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_buffer_t __firehose_buffer_create(size_t *size); -void +bool __firehose_merge_updates(firehose_push_reply_t update); int @@ -97,9 +97,11 @@ static inline const uint8_t * _firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { const uint8_t *start = fc->fc_data; - const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + const uint8_t *end; - if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { + if (fc->fc_pos.fcp_next_entry_offs <= FIREHOSE_CHUNK_SIZE) { + end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + } else { end = start; } *endptr = end; diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index d2c379e62..bab44824b 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -399,6 +399,7 @@ OS_ENUM(firehose_server_queue, unsigned long, FIREHOSE_SERVER_QUEUE_UNKNOWN, FIREHOSE_SERVER_QUEUE_IO, FIREHOSE_SERVER_QUEUE_MEMORY, + FIREHOSE_SERVER_QUEUE_IO_WL, ); /*! diff --git a/os/object.h b/os/object.h index 1ad1158c5..e2ce3f467 100644 --- a/os/object.h +++ b/os/object.h @@ -91,14 +91,22 @@ #endif #ifndef OS_OBJECT_SWIFT3 -#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ - SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#ifdef __swift__ #define OS_OBJECT_SWIFT3 1 -#else +#else // __swift__ #define OS_OBJECT_SWIFT3 0 -#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#endif // __swift__ #endif // OS_OBJECT_SWIFT3 +#if __has_feature(assume_nonnull) +#define OS_OBJECT_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_OBJECT_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_OBJECT_ASSUME_NONNULL_BEGIN +#define OS_OBJECT_ASSUME_NONNULL_END +#endif +#define OS_OBJECT_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #if OS_OBJECT_USE_OBJC #import #if __has_attribute(objc_independent_class) @@ -117,9 +125,9 @@ #define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, proto) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL( \ OS_OBJECT_CLASS(name), OS_OBJECT_CLASS(proto)) -#define OS_OBJECT_DECL_IMPL(name, ...) \ +#define OS_OBJECT_DECL_IMPL(name, adhere, ...) \ OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \ - typedef NSObject \ + typedef adhere \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL_BASE(name, ...) \ @interface OS_OBJECT_CLASS(name) : __VA_ARGS__ \ @@ -130,9 +138,9 @@ typedef OS_OBJECT_CLASS(name) \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL(name, ...)
\ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else @@ -150,6 +158,8 @@ #define OS_OBJECT_BRIDGE #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif + + #if __has_attribute(objc_runtime_visible) && \ ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) || \ @@ -164,7 +174,7 @@ /* * To provide backward deployment of ObjC objects in Swift on pre-10.12 * SDKs, OS_object classes can be marked as OS_OBJECT_OBJC_RUNTIME_VISIBLE. - * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, + * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, * tvOS 10.0, watchOS 3.0) the Swift compiler will only refer to this type at * runtime (using the ObjC runtime). */ @@ -188,9 +198,9 @@ #define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \ OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) +#endif // OS_OBJECT_SWIFT3 OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_DECL_BASE(object, NSObject); -#endif // OS_OBJECT_SWIFT3 #else /*! @parseOnly */ #define OS_OBJECT_RETURNS_RETAINED @@ -216,6 +226,27 @@ OS_OBJECT_DECL_BASE(object, NSObject); typedef struct name##_s *name##_t #endif +#if OS_OBJECT_USE_OBJC +/* Declares a class of the given name, exposes its interface, and typedefs + * name##_t as a pointer to the class */ +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, ## __VA_ARGS__ ) +/* Declares a subclass of the given name whose interface adheres to the + * specified protocol, and typedefs baseclass * to subclass##_t */ +#define OS_OBJECT_SHOW_SUBCLASS(subclass_name, super, proto_name) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_BASE(subclass_name, OS_OBJECT_CLASS(super)); \ + typedef OS_OBJECT_CLASS(super) \ + * OS_OBJC_INDEPENDENT_CLASS subclass_name##_t +#else /* Plain C */ +#define OS_OBJECT_DECL_PROTOCOL(name, ...) +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + typedef struct name##_s *name##_t +#define OS_OBJECT_SHOW_SUBCLASS(name, super, ...)
\ + typedef super##_t name##_t +#endif + #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) __BEGIN_DECLS diff --git a/os/object_private.h b/os/object_private.h index a667f79f0..0d58e8650 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -30,6 +30,12 @@ #include #include #include +#if __has_include() +#include +#endif +#ifndef __ptrauth_objc_isa_pointer +#define __ptrauth_objc_isa_pointer +#endif #if __GNUC__ #define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) @@ -63,7 +69,7 @@ #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX #define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ - isa; /* must be pointer-sized */ \ + isa; /* must be pointer-sized and use __ptrauth_objc_isa_pointer */ \ int volatile ref_cnt; \ int volatile xref_cnt @@ -97,10 +103,26 @@ #define OS_OBJECT_CLASS(name) OS_##name +#if OS_OBJECT_USE_OBJC +#define OS_OBJECT_USES_XREF_DISPOSE() \ + - (oneway void)release { \ + _os_object_release((OS_object *) self); \ + } +#endif + +#if __has_attribute(objc_nonlazy_class) +#define OS_OBJECT_NONLAZY_CLASS __attribute__((objc_nonlazy_class)) +#define OS_OBJECT_NONLAZY_CLASS_LOAD +#else +#define OS_OBJECT_NONLAZY_CLASS +#define OS_OBJECT_NONLAZY_CLASS_LOAD + (void)load { } +#endif + #if OS_OBJECT_USE_OBJC && OS_OBJECT_SWIFT3 @interface OS_OBJECT_CLASS(object) (OSObjectPrivate) +// Note: objects that want _xref_dispose to be called need +// to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; -- (void)_dispose; @end OS_OBJECT_DECL_PROTOCOL(object, ); typedef OS_OBJECT_CLASS(object) *_os_object_t; @@ -113,11 +135,10 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) #elif OS_OBJECT_USE_OBJC -API_AVAILABLE(macos(10.8), ios(6.0)) -OS_OBJECT_EXPORT -@interface OS_OBJECT_CLASS(object) : NSObject +@interface OS_OBJECT_CLASS(object) (OSObjectPrivate) +// Note: objects that want _xref_dispose to be called need +// to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; -- (void)_dispose; @end typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ @@ -141,7 +162,7 @@ API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t -_os_object_alloc(const void *cls, size_t size); +_os_object_alloc(const void * _Nullable cls, size_t size); API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW @@ -172,6 +193,12 @@ OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") +void +_os_object_release_without_xref_dispose(_os_object_t object); + API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") diff --git a/os/voucher_private.h b/os/voucher_private.h index ad4e31274..3e72c919a 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -24,6 +24,9 @@ #if __APPLE__ #include #include + +#include +#define OS_VOUCHER_TSD_KEY __PTK_LIBDISPATCH_KEY8 #endif #if __has_include() #include @@ -101,12 +104,41 @@ OS_OBJECT_DECL_CLASS(voucher); * @result * The previously adopted voucher object.
*/ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW voucher_t _Nullable voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); +/*! + * @function voucher_needs_adopt + * + * @abstract + * An inline check to determine if the input voucher matches the one + * on the current thread. This can be used to short-circuit calls to + * voucher_adopt() and avoid a cross-library jump. If this function returns + * true, then the client should make sure to follow up with a voucher_adopt() + * call. + * + * This check must only be in code that ships with the operating system since + * the TSD key assignment is not ABI. + * + * @param voucher + * The input voucher being tested + */ + +SPI_AVAILABLE(macos(12.0), ios(15.0)) +__header_always_inline bool +voucher_needs_adopt(voucher_t _Nullable voucher) +{ +#if __APPLE__ + if (_pthread_has_direct_tsd()) { + return (((void *) voucher) != _pthread_getspecific_direct(OS_VOUCHER_TSD_KEY)); + } +#endif + return true; +} + /*! * @function voucher_copy * @@ -117,7 +149,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); * @result * The currently adopted voucher object. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); @@ -136,7 +168,7 @@ voucher_copy(void); * @result * A copy of the currently adopted voucher object, with importance removed. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy_without_importance(void); @@ -162,7 +194,7 @@ voucher_copy_without_importance(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_replace_default_voucher(void); @@ -180,7 +212,7 @@ voucher_replace_default_voucher(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); @@ -264,7 +296,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -345,7 +377,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -363,7 +395,7 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * @abstract * Deprecated, do not use, will abort process if called.
*/ -API_DEPRECATED("removed SPI", \ +SPI_DEPRECATED("removed SPI", \ macos(10.11,10.13), ios(9.0,11.0), watchos(2.0,4.0), tvos(9.0,11.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -397,7 +429,7 @@ dispatch_queue_create_with_accounting_override_voucher( * The newly created voucher object or NULL if the message was not carrying a * mach voucher. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); @@ -437,14 +469,14 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg); * * @param max_hex_data * The maximum number of bytes of hex data to be formatted for voucher content - * that is not of type MACH_VOUCHER_ATTR_KEY_ATM, MACH_VOUCHER_ATTR_KEY_BANK - * or MACH_VOUCHER_ATTR_KEY_IMPORTANCE. + * that is not of type MACH_VOUCHER_ATTR_KEY_BANK or + * MACH_VOUCHER_ATTR_KEY_IMPORTANCE. * * @result * The offset of the first byte in the buffer following the formatted voucher * representation. */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +SPI_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW DISPATCH_COLD size_t voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, @@ -479,7 +511,7 @@ struct proc_persona_info; * or the persona identifier of the current process * or PERSONA_ID_NONE */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -502,7 +534,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -526,12 +558,23 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( struct proc_persona_info *persona_info); +/*! + * @function voucher_process_can_use_arbitrary_personas + * + * @abstract + * Returns true if the current process is able to use arbitrary personas + */ +SPI_AVAILABLE(macos(12.0), ios(15.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT +bool +voucher_process_can_use_arbitrary_personas(void); + /*! * @function voucher_copy_with_persona_mach_voucher * @@ -578,7 +621,7 @@ voucher_copy_with_persona_mach_voucher( * KERN_RESOURCE_SHORTAGE: mach voucher creation failed due to * lack of free space */ -API_AVAILABLE(macos(10.14), ios(12)) +SPI_AVAILABLE(macos(10.15), ios(12)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 kern_return_t mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher); diff --git a/os/workgroup.h b/os/workgroup.h new file mode 100644 index 000000000..96b870c10 --- /dev/null +++ b/os/workgroup.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP__ +#define __OS_WORKGROUP__ + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#ifndef __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#endif /* __OS_WORKGROUP_INDIRECT__ */ + +#include <os/workgroup_base.h> +#include <os/workgroup_object.h> +#include <os/workgroup_interval.h> +#include <os/workgroup_parallel.h> + +#undef __OS_WORKGROUP_INDIRECT__ +#endif /* __DISPATCH_BUILDING_DISPATCH__ */ + +#endif /* __OS_WORKGROUP__ */ diff --git a/os/workgroup_base.h b/os/workgroup_base.h new file mode 100644 index 000000000..3983f002a --- /dev/null +++ b/os/workgroup_base.h @@ -0,0 +1,78 @@ +#ifndef __OS_WORKGROUP_BASE__ +#define __OS_WORKGROUP_BASE__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include <os/workgroup.h> instead of this file directly." +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if __has_feature(assume_nonnull) +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_WORKGROUP_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN +#define OS_WORKGROUP_ASSUME_NONNULL_END +#endif +#define OS_WORKGROUP_WARN_RESULT __attribute__((__warn_unused_result__)) +#define OS_WORKGROUP_EXPORT OS_EXPORT +#define OS_WORKGROUP_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED + +#define OS_WORKGROUP_DECL(name, swift_name) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_CLASS(name, OS_OBJECT_CLASS(object)) + +#if OS_OBJECT_USE_OBJC +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_DECL_PROTOCOL(name ## __VA_ARGS__ ) +#else +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) +#endif + +#define OS_WORKGROUP_SUBCLASS_DECL(name, super, swift_name, ...) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_SUBCLASS(name, super, name, ## __VA_ARGS__) + +#if defined(__LP64__) +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 36 +#else +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 28 +#endif + +#define _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT 0x2FA863B4 +#define _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT 0x2FA863C4 + +struct OS_REFINED_FOR_SWIFT os_workgroup_attr_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_ATTR_SIZE__]; +}; + +#define _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT 0x52A74C4D struct OS_REFINED_FOR_SWIFT os_workgroup_interval_data_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_INTERVAL_DATA_SIZE__]; +}; + +struct OS_REFINED_FOR_SWIFT os_workgroup_join_token_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_JOIN_TOKEN_SIZE__]; +}; + +#endif /* __OS_WORKGROUP_BASE__ */ diff --git a/os/workgroup_interval.h b/os/workgroup_interval.h new file mode 100644 index 000000000..b056f82cf --- /dev/null +++ b/os/workgroup_interval.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved.
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_INTERVAL__ +#define __OS_WORKGROUP_INTERVAL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_interval_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking work performed as part of + * a repeating interval-driven workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_interval_s *os_workgroup_interval_t; +#else +OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_interval, Repeatable); +OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_interval, os_workgroup, WorkGroupInterval); +#endif + +/* During the first instance of this API, the only supported interval + * workgroups are for audio workloads. Please refer to the AudioToolbox + * framework for more information. + */ + +/* + * @typedef os_workgroup_interval_data, os_workgroup_interval_data_t + * + * @abstract + * An opaque structure containing additional configuration for the workgroup + * interval. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) +typedef struct os_workgroup_interval_data_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_s *os_workgroup_interval_data_t; +#else +typedef struct os_workgroup_interval_data_opaque_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_opaque_s *os_workgroup_interval_data_t; +#endif +#define OS_WORKGROUP_INTERVAL_DATA_INITIALIZER \ + { .sig = _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT } + +/*! + * @function os_workgroup_interval_start + * + * @abstract + * Indicates to the system that the member threads of this + * os_workgroup_interval_t have begun working on an instance of the repeatable + * interval workload with the specified timestamps. This function is real time + * safe. + * + * This function will set and return an errno in the following cases: + * + * - The current thread is not a member of the os_workgroup_interval_t + * - The os_workgroup_interval_t has been cancelled + * - The timestamps passed in are malformed + * - os_workgroup_interval_start() was previously called on the + * os_workgroup_interval_t without an intervening os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place. + * + * @param start + * Start timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This is generally a time in the past and + * indicates when the workgroup started working on an interval period + * + * @param deadline + * Deadline timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This specifies the deadline which the + * interval period would like to meet. 
+ * + * @param data + * This field is currently unused and should be NULL + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_start(os_workgroup_interval_t wg, uint64_t start, uint64_t + deadline, os_workgroup_interval_data_t _Nullable data); + +/*! + * @function os_workgroup_interval_update + * + * @abstract + * Updates an already started interval workgroup to have the new + * deadline specified. This function is real time safe. + * + * This function will return an error in the following cases: + * - The current thread is not a member of the os_workgroup_interval_t + * - The os_workgroup_interval_t has been cancelled + * - The timestamp passed in is malformed + * - os_workgroup_interval_start() was not previously called on the + * os_workgroup_interval_t or was already matched with an + * os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place + * + * @param deadline + * Timestamp specified in the os_clockid_t with + * which the os_workgroup_interval_t was created. + * + * @param data + * This field is currently unused and should be NULL + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_update(os_workgroup_interval_t wg, uint64_t deadline, + os_workgroup_interval_data_t _Nullable data); + +/*! + * @function os_workgroup_interval_finish + * + * @abstract + * Indicates to the system that the member threads of + * this os_workgroup_interval_t have finished working on the current instance + * of the interval workload. This function is real time safe. + * + * This function will return an error in the following cases: + * - The current thread is not a member of the os_workgroup_interval_t + * - os_workgroup_interval_start() was not previously called on the + * os_workgroup_interval_t or was already matched with an + * os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place. + * + * @param data + * This field is currently unused and should be NULL + * + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_finish(os_workgroup_interval_t wg, + os_workgroup_interval_data_t _Nullable data); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_INTERVAL__ */ diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h new file mode 100644 index 000000000..48ddc7301 --- /dev/null +++ b/os/workgroup_interval_private.h @@ -0,0 +1,188 @@ +#ifndef __OS_WORKGROUP_INTERVAL_PRIVATE__ +#define __OS_WORKGROUP_INTERVAL_PRIVATE__ + +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/* + * @typedef os_workgroup_interval_type_t + * + * @abstract + * Describes a specialized os_workgroup_interval type the client would like to + * create. 
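As a concrete illustration of the start/finish pairing documented above, a hedged sketch follows. It assumes an os_workgroup_interval_t created with OS_CLOCK_MACH_ABSOLUTE_TIME whose current thread has already joined; render_one_period() is a stand-in for the client's per-interval workload:

    #include <os/workgroup.h>
    #include <mach/mach_time.h>

    extern void render_one_period(void); /* hypothetical per-interval work */

    static void
    interval_loop(os_workgroup_interval_t wg, uint64_t period_ticks, int count)
    {
        for (int i = 0; i < count; i++) {
            uint64_t now = mach_absolute_time();
            /* start() and finish() must stay balanced on this thread */
            if (os_workgroup_interval_start(wg, now, now + period_ticks, NULL)) {
                return; /* cancelled, malformed timestamps, or unbalanced start */
            }
            render_one_period();
            (void)os_workgroup_interval_finish(wg, NULL);
        }
    }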
+ * + * Clients need the 'com.apple.private.kernel.work-interval' entitlement to + * create all workgroup types listed below except the following: + * + * OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, + * OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT, + * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + * + * Note that only real time threads are allowed to join workgroups of type + * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT and + * OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO. + */ +OS_ENUM(os_workgroup_interval_type, uint16_t, + OS_WORKGROUP_INTERVAL_TYPE_DEFAULT = 0x1, + OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT, + OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + + OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO, + OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION, + OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER, + OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY, + OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA, + + OS_WORKGROUP_INTERVAL_TYPE_ARKIT, +); + +/* + * @function os_workgroup_attr_set_interval_type + * + * @abstract + * Specifies that the os_workgroup_interval_t to be created should be of a + * specialized type. These types should only be specified when creating an + * os_workgroup_interval_t using the os_workgroup_interval_create or + * os_workgroup_interval_create_with_workload_id APIs - using it with any other + * workgroup creation API will result in an error at creation time. + * + * When used with os_workgroup_interval_create_with_workload_id, the type + * specified via this attribute must match the one configured by the system for + * the provided workload identifier (if that identifier is known). + * + * Setting type OS_WORKGROUP_INTERVAL_TYPE_DEFAULT on an os_workgroup_interval_t + * is a no-op. + * + * EINVAL is returned if the attribute passed in hasn't been initialized. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT +int +os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, + os_workgroup_interval_type_t type); + +/* + * @function os_workgroup_interval_create + * + * @abstract + * Creates an os_workgroup_interval_t with the specified name and attributes. + * This object tracks a repeatable workload characterized by a start time, end + * time and targeted deadline. Example use cases include audio and graphics + * rendering workloads. + * + * A newly created os_workgroup_interval_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_interval_t + * implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param clockid + * The clockid in which timestamps passed to the os_workgroup_interval_start() + * and os_workgroup_interval_update() functions are specified. + * + * @param attrs + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, an interval workgroup + * is nonpropagating with asynchronous work and differentiated from other threads + * in the process (see os_workgroup_attr_flags_t). + * + * The OS_WORKGROUP_ATTR_UNDIFFERENTIATED attribute is invalid to specify for + * interval workgroups. If it is specified, or if invalid attributes are specified, this + * function returns NULL and sets errno.
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create(const char * _Nullable name, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + +/* + * @function os_workgroup_interval_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_interval_t with the specified name and workload + * identifier. + * This object tracks a repeatable workload characterized by a start time, end + * time and targeted deadline. Example use cases include audio and graphics + * rendering workloads. + * + * The newly created os_workgroup_interval_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_interval_t + * implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * If the specified identifier is known, it must refer to a workload configured + * as being of interval type, or this function will return NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @param clockid + * The clockid in which timestamps passed to the os_workgroup_interval_start() + * and os_workgroup_interval_update() functions are specified. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * The interval type specified by the attributes will be used as a fallback in + * case the provided workload identifier is unknown. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * or explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). 
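A hedged sketch of the workload-identifier creation flow described above (the declaration itself follows in the diff). The identifier string is a made-up example; per the resolution rules, the attribute's interval type acts only as the documented fallback when that identifier is unknown to the system:

    #include <os/workgroup_private.h>

    static os_workgroup_interval_t
    make_render_workgroup(void)
    {
        os_workgroup_attr_s attr = OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT;
        /* Fallback type, used only if "com.example.render" is unknown. */
        if (os_workgroup_attr_set_interval_type(&attr,
                OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT)) {
            return NULL;
        }
        return os_workgroup_interval_create_with_workload_id("render",
                "com.example.render", OS_CLOCK_MACH_ABSOLUTE_TIME, &attr);
    }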
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + +/* This SPI is for use by Audio Toolbox only. This function returns a reference + * which is the responsibility of the caller to manage. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t +os_workgroup_interval_copy_current_4AudioToolbox(void); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS +#endif /* __OS_WORKGROUP_INTERVAL_PRIVATE__ */ diff --git a/os/workgroup_object.h b/os/workgroup_object.h new file mode 100644 index 000000000..5c8bd4f1a --- /dev/null +++ b/os/workgroup_object.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_OBJECT__ +#define __OS_WORKGROUP_OBJECT__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_t + * + * @abstract + * A reference counted os object representing a workload that needs to + * be distinctly recognized and tracked by the system. The workgroup + * tracks a collection of threads all working cooperatively. An os_workgroup + * object - when not an instance of a specific os_workgroup_t subclass - + * represents a generic workload and makes no assumptions about the kind of + * work done. + * + * @discussion + * Threads can explicitly join an os_workgroup_t to mark themselves as + * participants in the workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_s *os_workgroup_t; +#else +OS_WORKGROUP_DECL(os_workgroup, WorkGroup); +#endif + + +/* Attribute creation and specification */ + +/*! + * @typedef os_workgroup_attr_t + * + * @abstract + * Pointer to an opaque structure for describing attributes that can be + * configured on a workgroup at creation. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_attr_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_s *os_workgroup_attr_t; +#else +typedef struct os_workgroup_attr_opaque_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_opaque_s *os_workgroup_attr_t; +#endif + +/* os_workgroup_t attributes need to be initialized before use. This initializer + * allows you to create a workgroup with the system default attributes. 
*/ +#define OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT \ + { .sig = _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT } + + + +/* The main use of the workgroup API is through instantiations of the concrete + * subclasses - please refer to os/workgroup_interval.h and + * os/workgroup_parallel.h for more information on creating workgroups. + * + * The functions below operate on all subclasses of os_workgroup_t. + */ + +/*! + * @function os_workgroup_copy_port + * + * @abstract + * Returns a reference to a send right representing this workgroup that is to be + * sent to other processes. This port is to be passed to + * os_workgroup_create_with_port() to create a workgroup object. + * + * It is the client's responsibility to release the send right reference. + * + * If an error is encountered, errno is set and returned. + */ +API_AVAILABLE(macos(10.16)) +SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out); + +/*! + * @function os_workgroup_create_with_port + * + * @abstract + * Create an os_workgroup_t object from a send right returned by a previous + * call to os_workgroup_copy_port, potentially in a different process. + * + * A newly created os_workgroup_t has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param mach_port + * The send right to create the workgroup from. No reference is consumed + * on the specified send right. + */ +API_AVAILABLE(macos(10.16)) +SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0)) +OS_SWIFT_NAME(WorkGroup.init(__name:port:)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_port(const char *_Nullable name, mach_port_t mach_port); + +/*! + * @function os_workgroup_create_with_workgroup + * + * @abstract + * Create a new os_workgroup object from an existing os_workgroup. + * + * The newly created os_workgroup has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param wg + * The existing workgroup to create a new workgroup object from. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workgroup(const char * _Nullable name, os_workgroup_t wg); + +/*! + * @typedef os_workgroup_join_token, os_workgroup_join_token_t + * + * @abstract + * An opaque join token which the client needs to pass to os_workgroup_join + * and os_workgroup_leave + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_join_token_s os_workgroup_join_token_s; +typedef struct os_workgroup_join_token_s *os_workgroup_join_token_t; +#else +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_join_token_opaque_s os_workgroup_join_token_s; +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_join_token_opaque_s *os_workgroup_join_token_t; +#endif + + +/*! + * @function os_workgroup_join + * + * @abstract + * Joins the current thread to the specified workgroup and populates the join + * token that has been passed in. This API is real-time safe.
+ * + * @param wg + * The workgroup that the current thread would like to join + * + * @param token_out + * Pointer to a client allocated struct which the function will populate + * with the join token. This token must be passed in by the thread when it calls + * os_workgroup_leave(). + * + * Errors will be returned in the following cases: + * + * EALREADY The thread is already part of a workgroup that the specified + * workgroup does not nest with + * EINVAL The workgroup has been cancelled + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token_out); + +/*! + * @function os_workgroup_leave + * + * @abstract + * This removes the current thread from a workgroup it has previously + * joined. Threads must leave all workgroups in the reverse order that they + * have joined them. Failing to do so before exiting will result in undefined + * behavior. + * + * If the join token is malformed, the process will be aborted. + * + * This API is real time safe. + * + * @param wg + * The workgroup that the current thread would like to leave. + * + * @param token + * This is the join token populated by the most recent call to + * os_workgroup_join(). + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void +os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token); + +/* Working Arena index of a thread in a workgroup */ +typedef uint32_t os_workgroup_index; +/* Destructor for Working Arena */ +typedef void (*os_workgroup_working_arena_destructor_t)(void * _Nullable); + +/*! + * @function os_workgroup_set_working_arena + * + * @abstract + * Associates a client defined working arena with the workgroup. The arena + * is local to the workgroup object in the process. This is intended for + * distributing a manually managed memory allocation between member threads + * of the workgroup. + * + * This function can be called multiple times and the client specified + * destructor will be called on the previously assigned arena, if any. This + * function can only be called when no threads have currently joined the + * workgroup and all workloops associated with the workgroup are idle. + * + * @param wg + * The workgroup to associate the working arena with + * + * @param arena + * The client managed arena to associate with the workgroup. This value can + * be NULL. + * + * @param max_workers + * The maximum number of threads that will ever query the workgroup for the + * arena and request an index into it. If the arena is not used to partition + * work amongst member threads, then this field can be 0. + * + * @param destructor + * A destructor to call on the previously assigned working arena, if any + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable arena, + uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor); + +/*! + * @function os_workgroup_get_working_arena + * + * @abstract + * Returns the working arena associated with the workgroup and the current + * thread's index in the workgroup. This function can only be called by a member + * of the workgroup. Multiple calls to this API by a member thread will return + * the same arena and index until the thread leaves the workgroup. 
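To show the join/leave discipline just documented, here is a minimal sketch; the wrapper is hypothetical and error handling is reduced to returning the errno value from a failed join:

    #include <os/workgroup.h>

    static int
    run_as_member(os_workgroup_t wg, void (*work)(void))
    {
        os_workgroup_join_token_s token; /* caller-owned, stack-allocated */
        int rc = os_workgroup_join(wg, &token);
        if (rc) return rc; /* EALREADY (non-nesting) or EINVAL (cancelled) */
        work(); /* this thread is now tracked as part of the workload */
        os_workgroup_leave(wg, &token); /* must pair with the matching join */
        return 0;
    }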
+ * + * For workloops with an associated workgroup, every work item on the workloop + * will receive the same index in the arena. + * + * This method returns NULL if no arena is set on the workgroup. The index + * returned by this function is zero-based and is namespaced per workgroup + * object in the process. The indices provided are strictly monotonic and never + * reused until a future call to os_workgroup_set_working_arena. + * + * @param wg + * The workgroup to get the working arena from. + * + * @param index_out + * A pointer to an os_workgroup_index which will be populated by the caller's + * index in the workgroup. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void * _Nullable +os_workgroup_get_working_arena(os_workgroup_t wg, + os_workgroup_index * _Nullable index_out); + +/*! + * @function os_workgroup_cancel + * + * @abstract + * This API invalidates a workgroup and indicates to the system that the + * workload is no longer relevant to the caller. + * + * No new work should be initiated for a cancelled workgroup and + * work that is already underway should periodically check for + * cancellation with os_workgroup_testcancel and initiate cleanup if needed. + * + * Threads currently in the workgroup continue to be tracked together but no + * new threads may join this workgroup - the only possible operation allowed is + * to leave the workgroup. Other actions may have undefined behavior or + * otherwise fail. + * + * This API is idempotent. Cancellation is local to the workgroup object + * it is called on and does not affect other workgroups. + * + * @param wg + * The workgroup that the thread would like to cancel + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void +os_workgroup_cancel(os_workgroup_t wg); + +/*! + * @function os_workgroup_testcancel + * + * @abstract + * Returns true if the workgroup object has been cancelled. See also + * os_workgroup_cancel + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +bool +os_workgroup_testcancel(os_workgroup_t wg); + +/*! + * @typedef os_workgroup_max_parallel_threads_attr_t + * + * @abstract + * A pointer to a structure describing the set of properties of a workgroup to + * override with the explicitly specified values in the structure. + * + * See also os_workgroup_max_parallel_threads. + */ +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_max_parallel_threads_attr_s os_workgroup_mpt_attr_s; +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_max_parallel_threads_attr_s *os_workgroup_mpt_attr_t; + +/*! + * @function os_workgroup_max_parallel_threads + * + * @abstract + * Returns the system's recommendation for maximum number of threads the client + * should make for a multi-threaded workload in a given workgroup. + * + * This API takes into consideration the current hardware the code is running on + * and the attributes of the workgroup. It does not take into consideration the + * current load of the system and therefore always provides the most optimal + * recommendation for the workload. + * + * @param wg + * The workgroup in which the multi-threaded workload will be performed. The + * threads performing the multi-threaded workload are expected to join this + * workgroup. + * + * @param attr + * This value is currently unused and should be NULL.
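The arena and parallelism APIs above compose naturally; the following hedged sketch sizes a scratch arena from os_workgroup_max_parallel_threads() and lets each member thread claim a distinct slice. SLICE_BYTES and the helper names are illustrative assumptions:

    #include <os/workgroup.h>
    #include <errno.h>
    #include <stdlib.h>

    #define SLICE_BYTES 4096 /* hypothetical per-worker scratch size */

    static int
    arena_setup(os_workgroup_t wg) /* call while no threads are joined */
    {
        int n = os_workgroup_max_parallel_threads(wg, NULL);
        void *arena = calloc((size_t)n, SLICE_BYTES);
        if (!arena) return ENOMEM;
        return os_workgroup_set_working_arena(wg, arena, (uint32_t)n, free);
    }

    static void *
    arena_slice(os_workgroup_t wg) /* call only from a member thread */
    {
        os_workgroup_index idx;
        char *base = os_workgroup_get_working_arena(wg, &idx);
        return base ? base + (size_t)idx * SLICE_BYTES : NULL;
    }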
*/ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +int +os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t + _Nullable attr); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_OBJECT__ */ diff --git a/os/workgroup_object_private.h b/os/workgroup_object_private.h new file mode 100644 index 000000000..ec7ebee71 --- /dev/null +++ b/os/workgroup_object_private.h @@ -0,0 +1,285 @@ +#ifndef __OS_WORKGROUP_OBJECT_PRIVATE__ +#define __OS_WORKGROUP_OBJECT_PRIVATE__ + +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ +#error "Please #include <os/workgroup_private.h> instead of this file directly." +#include <os/workgroup_private.h> // For header doc +#endif + +#include <os/workgroup_object.h> + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/* Attribute creation and specification */ + +/* This is for clients who want to build their own workgroup attribute from + * scratch instead of configuring their attributes on top of the default set of + * attributes */ +#define OS_WORKGROUP_ATTR_INITIALIZER_EMPTY { .sig = _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT } + +/*! + * @enum os_workgroup_attr_flags_t + * + * @abstract A bitfield of flags describing options for workgroup configuration + */ +OS_ENUM(os_workgroup_attr_flags, uint32_t, + /*! + * @const OS_WORKGROUP_ATTR_NONPROPAGATING + * + * Asynchronous work initiated by threads which are members of a + * workgroup with the OS_WORKGROUP_ATTR_NONPROPAGATING attribute will not + * automatically be tracked as part of the workgroup. This applies to work + * initiated by calls such as dispatch_async() that may propagate other + * execution context properties. + * + * os_workgroups which are propagating by default can opt out of this behavior + * by specifying the OS_WORKGROUP_ATTR_NONPROPAGATING flag. + */ + OS_WORKGROUP_ATTR_NONPROPAGATING = (1 << 1), + + /*! + * @const OS_WORKGROUP_ATTR_UNDIFFERENTIATED + * + * Member threads of a workgroup with the attribute flag + * OS_WORKGROUP_ATTR_UNDIFFERENTIATED are tracked and measured together with + * other threads in their process by the system for scheduling and + * performance control. + * + * os_workgroups which are tracked separately from other threads in + * the process by default can opt out of it by specifying the + * OS_WORKGROUP_ATTR_UNDIFFERENTIATED flag. + */ + OS_WORKGROUP_ATTR_UNDIFFERENTIATED = (1 << 2) +); + +/*! + * @function os_workgroup_attr_set_flags + * + * @abstract + * Sets the user specified flags in the workgroup attribute. If invalid + * attributes are specified, this function will set and return an error. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_attr_set_flags(os_workgroup_attr_t wga, + os_workgroup_attr_flags_t flags); + +/*! + * @function os_workgroup_create + * + * @abstract + * Creates an os_workgroup_t with the specified name and attributes. + * A newly created os_workgroup_t has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. A workgroup with default attributes is + * propagating with asynchronous work and differentiated from other threads in + * the process (see os_workgroup_attr_flags_t).
+ * + * The attribute flag OS_WORKGROUP_ATTR_NONPROPAGATING MUST currently be + * specified. If it isn't or if invalid attributes are specified, this function + * will return NULL and set an errno. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create(const char * _Nullable name, + os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_t with the specified name and workload identifier. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * Currently NULL or the default set of attributes are the only valid + * attributes for this function. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * or explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id_and_port + * + * @abstract + * Create an os_workgroup_t object with the specified name and workload + * identifier from a send right returned by a previous call to + * os_workgroup_copy_port, potentially in a different process. 
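A small sketch of the private creation path described above, honoring the note that OS_WORKGROUP_ATTR_NONPROPAGATING must currently be requested; the wrapper name is an assumption:

    #include <os/workgroup_private.h>

    static os_workgroup_t
    make_private_workgroup(const char *label)
    {
        os_workgroup_attr_s attr = OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT;
        if (os_workgroup_attr_set_flags(&attr, OS_WORKGROUP_ATTR_NONPROPAGATING)) {
            return NULL; /* invalid attribute combination */
        }
        return os_workgroup_create(label, &attr); /* NULL + errno on failure */
    }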
+ * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @param mach_port + * The send right to create the workgroup from. No reference is consumed + * on the specified send right. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and send right, applied in order: + * - If the provided workload identifier is known, and the provided send right + * references a workgroup that was created with a parameter that is also + * configured by the identifier, the parameter value configured by the + * identifier will be used. For certain parameters such as the kernel + * work_interval type underlying a workgroup interval type, it is required + * that the two parameter values must match, or this function will fail and + * return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the workgroup referenced by the provided send right are used. + * - If a given parameter is neither configured by a known workload identifier + * or was used to create the workgroup referenced by the provided send right, + * a system-dependent fallback value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_port(const char * _Nullable name, + const char *workload_id, mach_port_t mach_port); + +/*! + * @function os_workgroup_create_with_workload_id_and_workgroup + * + * @abstract + * Create a new os_workgroup object with the specified name and workload + * identifier from an existing os_workgroup. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. + * + * @param wg + * The existing workgroup to create a new workgroup object from. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. 
+ * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and existing workgroup, applied in order: + * - If the provided workload identifier is known, and the provided workgroup + * was created with a parameter that is also configured by the identifier, + * the parameter value configured by the identifier will be used. For certain + * parameters such as the kernel work_interval type underlying a workgroup + * interval type, it is required that the two parameter values must match, or + * this function will fail and return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the provided workgroup will be used. + * - If a given parameter is neither configured by a known workload identifier + * or was used to create the provided workgroup, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_workgroup(const char * _Nullable name, + const char *workload_id, os_workgroup_t wg); + +/* To be deprecated once coreaudio adopts */ +#define OS_WORKGROUP_ATTR_INITIALIZER OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT + +typedef uint32_t os_workgroup_index; + +/* Deprecated in favor of os_workgroup_join */ +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token_out, + os_workgroup_index *_Nullable id_out); + +/* Deprecated in favor of os_workgroup_leave */ +OS_WORKGROUP_EXPORT +void +os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_OBJECT__ */ diff --git a/os/workgroup_parallel.h b/os/workgroup_parallel.h new file mode 100644 index 000000000..2aca7f861 --- /dev/null +++ b/os/workgroup_parallel.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_PARALLEL__ +#define __OS_WORKGROUP_PARALLEL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +#include + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_parallel_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking parallel work. 
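Tying together the cross-process pieces above: a hedged sketch of a receiver that rebuilds a local workgroup object from a send right produced by os_workgroup_copy_port() in another process; the workload identifier is a made-up example:

    #include <mach/mach.h>
    #include <os/workgroup_private.h>

    static os_workgroup_t
    import_workgroup(mach_port_t port_from_sender)
    {
        /* No reference is consumed on the send right; where the identifier
         * is known to the system, its configuration wins per the rules above. */
        return os_workgroup_create_with_workload_id_and_port("imported",
                "com.example.media.playback", port_from_sender);
    }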
+ */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_s *os_workgroup_parallel_t; +#else +OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_parallel, Parallelizable); +OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_parallel, os_workgroup, WorkGroupParallel); +#endif + +/*! + * @function os_workgroup_parallel_create + * + * @abstract + * Creates an os_workgroup_t which tracks a parallel workload. + * A newly created os_workgroup_parallel_t has no initial member threads - + * in particular the creating thread does not join the os_workgroup_parallel_t + * implicitly. + * + * See also os_workgroup_max_parallel_threads(). + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param attr + * The requested set of workgroup attributes. NULL is to be specified for the + * default set of attributes. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +OS_SWIFT_NAME(WorkGroupParallel.init(__name:attr:)) +os_workgroup_parallel_t _Nullable +os_workgroup_parallel_create(const char * _Nullable name, + os_workgroup_attr_t _Nullable attr); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_PARALLEL__ */ diff --git a/os/workgroup_private.h b/os/workgroup_private.h new file mode 100644 index 000000000..255fd5079 --- /dev/null +++ b/os/workgroup_private.h @@ -0,0 +1,17 @@ +#ifndef __OS_WORKGROUP_PRIVATE__ +#define __OS_WORKGROUP_PRIVATE__ + +#ifndef __DISPATCH_BUILDING_DISPATCH__ + +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ +#define __OS_WORKGROUP_PRIVATE_INDIRECT__ +#endif /* __OS_WORKGROUP_PRIVATE_INDIRECT__ */ + +#include <os/workgroup.h> +#include <os/workgroup_object_private.h> +#include <os/workgroup_interval_private.h> + +#undef __OS_WORKGROUP_PRIVATE_INDIRECT__ +#endif /* __DISPATCH_BUILDING_DISPATCH__ */ + +#endif /* __OS_WORKGROUP_PRIVATE__ */ diff --git a/private/apply_private.h b/private/apply_private.h new file mode 100644 index 000000000..195e5a4de --- /dev/null +++ b/private/apply_private.h @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_APPLY_PRIVATE__ +#define __DISPATCH_APPLY_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include <dispatch/private.h> instead of this file directly." +#include <dispatch/private.h> // for HeaderDoc +#endif + +__BEGIN_DECLS + +DISPATCH_ASSUME_NONNULL_BEGIN +/*! + * @typedef dispatch_apply_attr_s dispatch_apply_attr_t + * + * @abstract + * Pointer to an opaque structure for describing the workload to be executed by + * dispatch_apply_with_attr. + * + * This struct must be initialized with dispatch_apply_attr_init before use + * and must not be copied once initialized. It must be destroyed with + * dispatch_apply_attr_destroy before going out of scope or being freed, to + * avoid leaking associated system resources.
+ */ +#define __DISPATCH_APPLY_ATTR_SIZE__ 64 + +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_apply_attr_s dispatch_apply_attr_s; +typedef struct dispatch_apply_attr_s *dispatch_apply_attr_t; +#else +struct dispatch_apply_attr_opaque_s { + char opaque[__DISPATCH_APPLY_ATTR_SIZE__]; +}; +typedef struct dispatch_apply_attr_opaque_s dispatch_apply_attr_s; +typedef struct dispatch_apply_attr_opaque_s *dispatch_apply_attr_t; +#endif + +/*! + * @function dispatch_apply_attr_init, dispatch_apply_attr_destroy + * + * @abstract + * Initializer and destructor functions for the attribute structure. The + * attribute structure must be initialized before calling any setters on it. + * + * Every call to dispatch_apply_attr_init must be paired with a corresponding + * call to dispatch_apply_attr_destroy. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_apply_attr_init(dispatch_apply_attr_t attr); + +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_apply_attr_destroy(dispatch_apply_attr_t attr); + +/*! + * @enum dispatch_apply_attr_entity_t + * + * @abstract + * This enum describes an entity in the hardware for which parallelism via + * dispatch_apply is being requested + */ +DISPATCH_ENUM(dispatch_apply_attr_entity, unsigned long, + DISPATCH_APPLY_ATTR_ENTITY_CPU = 1, + DISPATCH_APPLY_ATTR_ENTITY_CLUSTER = 2, +); + +/*! + * @function dispatch_apply_attr_set_parallelism + * + * @param attr + * The dispatch_apply attribute to be modified + * + * @param entity + * The named entity the requested configuration applies to. + * + * @param threads_per_entity + * The number of worker threads to be created per named entity on the system. + * + * @abstract + * Adds a request for the system to start enough worker threads such that + * threads_per_entity number of threads will share each named entity. The + * system will make a best effort to spread such worker threads evenly + * across the available entity. + * + * @notes + * At the present time, the only supported value of threads_per_entity is 1. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT +void +dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t attr, + dispatch_apply_attr_entity_t entity, size_t threads_per_entity); + +/*! + * @typedef dispatch_apply_attr_query_flags_t + * + * @abstract + * Flags that affect calls to dispatch_apply_attr_query(). + * + * @const DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE + * Modifies DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS so that it takes into + * account the current execution context. This may produce a tighter upper bound + * on the number of worker threads. If dispatch_apply_with_attr is called from + * the current execution context, it is guaranteed that the worker_index will + * not exceed the result of this query. However if the current execution context + * is changed (for example with dispatch or pthread functions) or the current + * scope is left, that guarantee will not hold. + */ +DISPATCH_ENUM(dispatch_apply_attr_query_flags, unsigned long, + DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1, +); + +/*! + * @typedef dispatch_apply_attr_query_t + * + * @abstract + * Enumeration indicating question dispatch_apply_attr_query() should answer + * about its arguments. 
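Before the query constants that follow, a hedged lifecycle sketch of the attribute APIs documented above: init before any setter, destroy when done, and threads_per_entity fixed at 1, the only value currently supported; the wrapper is illustrative:

    #include <dispatch/private.h>

    static void
    apply_per_cpu(size_t iterations,
            void (^body)(size_t iteration, size_t worker_index))
    {
        dispatch_apply_attr_s attr;
        dispatch_apply_attr_init(&attr);
        dispatch_apply_attr_set_parallelism(&attr,
                DISPATCH_APPLY_ATTR_ENTITY_CPU, 1);
        dispatch_apply_with_attr(iterations, &attr, body);
        dispatch_apply_attr_destroy(&attr); /* must pair with init */
    }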
+ * + * @const DISPATCH_APPLY_ATTR_QUERY_VALID + * Query if the properties requested by this attribute are invalid or + * unsatisfiable. For example, some properties may describe how the workload will + * use certain hardware resources. On machines which lack that hardware, an + * attribute with those properties may be invalid. + * Passing an invalid attribute to dispatch_apply_with_attr will have undefined + * behaviour. + * If the attribute is valid, the query returns 1. If it is not valid, the query + * returns 0. + * + * @const DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS + * Calculates an upper bound of how many parallel worker threads + * dispatch_apply_with_attr could create when running a workload with the + * specified attribute. This will include the thread calling + * dispatch_apply_with_attr as a worker. This is an upper bound; depending on + * conditions, such as the load of other work on the system and the execution + * context where dispatch_apply_with_attr is called, fewer parallel worker + * threads may actually be created. + * + * A good use of this query is to determine the size of a working arena + * (such as preallocated memory space or other resources) appropriate for + * the maximum number of workers. This API can be used in coordination + * with the worker_index block argument in dispatch_apply_with_attr to provide + * each parallel worker thread with their own slice of the arena. + * + * @const DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS + * Calculates a good guess of how many parallel worker threads + * dispatch_apply_with_attr would likely create when running a workload with + * the specified attribute. This will include the thread calling + * dispatch_apply_with_attr as a worker. This is only a guess; depending on + * conditions, dispatch_apply_with_attr may actually create more or fewer + * parallel worker threads than this value. + * + * Compared to QUERY_MAXIMUM_WORKERS, this query tries to predict the behavior + * of dispatch_apply_with_attr more faithfully. The number of parallel worker + * threads to be used may be affected by aspects of the current execution context + * like the thread's QOS class, scheduling priority, queue hierarchy, and current + * workloop; as well as transitory aspects of the system like power state and + * computational loads from other tasks. For those reasons, repeating this query + * for the same attribute may produce a different result. + */ +DISPATCH_ENUM(dispatch_apply_attr_query, unsigned long, + DISPATCH_APPLY_ATTR_QUERY_VALID DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 0, + DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1, + DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 2, +); + +/*! + * @function dispatch_apply_attr_query + * + * @abstract + * Query how dispatch_apply_with_attr will respond to a certain attr, such + * as how the attr may affect its choice of how many parallel worker threads + * to use. + * + * @param attr + * The dispatch_apply attribute describing a workload + * + * @param which + * An enumeration value indicating which question this function should answer + * about its arguments. See dispatch_apply_attr_query_t for possible values and + * explanations. + * + * @param flags + * Flags for the query that describe factors beyond the workload (which + * is described by the attr).
See dispatch_apply_attr_query_flags_t for
+ * valid values. Pass 0 if no flags are needed.
+ *
+ * @return
+ * Returns the numerical answer to the query. See dispatch_apply_attr_query_t.
+ * Most types of query return 0 if the properties requested by this attribute
+ * are invalid or unsatisfiable. (Exceptions will be described in
+ * dispatch_apply_attr_query_t entries).
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+size_t
+dispatch_apply_attr_query(dispatch_apply_attr_t attr,
+		dispatch_apply_attr_query_t which,
+		dispatch_apply_attr_query_flags_t flags);
+
+/*!
+ * @function dispatch_apply_with_attr
+ *
+ * @abstract
+ * Submits a block for parallel invocation, with an attribute structure
+ * describing the workload.
+ *
+ * @discussion
+ * Submits a block for parallel invocation. The system will try to use worker
+ * threads that match the configuration of the current thread. The system will
+ * try to start an appropriate number of worker threads to maximize
+ * throughput given the available hardware and current system conditions. An
+ * attribute structure that describes the nature of the workload may be passed.
+ * The system will use the attribute's properties to improve its scheduling
+ * choices, such as how many worker threads to create and how to distribute them
+ * across processors.
+ *
+ * This function waits for all invocations of the task block to complete before
+ * returning.
+ *
+ * Each invocation of the block will be passed 2 arguments:
+ * - the current index of iteration
+ * - the index of the worker thread invoking the block
+ *
+ * The worker index will be in the range [0, n)
+ * where n = dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, 0)
+ *
+ * Worker threads may start in any order. Some worker indexes within the
+ * permissible range may not actually be used, depending on conditions.
+ * Generally, one worker thread will use one worker index, but this is not
+ * guaranteed; worker indexes MAY NOT match threads one-to-one. No assumptions
+ * should be made about which CPU a worker runs on. Two invocations of
+ * the block MAY have different worker indexes even if they run on the same
+ * thread or the same processor. However, two invocations of the block running
+ * at the same time WILL NEVER have the same worker index.
+ *
+ * When this API is called inside another dispatch_apply_with_attr or
+ * dispatch_apply, it will execute as a serial loop.
+ *
+ * @param iterations
+ * The number of iterations to perform.
+ *
+ * The choice of how to divide a large workload into a number of iterations can
+ * have substantial effects on the performance of executing that workload.
+ * If the number of iterations is very small, the system may not effectively
+ * spread and balance the work across the available hardware. As a rough
+ * guideline, the number of iterations should be at least three times the maximum
+ * worker index. On the other hand, a workload should not be finely divided into
+ * a huge number of iterations, each doing only a minuscule amount of work, since
+ * there is a small overhead cost of accounting and invocation for each iteration.
+ *
+ * @param attr
+ * The dispatch_apply_attr_t describing specialized properties of the workload.
+ * This value can be NULL. If non-NULL, the attribute must have been initialized
+ * with dispatch_apply_attr_init().
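+ *
+ * As a non-normative illustration of the attribute lifecycle and the arena
+ * pattern described above (error handling elided; work() and count are
+ * hypothetical placeholders supplied by the caller):
+ *
+ * <code>
+ * dispatch_apply_attr_s attr;
+ * dispatch_apply_attr_init(&attr);
+ * dispatch_apply_attr_set_parallelism(&attr,
+ *		DISPATCH_APPLY_ATTR_ENTITY_CLUSTER, 1);
+ * size_t n = dispatch_apply_attr_query(&attr,
+ *		DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, 0);
+ * double *arena = calloc(n, sizeof(double)); // one slot per worker index
+ * dispatch_apply_with_attr(count, &attr, ^(size_t i, size_t w) {
+ *		arena[w] += work(i); // worker index w stays below n
+ * });
+ * free(arena);
+ * dispatch_apply_attr_destroy(&attr);
+ * </code>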
+ *
+ * If the attribute requests properties that are invalid or meaningless on this
+ * system, the function will have undefined behaviour. This is a programming
+ * error. An attribute's validity can be checked with dispatch_apply_attr_query.
+ *
+ * @param block
+ * The block to be invoked the specified number of iterations.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+void
+dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t _Nullable attr,
+		DISPATCH_NOESCAPE void (^block)(size_t iteration, size_t worker_index));
+#endif
+
+/*!
+ * @function dispatch_apply_with_attr_f
+ *
+ * @abstract
+ * Submits a function for parallel invocation, with an attribute structure
+ * describing the workload.
+ *
+ * @discussion
+ * See dispatch_apply_with_attr() for details.
+ *
+ * @param iterations
+ * The number of iterations to perform.
+ *
+ * @param attr
+ * The dispatch_apply_attr_t describing specialized properties of the workload.
+ * This value can be NULL. If non-NULL, the attribute must have been initialized
+ * with dispatch_apply_attr_init().
+ *
+ * If the attribute requests properties that are invalid or meaningless on this
+ * system, the function will have undefined behaviour. This is a programming
+ * error. An attribute's validity can be checked with dispatch_apply_attr_query.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the specified queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_apply_with_attr_f(). The second parameter passed to this function is
+ * the current index of iteration. The third parameter passed to this function is
+ * the index of the worker thread invoking the function.
+ * See dispatch_apply_with_attr() for details.
+ * The result of passing NULL in this parameter is undefined.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+void
+dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t _Nullable attr,
+		void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration, size_t worker_index));
+
+DISPATCH_ASSUME_NONNULL_END
+
+__END_DECLS
+#endif /* __DISPATCH_APPLY_PRIVATE__ */
diff --git a/private/channel_private.h b/private/channel_private.h
new file mode 100644
index 000000000..9c2ecf626
--- /dev/null
+++ b/private/channel_private.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_CHANNEL_PRIVATE__
+#define __DISPATCH_CHANNEL_PRIVATE__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/private.h> instead of this file directly."
+#include <dispatch/private.h> // for HeaderDoc
+#endif
+
+DISPATCH_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+#if DISPATCH_CHANNEL_SPI
+
+/*!
+ * @typedef dispatch_channel_t
+ *
+ * @abstract
+ */
+DISPATCH_DECL(dispatch_channel);
+
+typedef struct dispatch_channel_invoke_ctxt_s *dispatch_channel_invoke_ctxt_t;
+
+/*! @typedef dispatch_channel_callbacks_t
+ *
+ * @abstract
+ * Vtable used by dispatch channels (see dispatch_channel_create).
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+typedef struct dispatch_channel_callbacks_s {
+#define DISPATCH_CHANNEL_CALLBACKS_VERSION 1ul
+	/*! @field dcc_version
+	 *
+	 * @abstract
+	 * Version of the callbacks, used for binary compatibility.
+	 * This must be set to DISPATCH_CHANNEL_CALLBACKS_VERSION
+	 */
+	unsigned long dcc_version;
+
+	/*! @field dcc_probe
+	 *
+	 * @abstract
+	 * This callback is called when GCD is considering whether it should wake
+	 * up the channel.
+	 *
+	 * @discussion
+	 * This function may be called from ANY context. It may be called
+	 * concurrently from several threads, it may be called concurrently with
+	 * a call to other channel callbacks.
+	 *
+	 * Reasons for this function to be called include:
+	 * - the channel became non empty,
+	 * - the channel is receiving a Quality of Service override to resolve
+	 *   a priority inversion,
+	 * - dispatch_activate() or dispatch_resume() was called,
+	 * - dispatch_channel_wakeup() was called.
+	 *
+	 * The implementation of this callback should be idempotent, and as cheap
+	 * as possible, avoiding taking locks if possible. A typical implementation
+	 * will perform a single atomic state load to determine what answer to
+	 * return. Possible races or false positives can later be debounced in
+	 * dcc_invoke which is synchronized.
+	 *
+	 * Calling dispatch_channel_wakeup() from the context of this call is
+	 * incorrect and will result in undefined behavior. Instead, it should be
+	 * called in response to external events, in order to cause the channel to
+	 * re-evaluate the `dcc_probe` hook.
+	 *
+	 * param channel
+	 * The channel that is being probed.
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * - true if the dispatch channel can be woken up according to the other
+	 *   runtime rules
+	 *
+	 * - false if the dispatch channel would not be able to make progress if
+	 *   woken up. A subsequent explicit call to dispatch_channel_wakeup() will
+	 *   be required when this condition has changed.
+	 */
+	bool
+	(*_Nonnull dcc_probe)(dispatch_channel_t channel, void *_Nullable context);
+
+	/*! @field dcc_invoke
+	 *
+	 * @abstract
+	 * This callback is called when a dispatch channel is being drained.
+	 *
+	 * @discussion
+	 * This callback is where the state machine for the channel can
+	 * be implemented using dispatch_channel_foreach_work_item_peek()
+	 * and dispatch_channel_drain().
+	 *
+	 * Note that if this function returns true, it must have called
+	 * dispatch_channel_drain() exactly once. It is valid to call neither
+	 * peek nor drain if false is returned.
+	 *
+	 * param channel
+	 * The channel that has been invoked.
+	 *
+	 * param invoke_context
+	 * An opaque data structure that must be passed back to
+	 * dispatch_channel_foreach_work_item_peek() and dispatch_channel_drain().
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * - true if the channel can drain further
+	 * - false if an explicit call to dispatch_channel_wakeup() is required
+	 *   for the channel to be able to drain items again. A subsequent explicit
+	 *   call to dispatch_channel_wakeup() will be required when this condition
+	 *   has changed.
+	 */
+	bool
+	(*_Nonnull dcc_invoke)(dispatch_channel_t channel,
+			dispatch_channel_invoke_ctxt_t invoke_context,
+			void *_Nullable context);
+
+	/*! @field dcc_acknowledge_cancel
+	 *
+	 * @abstract
+	 * This optional callback is called when the channel has been cancelled,
+	 * until that cancellation is acknowledged.
+	 *
+	 * @discussion
+	 * If this callback isn't set, the channel cancellation is implicit and can
+	 * be tested with dispatch_channel_testcancel().
+	 *
+	 * When this callback is set, it will be called as soon as cancellation has
+	 * been noticed. When it is called, it is called from a context serialized
+	 * with `dcc_invoke`, or from `dcc_invoke` itself.
+	 *
+	 * Returning `false` causes the dispatch channel to stop its invocation
+	 * early. A subsequent explicit call to dispatch_channel_wakeup() will be
+	 * required when the cancellation can be acknowledged.
+	 *
+	 * param channel
+	 * The channel that has been invoked.
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * Whether the cancellation was acknowledged.
+	 */
+	bool
+	(*_Nullable dcc_acknowledge_cancel)(dispatch_channel_t channel,
+			void *_Nullable context);
+} const *dispatch_channel_callbacks_t;
+
+/*! @function dispatch_channel_create
+ *
+ * @abstract
+ * Create a dispatch channel.
+ *
+ * @discussion
+ * A dispatch channel is similar to a dispatch serial queue, however it will
+ * accept arbitrary items into the queue, as well as regular dispatch blocks
+ * to execute.
+ *
+ * Unlike serial queues, this object cannot be targeted by other dispatch
+ * objects.
+ *
+ * Dispatch channels are created in an inactive state. After creating the
+ * channel and setting any desired property, a call must be made to
+ * dispatch_activate() in order to use the object.
+ *
+ * Calling dispatch_set_target_queue() on a channel after it has been activated
+ * is not allowed (see dispatch_activate() and dispatch_set_target_queue()).
+ *
+ * @param label
+ * A string label to attach to the channel.
+ * This parameter is optional and may be NULL.
+ *
+ * @param target
+ * The target queue for the newly created channel. The target queue is retained.
+ * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the channel's target
+ * queue to the default target queue for the given channel type.
+ *
+ * @param context
+ * A context to associate with the channel. It can be retrieved with
+ * dispatch_get_context() at any time, but should not be mutated.
+ *
+ * @param callbacks
+ * Hooks for the created channel.
+ *
+ * @returns
+ * The newly created channel.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW DISPATCH_NONNULL4
+dispatch_channel_t
+dispatch_channel_create(const char *_Nullable label,
+		dispatch_queue_t _Nullable target,
+		void *_Nullable context, dispatch_channel_callbacks_t callbacks);
+
+/*! @function dispatch_channel_wakeup
+ *
+ * @abstract
+ * Re-evaluate whether a dispatch channel needs to be woken up.
+ *
+ * @discussion
+ * Calling this function causes the GCD runtime to reevaluate whether
+ * the specified dispatch channel needs to be woken up.
If a previous call to
+ * `dcc_probe`, `dcc_acknowledge_cancel` or `dcc_invoke` returned false, then
+ * a channel may remain asleep until wakeup is called.
+ *
+ * It is valid to call this function from the context of any of the `invoke`
+ * callbacks, but not from the `dcc_probe` callback.
+ *
+ * This function will have no effect if:
+ * - the dispatch channel is suspended,
+ * - the `dcc_probe` callback subsequently returns false,
+ * - the dispatch channel has no work items queued, nor a pending cancellation
+ *   to acknowledge.
+ *
+ * @param channel
+ * The channel for which wakeup should be evaluated.
+ *
+ * @param qos_class
+ * The QoS override that should be applied to this channel because of this
+ * event. The override will persist until the channel has been drained of
+ * pending items.
+ *
+ * It is expected that most wakeups will not require an additional QoS
+ * override. In this case, passing QOS_CLASS_UNSPECIFIED indicates that no
+ * additional override should be applied as a result of this wakeup.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_wakeup(dispatch_channel_t channel, qos_class_t qos_class);
+
+/*! @typedef dispatch_channel_enumerator_handler_t
+ *
+ * Type of the callbacks used by dispatch_channel_foreach_work_item_peek_f().
+ */
+typedef bool (*dispatch_channel_enumerator_handler_t)(void *_Nullable context, void *item);
+
+/*! @function dispatch_channel_foreach_work_item_peek_f
+ *
+ * @abstract
+ * Peek at opaque work items currently enqueued on the channel.
+ *
+ * @discussion
+ * This function will enumerate items enqueued on the channel, in order, until
+ * the first non-opaque work item is found. No work should be performed on
+ * behalf of the items enumerated.
+ *
+ * This function allows the caller to preflight items that will be processed
+ * when draining the channel (e.g. counting items in order to pre-allocate
+ * storage, or batching items into groups).
+ *
+ * This function can only be called from the context of the `dcc_invoke`
+ * callback associated with this channel, and before any call to
+ * dispatch_channel_drain().
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param context
+ * An application-defined context that will be passed to the handler.
+ *
+ * @param handler
+ * The handler that will be passed `context` and the opaque work item
+ * currently enumerated.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_foreach_work_item_peek_f(
+		dispatch_channel_invoke_ctxt_t invoke_context,
+		void *_Nullable context, dispatch_channel_enumerator_handler_t handler);
+
+#ifdef __BLOCKS__
+/*! @typedef dispatch_channel_enumerator_block_t
+ *
+ * Type of the callbacks used by dispatch_channel_foreach_work_item_peek().
+ */
+typedef bool (^dispatch_channel_enumerator_block_t)(void *item);
+
+/*! @function dispatch_channel_foreach_work_item_peek
+ *
+ * @abstract
+ * Peek at the opaque work items currently enqueued on the channel.
+ *
+ * @discussion
+ * See dispatch_channel_foreach_work_item_peek_f()
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param block
+ * The block that will be passed the opaque work item currently enumerated.
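+ *
+ * As a purely illustrative sketch (not part of the SPI contract), a
+ * `dcc_invoke` callback might combine peek and drain as follows, where
+ * batch_reserve() and process_item() are hypothetical client helpers:
+ *
+ * <code>
+ * static bool
+ * my_invoke(dispatch_channel_t channel,
+ *		dispatch_channel_invoke_ctxt_t ictxt, void *ctxt)
+ * {
+ *	__block size_t count = 0;
+ *	dispatch_channel_foreach_work_item_peek(ictxt, ^bool(void *item) {
+ *		count++; // count opaque items without consuming them
+ *		return true;
+ *	});
+ *	batch_reserve(ctxt, count);
+ *	dispatch_channel_drain(ictxt, ^bool(void *item,
+ *			void *_Nonnull *_Nullable rejected_item) {
+ *		process_item(ctxt, item);
+ *		return true; // keep consuming
+ *	});
+ *	return true; // drain was called exactly once
+ * }
+ * </code>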
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_foreach_work_item_peek(
+		dispatch_channel_invoke_ctxt_t invoke_context,
+		dispatch_channel_enumerator_block_t block DISPATCH_NOESCAPE);
+#endif
+
+/*! @typedef dispatch_channel_drain_handler_t
+ *
+ * @abstract
+ * Type of the callbacks used by dispatch_channel_drain_f().
+ *
+ * @param context
+ * The application defined context passed to dispatch_channel_drain_f().
+ *
+ * @param item
+ * The opaque work item to consume.
+ *
+ * @param rejected_item
+ * An out-parameter for an opaque work item to put back at the head of the
+ * queue. On return from this handler, if rejected_item is set then the handler
+ * must also return false (and, thus, interrupt the drain operation).
+ *
+ * @returns
+ * - true if the drain may enumerate the next item
+ * - false to cause dispatch_channel_drain_f() to return,
+ *   in which case a rejected item can optionally be returned.
+ */
+typedef bool (*dispatch_channel_drain_handler_t)(void *_Nullable context,
+		void *_Nonnull item, void *_Nonnull *_Nullable rejected_item);
+
+/*! @function dispatch_channel_drain_f
+ *
+ * @abstract
+ * Drain the opaque work items enqueued on the channel.
+ *
+ * @discussion
+ * This function needs to be called by any `dcc_invoke` that returns true.
+ *
+ * Calling drain will cause every opaque work item that can be consumed to be
+ * passed to the handler. While the handler is called, the runtime environment
+ * matches the QOS and context captured at dispatch_channel_enqueue() time for
+ * this opaque work item.
+ *
+ * Note that this function can (through factors internal to the GCD runtime)
+ * decide not to consume all items that are currently enqueued on the channel.
+ * Therefore it is possible for dispatch_channel_drain_f() to enumerate fewer
+ * items than dispatch_channel_foreach_work_item_peek_f() did when called
+ * immediately beforehand.
+ *
+ * It is also possible for dispatch_channel_drain_f() to observe *more* items
+ * than previously seen with peek, if enqueues are happening concurrently.
+ *
+ * Note that work items enqueued with dispatch_channel_async() act as
+ * "separators". If the opaque work item O1 is enqueued before a regular
+ * asynchronous work item A, and a new opaque work item O2 is then enqueued,
+ * then neither dispatch_channel_foreach_work_item_peek_f() nor
+ * dispatch_channel_drain_f() will ever return O1 and O2 as part of the same
+ * drain streak.
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param context
+ * The application-defined context to pass to the handler.
+ *
+ * @param handler
+ * The handler that will be passed the context and opaque work item to invoke.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t invoke_context,
+		void *_Nullable context, dispatch_channel_drain_handler_t handler);
+
+#ifdef __BLOCKS__
+/*! @typedef dispatch_channel_drain_block_t
+ *
+ * @abstract
+ * Type of the callbacks used by dispatch_channel_drain().
+ *
+ * @discussion
+ * See dispatch_channel_drain_handler_t.
+ */
+typedef bool (^dispatch_channel_drain_block_t)(void *_Nonnull item,
+		void *_Nonnull *_Nullable rejected_item);
+
+/*! @function dispatch_channel_drain
+ *
+ * @abstract
+ * Drain the opaque work items enqueued on the channel.
+ *
+ * @discussion
+ * See dispatch_channel_drain_f()
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param block
+ * The block that will be passed the opaque work item to invoke.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_drain(dispatch_channel_invoke_ctxt_t invoke_context,
+		dispatch_channel_drain_block_t block DISPATCH_NOESCAPE);
+#endif
+
+/*!
+ * @function dispatch_channel_cancel
+ *
+ * @abstract
+ * Asynchronously cancel the dispatch channel.
+ *
+ * @discussion
+ * Cancellation will cause the channel to repeatedly call the
+ * `dcc_acknowledge_cancel` handler until it returns true. This allows the
+ * associated state machine to handle cancellation asynchronously (and, if
+ * needed, in multiple phases).
+ *
+ * The precise semantics of cancellation are up to the dispatch channel
+ * associated state machine, and not all dispatch channels need to use
+ * cancellation.
+ *
+ * However, if the `dcc_acknowledge_cancel` callback is implemented, then an
+ * explicit call to dispatch_channel_cancel() is mandatory before the last
+ * reference to the dispatch channel is released.
+ *
+ * @param channel
+ * The dispatch channel to be canceled.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_cancel(dispatch_channel_t channel);
+
+/*!
+ * @function dispatch_channel_testcancel
+ *
+ * @abstract
+ * Tests whether the given dispatch channel has been canceled.
+ *
+ * @param channel
+ * The dispatch channel to be tested.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @result
+ * Non-zero if canceled and zero if not canceled.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_NOTHROW
+long
+dispatch_channel_testcancel(dispatch_channel_t channel);
+
+/*!
+ * @function dispatch_channel_async
+ *
+ * @abstract
+ * Submits a block for asynchronous execution on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_async().
+ *
+ * @param channel
+ * The target dispatch channel to which the block is submitted.
+ * The system will hold a reference on the target channel until the block
+ * has finished.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The block to submit to the target dispatch channel. This function performs
+ * Block_copy() and Block_release() on behalf of callers.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_async(dispatch_channel_t channel, dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_channel_async_f
+ *
+ * @abstract
+ * Submits a function for asynchronous execution on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_async() for details.
+ *
+ * @param channel
+ * The target dispatch channel to which the function is submitted.
+ * The system will hold a reference on the target channel until the function
+ * has returned.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target channel. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_channel_async_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_channel_async_f(dispatch_channel_t channel,
+		void *_Nullable context, dispatch_function_t work);
+
+/*!
+ * @function dispatch_channel_enqueue
+ *
+ * @abstract
+ * Enqueues an opaque work item for asynchronous dequeue on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_channel_async() for details.
+ *
+ * @param channel
+ * The target dispatch channel to which the work item is submitted.
+ * The system will hold a reference on the target channel until the work item
+ * is consumed.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param item
+ * The application-defined work item to enqueue on the target channel.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_enqueue(dispatch_channel_t channel, void *item);
+
+#endif // DISPATCH_CHANNEL_SPI
+
+__END_DECLS
+
+DISPATCH_ASSUME_NONNULL_END
+
+#endif
diff --git a/private/mach_private.h b/private/mach_private.h
index e311aee16..bed88c0bd 100644
--- a/private/mach_private.h
+++ b/private/mach_private.h
@@ -34,11 +34,10 @@
 
 __BEGIN_DECLS
 
-#if DISPATCH_MACH_SPI
-
-#define DISPATCH_MACH_SPI_VERSION 20161026
+#define DISPATCH_MACH_SPI_VERSION 20200229
 
 #include 
+#include 
 
 DISPATCH_ASSUME_NONNULL_BEGIN
 
@@ -129,6 +128,10 @@ DISPATCH_DECL(dispatch_mach);
  * an asynchronous reply to a message previously sent to the channel. Used
  * only if the channel is disconnected while waiting for a reply to a message
  * sent with dispatch_mach_send_with_result_and_async_reply_4libxpc().
+ *
+ * @const DISPATCH_MACH_NO_SENDERS
+ * Sent when a no-senders notification requested with
+ * dispatch_mach_request_no_senders() has been received. See
+ * dispatch_mach_request_no_senders().
  */
 DISPATCH_ENUM(dispatch_mach_reason, unsigned long,
 	DISPATCH_MACH_CONNECTED = 1,
@@ -143,6 +146,7 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long,
 	DISPATCH_MACH_NEEDS_DEFERRED_SEND,
 	DISPATCH_MACH_SIGTERM_RECEIVED,
 	DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED,
+	DISPATCH_MACH_NO_SENDERS,
 	DISPATCH_MACH_REASON_LAST, /* unused */
 );
 
@@ -159,7 +163,7 @@ DISPATCH_ENUM(dispatch_mach_send_flags, unsigned long,
  * Trailer type of mach message received by dispatch mach channels
  */
 
-typedef mach_msg_context_trailer_t dispatch_mach_trailer_t;
+typedef mach_msg_mac_trailer_t dispatch_mach_trailer_t;
 
 /*!
  * @constant DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE
@@ -351,6 +355,124 @@
 dispatch_mach_create_f(const char *_Nullable label,
 		dispatch_queue_t _Nullable queue, void *_Nullable context,
 		dispatch_mach_handler_function_t handler);
 
+/*!
+ * @function dispatch_mach_request_no_senders
+ *
+ * Configure the mach channel to receive no more senders notifications.
+ *
+ * @discussion
+ * This function must be called before dispatch_mach_connect() has been called.
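+ *
+ * As a purely illustrative sketch (not normative; queue, ctxt, handler and
+ * recvp are hypothetical caller-owned values, recvp being a receive right),
+ * a server-side setup might look like:
+ *
+ * <code>
+ * dispatch_mach_t dm = dispatch_mach_create_f("peer", queue, ctxt, handler);
+ * dispatch_mach_request_no_senders(dm);
+ * dispatch_mach_connect(dm, recvp, MACH_PORT_NULL, NULL);
+ * </code>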
+ *
+ * When a checkin message is passed to dispatch_mach_connect() or
+ * dispatch_mach_reconnect(), the notification is armed after the checkin
+ * message has been sent successfully.
+ *
+ * If no checkin message is passed, then the mach channel is assumed to be
+ * a "server" peer connection and the no more senders request is armed
+ * immediately.
+ *
+ * Note that the notification will not be issued if no send right was ever
+ * made for this connection receive right.
+ *
+ * @param channel
+ * The mach channel to request no senders notifications on.
+ */
+API_DEPRECATED("Use dispatch_mach_notify_no_senders instead", macos(10.14, 10.16),
+		ios(12.0, 14.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_request_no_senders(dispatch_mach_t channel);
+
+/*!
+ * @function dispatch_mach_notify_no_senders
+ *
+ * Configure the mach channel to receive no more senders notifications.
+ *
+ * @discussion
+ * This function must be called before dispatch_mach_connect() has been called.
+ *
+ * When a checkin message is passed to dispatch_mach_connect() or
+ * dispatch_mach_reconnect(), the notification is armed after the checkin
+ * message has been sent successfully.
+ *
+ * If no checkin message is passed, then the mach channel is assumed to be
+ * a "server" peer connection and the no more senders request is armed
+ * immediately.
+ *
+ * Requesting a no-senders notification for a listener mach channel is likely a
+ * client error since listener connections will likely have short-lived send
+ * rights (only until a peer connection is established).
+ *
+ * @param channel
+ * The mach channel to request no senders notifications on.
+ *
+ * @param made_sendrights
+ * A boolean representing whether the send right for this connection has been
+ * made before dispatch_mach_connect() is called.
+ *
+ * There are 2 cases of consideration:
+ *
+ * a) The client is initiating the peer connection by creating a receive right
+ * with an inserted send right and shipping the receive right over to the server
+ * in a checkin message. In this case, the server must specify true for
+ * made_sendrights when arming for no-senders notification.
+ *
+ * b) The server is initiating the connection by creating a mach channel with a
+ * receive right and using MACH_MSG_TYPE_MAKE_SEND to create a send right in the
+ * checkin reply for the peer connection. In this case, the server should specify
+ * false for made_sendrights while arming for no-senders notification.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_notify_no_senders(dispatch_mach_t channel, bool made_sendrights);
+
+/*!
+ * @typedef dispatch_mach_flags_t
+ *
+ * Flags that can be passed to the dispatch_mach_set_flags function.
+ *
+ * @const DMF_USE_STRICT_REPLY
+ * Instruct the dispatch mach channel to use strict reply port semantics. When
+ * using strict reply port semantics, the kernel will enforce that the port
+ * used as the reply port has precisely 1 extant send-once right, its receive
+ * right exists in the same space as the sender, and any voucher context,
+ * e.g., the persona in the bank attribute, used when sending the message is
+ * also used when replying.
+ *
+ * @const DMF_REQUEST_NO_SENDERS
+ * Configure the mach channel to receive no more senders notifications.
+ * When a checkin message is passed to dispatch_mach_connect() or
+ * dispatch_mach_reconnect(), the notification is armed after the checkin
+ * message has been sent successfully.
If no checkin message is passed, then
+ * the mach channel is assumed to be a "server" peer connection and the no
+ * more senders request is armed immediately.
+ */
+DISPATCH_OPTIONS(dispatch_mach_flags, uint64_t,
+	DMF_NONE = 0x0,
+	DMF_USE_STRICT_REPLY = 0x1,
+	DMF_REQUEST_NO_SENDERS = 0x2,
+);
+
+/*!
+ * @function dispatch_mach_set_flags
+ *
+ * Configure optional properties on the mach channel.
+ *
+ * @discussion
+ * This function must be called before dispatch_mach_connect() has been called.
+ *
+ * @param channel
+ * The mach channel to configure.
+ *
+ * @param flags
+ * Flags to configure the dispatch mach channel.
+ *
+ * @see dispatch_mach_flags_t
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_set_flags(dispatch_mach_t channel, dispatch_mach_flags_t flags);
+
 /*!
  * @function dispatch_mach_connect
  * Connect a mach channel to the specified receive and send rights.
@@ -882,6 +1004,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 mach_port_t
 dispatch_mach_get_checkin_port(dispatch_mach_t channel);
 
+#if DISPATCH_MACH_SPI
+
 // SPI for libxpc
 /*
  * Type for the callback for receipt of asynchronous replies to
@@ -914,6 +1038,8 @@
 
 	/* Fields available in version 2. */
 
+#define DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF ((dispatch_queue_t)NULL)
+
 	/*
 	 * Gets the queue to which a reply to a message sent using
 	 * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be
@@ -945,7 +1071,7 @@
 	dispatch_mach_async_reply_callback_t dmxh_async_reply_handler;
 
 	/* Fields available in version 3. */
-	/**
+	/*
 	 * Called once when the Mach channel has been activated. If this function
 	 * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be
 	 * delivered to the channel's event handler when a SIGTERM is received.
@@ -1101,6 +1227,8 @@ dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel,
 		dispatch_mach_send_flags_t send_flags, dispatch_mach_reason_t *send_result,
 		mach_error_t *send_error);
 
+#endif // DISPATCH_MACH_SPI
+
 /*!
  * @function dispatch_mach_handoff_reply_f
  *
@@ -1125,7 +1253,7 @@
  * @param port
  * The send once right that will be replied to.
  */
-API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_mach_handoff_reply_f(dispatch_queue_t queue, mach_port_t port,
@@ -1140,16 +1268,52 @@
  *
  * @see dispatch_mach_handoff_reply_f
  */
-API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_handoff_reply(dispatch_queue_t queue, mach_port_t port,
 		dispatch_block_t block);
+#endif /* __BLOCKS__ */
 
-DISPATCH_ASSUME_NONNULL_END
+#if DISPATCH_MACH_SPI
+
+/*!
+ * @function dispatch_mach_msg_get_filter_policy_id
+ * Returns the message filter policy id from the message trailer.
+ * This id is added by the kernel during message send and is specific
+ * to the sender and port on which the message is received.
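+ *
+ * For illustration only (a hypothetical snippet inside a channel's IPC
+ * handler, where msg is the received dispatch_mach_msg_t):
+ *
+ * <code>
+ * mach_msg_filter_id policy = 0;
+ * dispatch_mach_msg_get_filter_policy_id(msg, &policy);
+ * </code>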
+ *
+ * @discussion
+ * This function should only be called from the context of an IPC handler.
+ *
+ * @param msg
+ * The dispatch mach message object to query. It should have a trailer of type
+ * dispatch_mach_trailer_t.
+ *
+ * @param filter_policy_id
+ * An out-parameter for the filter policy id read from the message.
+ *
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_policy_id);
+
+
+/*!
+ * @function dispatch_mach_can_handoff_4libxpc
+ *
+ * Returns whether the code is running in a context where a handoff is possible.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+bool
+dispatch_mach_can_handoff_4libxpc(void);
 
 #endif // DISPATCH_MACH_SPI
 
+DISPATCH_ASSUME_NONNULL_END
+
 __END_DECLS
 
 #endif
diff --git a/private/private.h b/private/private.h
index b87f5dc2b..e49d15c95 100644
--- a/private/private.h
+++ b/private/private.h
@@ -62,21 +62,23 @@
 #include 
 #include 
+#if DISPATCH_CHANNEL_SPI
+#include 
+#endif
 #include 
 #include 
-#if DISPATCH_MACH_SPI
 #include 
-#endif // DISPATCH_MACH_SPI
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #undef __DISPATCH_INDIRECT__
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 // Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20180109 // Keep in sync with 
+#if DISPATCH_API_VERSION != 20181008 // Keep in sync with 
 #error "Dispatch header mismatch between /usr/include and /usr/local/include"
 #endif
 
diff --git a/private/queue_private.h b/private/queue_private.h
index 60ae96e5c..199fcaeed 100644
--- a/private/queue_private.h
+++ b/private/queue_private.h
@@ -41,13 +41,29 @@ __BEGIN_DECLS
  *
  * @constant DISPATCH_QUEUE_OVERCOMMIT
  * The queue will create a new thread for invoking blocks, regardless of how
- * busy the computer is.
+ * busy the computer is. It is invalid to pass both the
+ * DISPATCH_QUEUE_OVERCOMMIT and the DISPATCH_QUEUE_COOPERATIVE flags.
+ *
+ * @constant DISPATCH_QUEUE_COOPERATIVE
+ * The queue will not bring up threads beyond a specific limit even if
+ * there are pending work items on the queue.
+ *
+ * The width of the queue is determined based on the hardware the code is
+ * running on and may change dynamically depending on the load of the system.
+ * Blocking any thread working on this queue will therefore reduce the
+ * throughput of the queue as a whole. Work running on this queue should be
+ * able to make progress until completion even if just 1 thread is available
+ * to process this queue.
+ *
+ * It is invalid to pass both the DISPATCH_QUEUE_OVERCOMMIT and the
+ * DISPATCH_QUEUE_COOPERATIVE flags.
  */
 enum {
 	DISPATCH_QUEUE_OVERCOMMIT = 0x2ull,
+	DISPATCH_QUEUE_COOPERATIVE = 0x4ull,
 };
 
-
 /*!
  * @function dispatch_set_qos_class
  *
@@ -95,54 +111,6 @@ void
 dispatch_set_qos_class(dispatch_object_t object,
 		dispatch_qos_class_t qos_class, int relative_priority);
 
-/*!
- * @function dispatch_set_qos_class_floor
- *
- * @abstract
- * Sets the QOS class floor on a dispatch queue, source, workloop or mach
- * channel.
- *
- * @discussion
- * The QOS class of workitems submitted to this object asynchronously will be
- * elevated to at least the specified QOS class floor.
- * Unlike dispatch_set_qos_class(), the QOS of the workitem will be used if
- * higher than the floor even when the workitem has been created without
- * "ENFORCE" semantics.
- * - * Setting the QOS class floor is equivalent to the QOS effects of configuring - * a target queue whose QOS class has been set with dispatch_set_qos_class(). - * - * Calling this function will supersede any prior calls to - * dispatch_set_qos_class() or dispatch_set_qos_class_floor(). - * - * @param object - * A dispatch queue, workloop, source or mach channel to configure. - * The object must be inactive. - * - * Passing another object type or an object that has been activated is undefined - * and will cause the process to be terminated. - * - * @param qos_class - * A QOS class value: - * - QOS_CLASS_USER_INTERACTIVE - * - QOS_CLASS_USER_INITIATED - * - QOS_CLASS_DEFAULT - * - QOS_CLASS_UTILITY - * - QOS_CLASS_BACKGROUND - * Passing any other value is undefined. - * - * @param relative_priority - * A relative priority within the QOS class. This value is a negative - * offset from the maximum supported scheduler priority for the given class. - * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY - * is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -dispatch_set_qos_class_floor(dispatch_object_t object, - dispatch_qos_class_t qos_class, int relative_priority); - /*! * @function dispatch_set_qos_class_fallback * @@ -198,12 +166,16 @@ dispatch_set_qos_class_fallback(dispatch_object_t object, #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +#if __APPLE__ +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR +#else // __APPLE__ // On FreeBSD pthread_attr_t is a typedef to a pointer type #if defined(__FreeBSD__) # define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR _Nullable -#else +#else // defined(__FreeBSD__) # define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR -#endif +#endif // defined(__FreeBSD__) +#endif // __APPLE__ /*! * @function dispatch_queue_attr_make_with_overcommit @@ -379,7 +351,8 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() +API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_scheduler_priority", + macos(10.9, 10.16), ios(6.0, 14.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t @@ -496,6 +469,76 @@ DISPATCH_EXPORT void _dispatch_install_thread_detach_callback(void (*cb)(void)); #endif +/* The SPIs below are for the use of the Swift Concurrency Runtime ONLY */ + +DISPATCH_OPTIONS(dispatch_swift_job_invoke_flags, uint32_t, + /*! + * @const DISPATCH_SWIFT_JOB_INVOKE_NONE + * + * No specific requirements for how the object invokes itself. + */ + DISPATCH_SWIFT_JOB_INVOKE_NONE, + + /*! + * @const DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE + * + * This swift job is invoked on a cooperative queue. It should periodically + * check dispatch_swift_job_should_yield() to determine if the object + * ought to yield the thread to other objects in the cooperative queue + */ + DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE, +); + +/*! + * @function dispatch_swift_job_should_yield() + * + * @abstract + * This function is only to be called by the Swift concurrency runtime. + * + * If this function returns true, then the currently draining object + * should reach the next safest stopping point, perform necessary cleanups, and + * return from its invocation. 
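+ *
+ * As a purely illustrative sketch (not a statement about the actual Swift
+ * Concurrency runtime; have_jobs(), run_one_job() and reenqueue_remaining()
+ * are hypothetical), a cooperative drain loop might be shaped like:
+ *
+ * <code>
+ * while (have_jobs()) {
+ *	run_one_job();
+ *	if (dispatch_swift_job_should_yield()) {
+ *		reenqueue_remaining(); // e.g. via dispatch_async_swift_job()
+ *		break;
+ *	}
+ * }
+ * </code>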
+ *
+ * If more work is present, it should reenqueue itself using the
+ * dispatch_async_swift_job() SPI.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0))
+DISPATCH_EXPORT
+bool
+dispatch_swift_job_should_yield(void);
+
+/*!
+ * @function dispatch_async_swift_job
+ *
+ * @abstract
+ * This function is only to be called by the Swift concurrency runtime to
+ * enqueue work to run on dispatch's thread pool.
+ *
+ * @param queue
+ * The queue onto which to enqueue the swift object. All enqueues are
+ * asynchronous and do not block the thread.
+ *
+ * @param swift_job
+ * The swift concurrency runtime job that is to be enqueued into dispatch. This
+ * object needs to adhere to a specific structure and have a specific vtable
+ * layout that dispatch expects.
+ *
+ * The refcount and lifetime of the object is managed by the enqueuer, who
+ * needs to make sure that it is live for the duration it is enqueued on
+ * the dispatch queue.
+ *
+ * The swift job can only be enqueued on a single queue at any
+ * given time.
+ *
+ * @param qos
+ * The QoS at which the object should be enqueued.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2
+void
+dispatch_async_swift_job(dispatch_queue_t queue, void *swift_job,
+		qos_class_t qos);
+
 __END_DECLS
 
 DISPATCH_ASSUME_NONNULL_END
diff --git a/private/source_private.h b/private/source_private.h
index 6396c113f..fab9b9854 100644
--- a/private/source_private.h
+++ b/private/source_private.h
@@ -190,9 +190,15 @@ enum {
  *
  * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE
  * Received network channel flow advisory.
+ * @constant DISPATCH_NW_CHANNEL_CHANNEL_EVENT
+ * Received network channel event.
+ * @constant DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE
+ * Received network channel interface advisory.
  */
 enum {
 	DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001,
+	DISPATCH_NW_CHANNEL_CHANNEL_EVENT = 0x00000002,
+	DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE = 0x00000004,
 };
 
 /*!
@@ -234,6 +240,9 @@ enum {
  * @constant DISPATCH_VFS_NEARLOWDISK
  * Filesystem is nearly full (below NEARLOWDISK level).
  *
+ * @constant DISPATCH_VFS_SERVEREVENT
+ * Server issued a notification/warning.
+ *
  * @constant DISPATCH_VFS_DESIREDDISK
  * Filesystem has exceeded the DESIREDDISK level
  *
@@ -251,6 +260,7 @@ enum {
 	DISPATCH_VFS_NOTRESPLOCK = 0x0080,
 	DISPATCH_VFS_UPDATE = 0x0100,
 	DISPATCH_VFS_VERYLOWDISK = 0x0200,
+	DISPATCH_VFS_SERVEREVENT = 0x0800,
 	DISPATCH_VFS_QUOTA = 0x1000,
 	DISPATCH_VFS_NEARLOWDISK = 0x2000,
 	DISPATCH_VFS_DESIREDDISK = 0x4000,
@@ -326,6 +336,18 @@ enum {
 	DISPATCH_MACH_SEND_POSSIBLE = 0x8,
 };
 
+/*!
+ * @enum dispatch_source_mach_recv_flags_t
+ *
+ * @constant DISPATCH_MACH_RECV_SYNC_PEEK
+ * The receive source will participate in synchronous IPC priority inversion
+ * avoidance when possible.
+ */
+enum {
+	DISPATCH_MACH_RECV_SYNC_PEEK DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) =
+			0x00008000,
+};
+
 /*!
  * @enum dispatch_source_proc_flags_t
  *
@@ -636,7 +658,7 @@ typedef struct dispatch_source_extended_data_s {
  * argument, the remaining space in data will have been populated with zeroes.
 */
 API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
-DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
 size_t
 dispatch_source_get_extended_data(dispatch_source_t source,
diff --git a/private/time_private.h b/private/time_private.h
index ae341e6d6..e8dd1accf 100644
--- a/private/time_private.h
+++ b/private/time_private.h
@@ -32,6 +32,8 @@
 #include  // for HeaderDoc
 #endif
 
+__BEGIN_DECLS
+
 /*
  * @constant DISPATCH_MONOTONICTIME_NOW
  * A dispatch_time_t value that corresponds to the current value of the
@@ -83,5 +85,38 @@ enum {
 
 #endif // __APPLE__
 
+/*!
+ * @function dispatch_time_to_nsecs
+ *
+ * @abstract
+ * Returns the clock and nanoseconds of a given dispatch_time_t.
+ *
+ * @discussion
+ * This interface decodes a dispatch_time_t, which makes it possible to
+ * compare two such values provided they are for the same clock ID.
+ *
+ * @param time
+ * The dispatch_time_t value to parse.
+ *
+ * @param clock
+ * A pointer to the clockid for this time.
+ *
+ * @param nsecs
+ * A pointer to the decoded number of nanoseconds for the passed in time
+ * relative to the epoch for this clock ID.
+ *
+ * @result
+ * Returns true if the dispatch_time_t value was valid.
+ * Returns false if the dispatch_time_t value was invalid,
+ * or DISPATCH_TIME_FOREVER.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+bool
+dispatch_time_to_nsecs(dispatch_time_t time,
+		dispatch_clockid_t *clock, uint64_t *nsecs);
+
+__END_DECLS
+
 #endif
diff --git a/private/workloop_private.h b/private/workloop_private.h
index 73f4d7aee..89e857a57 100644
--- a/private/workloop_private.h
+++ b/private/workloop_private.h
@@ -42,108 +42,7 @@
 DISPATCH_ASSUME_NONNULL_BEGIN
 
 __BEGIN_DECLS
 
-/*!
- * @typedef dispatch_workloop_t
- *
- * @abstract
- * Dispatch workloops invoke workitems submitted to them in priority order.
- *
- * @discussion
- * A dispatch workloop is a flavor of dispatch_queue_t that is a priority
- * ordered queue (using the QOS class of the submitted workitems as the
- * ordering).
- *
- * Between each workitem invocation, the workloop will evaluate whether higher
- * priority workitems have since been submitted and execute these first.
- *
- * Serial queues targeting a workloop maintain FIFO execution of their
- * workitems. However, the workloop may reorder workitems submitted to
- * independent serial queues targeting it with respect to each other,
- * based on their priorities.
- *
- * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed
- * to all APIs accepting a dispatch queue, except for functions from the
- * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop
- * objects. Functions from the dispatch_sync() family on queues targeting
- * a workloop are still permitted but discouraged for performance reasons.
- */
-#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
-typedef struct dispatch_workloop_s *dispatch_workloop_t;
-#else
-DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue);
-#endif
-
-/*!
- * @function dispatch_workloop_create
- *
- * @abstract
- * Creates a new dispatch workloop to which workitems may be submitted.
- *
- * @param label
- * A string label to attach to the workloop.
- *
- * @result
- * The newly created dispatch workloop.
- */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create(const char *_Nullable label); - -/*! - * @function dispatch_workloop_create_inactive - * - * @abstract - * Creates a new inactive dispatch workloop that can be setup and then - * activated. - * - * @discussion - * Creating an inactive workloop allows for it to receive further configuration - * before it is activated, and workitems can be submitted to it. - * - * Submitting workitems to an inactive workloop is undefined and will cause the - * process to be terminated. - * - * @param label - * A string label to attach to the workloop. - * - * @result - * The newly created dispatch workloop. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create_inactive(const char *_Nullable label); - -/*! - * @function dispatch_workloop_set_autorelease_frequency - * - * @abstract - * Sets the autorelease frequency of the workloop. - * - * @discussion - * See dispatch_queue_attr_make_with_autorelease_frequency(). - * The default policy for a workloop is - * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM. - * - * @param workloop - * The dispatch workloop to modify. - * - * This workloop must be inactive, passing an activated object is undefined - * and will cause the process to be terminated. - * - * @param frequency - * The requested autorelease frequency. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, - dispatch_autorelease_frequency_t frequency); - -DISPATCH_ENUM(dispatch_workloop_param_flags, uint64_t, +DISPATCH_OPTIONS(dispatch_workloop_param_flags, uint64_t, DISPATCH_WORKLOOP_NONE DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x0, DISPATCH_WORKLOOP_FIXED_PRIORITY DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x1, ); @@ -280,158 +179,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_workloop_should_yield_4NW(void); -/*! - * @function dispatch_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a workitem to a dispatch queue like dispatch_async(), however - * dispatch_async_and_wait() will not return until the workitem has finished. - * - * Like functions of the dispatch_sync family, dispatch_async_and_wait() is - * subject to dead-lock (See dispatch_sync() for details). - * - * However, dispatch_async_and_wait() differs from functions of the - * dispatch_sync family in two fundamental ways: how it respects queue - * attributes and how it chooses the execution context invoking the workitem. - * - * Differences with dispatch_sync() - * - * Work items submitted to a queue with dispatch_async_and_wait() observe all - * queue attributes of that queue when invoked (inluding autorelease frequency - * or QOS class). - * - * When the runtime has brought up a thread to invoke the asynchronous workitems - * already submitted to the specified queue, that servicing thread will also be - * used to execute synchronous work submitted to the queue with - * dispatch_async_and_wait(). 
- * - * However, if the runtime has not brought up a thread to service the specified - * queue (because it has no workitems enqueued, or only synchronous workitems), - * then dispatch_async_and_wait() will invoke the workitem on the calling thread, - * similar to the behaviour of functions in the dispatch_sync family. - * - * As an exception, if the queue the work is submitted to doesn't target - * a global concurrent queue (for example because it targets the main queue), - * then the workitem will never be invoked by the thread calling - * dispatch_async_and_wait(). - * - * In other words, dispatch_async_and_wait() is similar to submitting - * a dispatch_block_create()d workitem to a queue and then waiting on it, as - * shown in the code example below. However, dispatch_async_and_wait() is - * significantly more efficient when a new thread is not required to execute - * the workitem (as it will use the stack of the submitting thread instead of - * requiring heap allocations). - * - * - * dispatch_block_t b = dispatch_block_create(0, block); - * dispatch_async(queue, b); - * dispatch_block_wait(b, DISPATCH_TIME_FOREVER); - * Block_release(b); - * - * - * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param block - * The block to be invoked on the target dispatch queue. - * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! - * @function dispatch_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * See dispatch_async_and_wait() for details. - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); - -/*! - * @function dispatch_barrier_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks - * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param work - * The application-defined block to invoke on the target queue. - * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! 
- * @function dispatch_barrier_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but - * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_barrier_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); __END_DECLS diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 9409048f3..2b5a6061b 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -278,7 +278,7 @@ struct dispatch_magazine_s { }; #if DISPATCH_DEBUG -#define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) +#define DISPATCH_ALLOCATOR_SCRIBBLE ((int)0xAFAFAFAF) #endif diff --git a/src/apply.c b/src/apply.c index 9c7d60ffd..160874f4c 100644 --- a/src/apply.c +++ b/src/apply.c @@ -21,11 +21,97 @@ #include "internal.h" typedef void (*dispatch_apply_function_t)(void *, size_t); + static char const * const _dispatch_apply_key = "apply"; #define DISPATCH_APPLY_INVOKE_REDIRECT 0x1 #define DISPATCH_APPLY_INVOKE_WAIT 0x2 +/* flags for da_dc->dc_data + * + * continuation func is a dispatch_apply_function_t (args: item) + */ +#define DA_FLAG_APPLY 0x01ul +// contin func is a dispatch_apply_attr_function_t (args: item, worker idx) +#define DA_FLAG_APPLY_WITH_ATTR 0x02ul + +#if __LP64__ +/* Our continuation allocator is a bit more performant than the default system + * malloc (especially with our per-thread cache), so let's use it if we can. + * On 32-bit platforms, dispatch_apply_s is bigger than dispatch_continuation_s + * so we can't use the cont allocator, but we're okay with the slight perf + * degradation there. 
+ */ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 1 +dispatch_static_assert(sizeof(struct dispatch_apply_s) <= sizeof(struct dispatch_continuation_s), + "Apply struct should fit inside continuation struct so we can borrow the continuation allocator"); +#else // __LP64__ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 0 +#endif // __LP64__ + +DISPATCH_ALWAYS_INLINE DISPATCH_MALLOC +static inline dispatch_apply_t +_dispatch_apply_alloc(void) +{ + dispatch_apply_t da; +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = (__typeof__(da))_dispatch_continuation_alloc(); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = _dispatch_calloc(1, sizeof(*da)); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + return da; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_free(dispatch_apply_t da) +{ +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + _dispatch_continuation_free((dispatch_continuation_t)da); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + free(da); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR +} + +static void _dispatch_apply_da_copy_attr(dispatch_apply_t, dispatch_apply_attr_t _Nullable); +static bool _dispatch_attr_is_initialized(dispatch_apply_attr_t attr); + +static void +_dispatch_apply_set_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + _dispatch_attr_apply_cluster_set(worker_index, attr->per_cluster_parallelism); + } +} + +static void +_dispatch_apply_clear_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + _dispatch_attr_apply_cluster_clear(worker_index, attr->per_cluster_parallelism); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_destroy(dispatch_apply_t da) +{ +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif + if (da->da_attr) { + dispatch_apply_attr_destroy(da->da_attr); + free(da->da_attr); + } + _dispatch_apply_free(da); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) @@ -33,12 +119,34 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) size_t const iter = da->da_iterations; size_t idx, done = 0; + /* workers start over time but never quit until the job is done, so + * we can allocate an index simply by incrementing + */ + uint32_t worker_index = 0; + worker_index = os_atomic_inc_orig2o(da, da_worker_index, relaxed); + + _dispatch_apply_set_attr_behavior(da->da_attr, worker_index); + idx = os_atomic_inc_orig2o(da, da_index, acquire); if (unlikely(idx >= iter)) goto out; - - // da_dc is only safe to access once the 'index lock' has been acquired - dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; + /* + * da_dc is only safe to access once the 'index lock' has been acquired + * because it lives on the stack of the thread calling dispatch_apply. + * + * da lives until the last worker thread has finished (protected by + * da_thr_cnt), but da_dc only lives until the calling thread returns + * after the last work item is complete, which may be sooner than that. + * (In fact, the calling thread could do all the workitems itself and + * return before the worker threads even start.) + * + * Therefore the increment (reserving a valid workitem index from + * da_index) protects our access to da_dc. + * + * We also need an acquire barrier, and this is a good place to have one. 
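+ *
+ * A compressed timeline of the guarantee described above (a sketch in
+ * terms of the fields this function already uses, not additional code):
+ *
+ *     caller: set up da and da_dc (stack) -> post workers -> run items
+ *             -> wait on da_event -> return (da_dc dies)
+ *     worker: inc(da_index, acquire) -> idx < iter ? use da_dc : skip
+ *             -> sub(da_todo, release) -> last one signals da_event
+ *
+ * The release on da_todo and the caller's wait on da_event are what keep
+ * the caller from returning (and killing da_dc) while any worker still
+ * holds a reserved index.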
+ */ + dispatch_function_t const func = da->da_dc->dc_func; void *const da_ctxt = da->da_dc->dc_ctxt; + uintptr_t apply_flags = (uintptr_t)da->da_dc->dc_data; _dispatch_perfmon_workitem_dec(); // this unit executes many items @@ -52,7 +160,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) dispatch_thread_frame_s dtf; dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { - dispatch_queue_t dq = da->da_dc->dc_data; + dispatch_queue_t dq = da->da_dc->dc_other; _dispatch_thread_frame_push(&dtf, dq); old_dbp = _dispatch_set_basepri(dq->dq_priority); } @@ -61,7 +169,13 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) // Striding is the responsibility of the caller. do { dispatch_invoke_with_autoreleasepool(flags, { - _dispatch_client_callout2(da_ctxt, idx, func); + if (apply_flags & DA_FLAG_APPLY) { + _dispatch_client_callout2(da_ctxt, idx, (dispatch_apply_function_t)func); + } else if (apply_flags & DA_FLAG_APPLY_WITH_ATTR) { + _dispatch_client_callout3_a(da_ctxt, idx, worker_index, (dispatch_apply_attr_function_t)func); + } else { + DISPATCH_INTERNAL_CRASH(apply_flags, "apply continuation has invalid flags"); + } _dispatch_perfmon_workitem_inc(); done++; idx = os_atomic_inc_orig2o(da, da_index, relaxed); @@ -75,21 +189,21 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) _dispatch_thread_context_pop(&apply_ctxt); - // The thread that finished the last workitem wakes up the possibly waiting - // thread that called dispatch_apply. They could be one and the same. - if (!os_atomic_sub2o(da, da_todo, done, release)) { + /* The thread that finished the last workitem wakes up the possibly waiting + * thread that called dispatch_apply. They could be one and the same. + */ + if (os_atomic_sub2o(da, da_todo, done, release) == 0) { _dispatch_thread_event_signal(&da->da_event); } out: + _dispatch_apply_clear_attr_behavior(da->da_attr, worker_index); + if (invoke_flags & DISPATCH_APPLY_INVOKE_WAIT) { _dispatch_thread_event_wait(&da->da_event); _dispatch_thread_event_destroy(&da->da_event); } if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) { -#if DISPATCH_INTROSPECTION - _dispatch_continuation_free(da->da_dc); -#endif - _dispatch_continuation_free((dispatch_continuation_t)da); + _dispatch_apply_destroy(da); } } @@ -138,19 +252,25 @@ _dispatch_apply_serial(void *ctxt) dispatch_invoke_flags_t flags; size_t idx = 0; + // no need yet for _set_attr_behavior() for serial applies _dispatch_perfmon_workitem_dec(); // this unit executes many items - flags = _dispatch_apply_autorelease_frequency(dc->dc_data); + flags = _dispatch_apply_autorelease_frequency(dc->dc_other); do { dispatch_invoke_with_autoreleasepool(flags, { - _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY) { + _dispatch_client_callout2(dc->dc_ctxt, idx, (dispatch_apply_function_t)dc->dc_func); + } else if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY_WITH_ATTR) { + // when running serially, the only worker is worker number 0 + _dispatch_client_callout3_a(dc->dc_ctxt, idx, 0, (dispatch_apply_attr_function_t)dc->dc_func); + } else { + DISPATCH_INTERNAL_CRASH(dc->dc_data, "apply continuation has invalid flags"); + } + _dispatch_perfmon_workitem_inc(); }); } while (++idx < iter); -#if DISPATCH_INTROSPECTION - _dispatch_continuation_free(da->da_dc); -#endif - _dispatch_continuation_free((dispatch_continuation_t)da); + _dispatch_apply_destroy(da); } DISPATCH_ALWAYS_INLINE @@ -234,7 +354,7 @@ 
_dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; int32_t da_width = da->da_thr_cnt - 1; - dispatch_queue_t top_dq = da->da_dc->dc_data, dq = top_dq; + dispatch_queue_t top_dq = da->da_dc->dc_other, dq = top_dq; do { int32_t width = _dispatch_queue_try_reserve_apply_width(dq, da_width); @@ -249,9 +369,10 @@ _dispatch_apply_redirect(void *ctxt) da->da_thr_cnt -= excess; } if (!da->da_flags) { - // find first queue in descending target queue order that has - // an autorelease frequency set, and use that as the frequency for - // this continuation. + /* find first queue in descending target queue order that has + * an autorelease frequency set, and use that as the frequency for + * this continuation. + */ da->da_flags = _dispatch_queue_autorelease_frequency(dq); } dq = dq->do_targetq; @@ -267,29 +388,101 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_queue_global_t _dispatch_apply_root_queue(dispatch_queue_t dq) { + dispatch_queue_t tq = NULL; + if (dq) { while (unlikely(dq->do_targetq)) { - dq = dq->do_targetq; - } - // if the current root queue is a pthread root queue, select it - if (!_dispatch_is_in_root_queues_array(dq)) { - return upcast(dq)._dgq; + tq = dq->do_targetq; + + // If the current root is a custom pri workloop, select it. We have + // to do this check here because custom pri workloops have a fake + // bottom targetq. + if (_dispatch_is_custom_pri_workloop(dq)) { + return upcast(dq)._dgq; + } + + dq = tq; } } + // if the current root queue is a pthread root queue, select it + if (dq && !_dispatch_is_in_root_queues_array(dq)) { + return upcast(dq)._dgq; + } + pthread_priority_t pp = _dispatch_get_priority(); dispatch_qos_t qos = _dispatch_qos_from_pp(pp); - return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, false); + return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, 0); } -DISPATCH_NOINLINE -void -dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, - void (*func)(void *, size_t)) +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count_for_cluster(dispatch_apply_attr_t _Nullable attr, dispatch_qos_t qos) +{ + size_t cluster_max = SIZE_MAX; + if (attr && attr->per_cluster_parallelism > 0) { + uint32_t rc = _dispatch_cluster_max_parallelism(qos); + if (likely(rc > 0)) { + cluster_max = rc * (uint32_t) (attr->per_cluster_parallelism); + } else { + /* if there's no cluster resource parallelism, then our return value + * is 0 which means "attr is a meaningless request" + */ + cluster_max = 0; + } + } + return cluster_max; +} + +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count(dispatch_apply_attr_t _Nullable attr, size_t nested, dispatch_qos_t qos, bool active) +{ + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + size_t thr_cnt = 0; + + if (likely(!attr)) { + /* Normal apply: Start with as many threads as the QOS class would + * allow. If we are nested inside another apply, account for the fact + * that it's calling us N times, so we need to use 1/Nth the threads + * we usually would, to stay under the useful parallelism limit. + */ + unsigned long flags = active ? DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + thr_cnt = _dispatch_qos_max_parallelism(qos, flags); + if (unlikely(nested)) { + thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + } + } else { + /* apply_with_attr: if we are already nested, just go serial.
+ * We should use the minimum of the max allowed threads for this QOS + * level and the max useful parallel workers based on the requested + * attributes (e.g. the number of cluster level resources). + */ + if (unlikely(nested)) { + thr_cnt = 1; + } else { + unsigned long flags = active ? DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + size_t qos_max = _dispatch_qos_max_parallelism(qos, flags); + size_t cluster_max = _dispatch_apply_calc_thread_count_for_cluster(attr, qos); + thr_cnt = MIN(qos_max, cluster_max); + } + } + return thr_cnt; +} + +static void +_dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, + dispatch_queue_t _dq, void *ctxt, dispatch_function_t func, uintptr_t da_flags) { if (unlikely(iterations == 0)) { return; } + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; @@ -304,46 +497,92 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority) ?: _dispatch_priority_fallback_qos(dq->dq_priority); if (unlikely(dq->do_targetq)) { - // if the queue passed-in is not a root queue, use the current QoS - // since the caller participates in the work anyway + /* if the queue passed-in is not a root queue, use the current QoS + * since the caller participates in the work anyway + */ qos = _dispatch_qos_from_pp(_dispatch_get_priority()); } - int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_ACTIVE); - if (likely(!nested)) { - nested = iterations; + size_t thr_cnt = _dispatch_apply_calc_thread_count(attr, nested, qos, true); + if (thr_cnt == 0) { + DISPATCH_CLIENT_CRASH(attr, "attribute's properties are invalid or meaningless on this system"); + } + + /* dispatch_apply's nesting behavior is a little complicated; it tries to + * account for the multiplicative effect of the applies above it to bring + * up just the right number of total threads. + * dispatch_apply_with_attr is much simpler: it just goes serial if it is + * nested at all, and it sets the nested TSD to the max value to indicate + * that we are already saturating the CPUs so any applies nested inside + * it will also go serial. + */ + size_t new_nested; + if (attr) { + new_nested = DISPATCH_APPLY_MAX; } else { - thr_cnt = nested < (size_t)thr_cnt ? thr_cnt / (int32_t)nested : 1; - nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX - ? nested * iterations : DISPATCH_APPLY_MAX; + if (likely(!nested)) { + new_nested = iterations; + } else { + /* DISPATCH_APPLY_MAX is sqrt(size_max) so we can do this + * multiplication without checking for overflow. The actual magnitude + * isn't important, it just needs to be >> ncpu. + */ + new_nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX + ? nested * iterations : DISPATCH_APPLY_MAX; + } } - if (iterations < (size_t)thr_cnt) { - thr_cnt = (int32_t)iterations; + + /* Notwithstanding any of the above, we should never try to start more + * threads than the number of work items. (The excess threads would have + * no work to do.)
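+ *
+ * (For example, a 4-iteration apply on a machine whose QOS limit would
+ * otherwise allow 10 workers runs with thr_cnt clamped to 4.)
+ *
+ * A minimal usage sketch of the attr entry points added in this patch;
+ * the 'partial' and 'input' buffers are illustrative assumptions, not
+ * part of the API:
+ *
+ *     struct dispatch_apply_attr_s attr;
+ *     dispatch_apply_attr_init(&attr);
+ *     dispatch_apply_attr_set_parallelism(&attr,
+ *             DISPATCH_APPLY_ATTR_ENTITY_CLUSTER, 1);
+ *     dispatch_apply_with_attr(count, &attr, ^(size_t i, size_t w) {
+ *         partial[w] += input[i]; // worker_index gives a per-worker slot
+ *     });
+ *     dispatch_apply_attr_destroy(&attr);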
+ */ + if (iterations < thr_cnt) { + thr_cnt = iterations; } + struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, - .dc_data = dq, + .dc_other = dq, + .dc_data = (void *)da_flags, }; - dispatch_apply_t da = (__typeof__(da))_dispatch_continuation_alloc(); - da->da_index = 0; - da->da_todo = iterations; + dispatch_apply_t da = _dispatch_apply_alloc(); + os_atomic_init(&da->da_index, 0); + os_atomic_init(&da->da_todo, iterations); da->da_iterations = iterations; - da->da_nested = nested; - da->da_thr_cnt = thr_cnt; + da->da_nested = new_nested; + da->da_thr_cnt = (int32_t)thr_cnt; + os_atomic_init(&da->da_worker_index, 0); + _dispatch_apply_da_copy_attr(da, attr); #if DISPATCH_INTROSPECTION da->da_dc = _dispatch_continuation_alloc(); - *da->da_dc = dc; + da->da_dc->dc_func = (void *) dc.dc_func; + da->da_dc->dc_ctxt = dc.dc_ctxt; + da->da_dc->dc_other = dc.dc_other; + da->da_dc->dc_data = dc.dc_data; + da->da_dc->dc_flags = DC_FLAG_ALLOCATED; #else da->da_dc = &dc; #endif da->da_flags = 0; + if (unlikely(_dispatch_is_custom_pri_workloop(dq))) { + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); + + if (_dq_state_drain_locked_by_self(dq_state)) { + // We're already draining on the custom priority workloop, don't go + // wide, just call inline serially + return _dispatch_apply_serial(da); + } else { + return dispatch_async_and_wait_f(dq, da, _dispatch_apply_serial); + } + } + if (unlikely(dq->dq_width == 1 || thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } + if (unlikely(dq->do_targetq)) { if (unlikely(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); @@ -358,6 +597,21 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, _dispatch_thread_frame_pop(&dtf); } +DISPATCH_NOINLINE +void +dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, + void (*func)(void *, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, NULL, _dq, ctxt, (dispatch_function_t)func, DA_FLAG_APPLY); +} + +void +dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, void *ctxt, + void (*func)(void *, size_t, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, attr, DISPATCH_APPLY_AUTO, ctxt, (dispatch_function_t)func, DA_FLAG_APPLY_WITH_ATTR); +} + #ifdef __BLOCKS__ void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) @@ -365,4 +619,117 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } + +void +dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t attr, + void (^work)(size_t iteration, size_t worker_index)) +{ + dispatch_apply_with_attr_f(iterations, attr, work, + (dispatch_apply_attr_function_t)_dispatch_Block_invoke(work)); +} #endif + +static bool +_dispatch_attr_is_initialized(dispatch_apply_attr_t attr) +{ + return (attr->sig == DISPATCH_APPLY_ATTR_SIG) && (~(attr->guard) == (uintptr_t) attr); +} + +void +dispatch_apply_attr_init(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); + + attr->sig = DISPATCH_APPLY_ATTR_SIG; + attr->guard = ~ (uintptr_t) (attr); /* To prevent leaks from picking it up */ +} + +void +dispatch_apply_attr_destroy(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); +} + +static void +_dispatch_apply_da_copy_attr(dispatch_apply_t da, dispatch_apply_attr_t _Nullable src) +{ + if (src == NULL) { + da->da_attr = NULL; + return; + } + dispatch_apply_attr_t 
dst = _dispatch_calloc(1, sizeof(struct dispatch_apply_attr_s)); + dispatch_apply_attr_init(dst); + + dst->per_cluster_parallelism = src->per_cluster_parallelism; + dst->flags = src->flags; + // if there were non-POD types, we would manage them here + + da->da_attr = dst; +} + +static void +dispatch_apply_attr_set_per_cluster_parallelism(dispatch_apply_attr_t _Nonnull attr, + size_t threads_per_cluster) +{ + if (threads_per_cluster == 0) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "0 is an invalid threads_per_cluster value"); + } + if (threads_per_cluster > 1) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "Invalid threads_per_cluster value, only acceptable value is 1"); + } + + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + attr->per_cluster_parallelism = threads_per_cluster; +} + +void +dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t _Nonnull attr, + dispatch_apply_attr_entity_t entity, size_t threads_per_entity) +{ + switch (entity) { + case DISPATCH_APPLY_ATTR_ENTITY_CPU: + if (threads_per_entity != 1) { + DISPATCH_CLIENT_CRASH(threads_per_entity, "Invalid threads_per_entity value for CPU entity"); + } + break; + case DISPATCH_APPLY_ATTR_ENTITY_CLUSTER: + return dispatch_apply_attr_set_per_cluster_parallelism(attr, threads_per_entity); + default: + DISPATCH_CLIENT_CRASH(entity, "Unknown entity"); + } +} + +size_t +dispatch_apply_attr_query(dispatch_apply_attr_t attr, + dispatch_apply_attr_query_t which, + dispatch_apply_attr_query_flags_t flags) +{ + dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + size_t current_nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; + dispatch_queue_t old_dq = _dispatch_queue_get_current(); + dispatch_queue_t dq = _dispatch_apply_root_queue(old_dq)->_as_dq; + dispatch_qos_t current_qos = _dispatch_priority_qos(dq->dq_priority) ?: _dispatch_priority_fallback_qos(dq->dq_priority); + + switch (which) { + case DISPATCH_APPLY_ATTR_QUERY_VALID: + return (dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, flags) == 0 ? 0 : 1); + case DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS: + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + case DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS: + if (flags & DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE) { + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + } else { + /* we SHOULD pass DISPATCH_QOS_UNSPECIFIED - the intention is "at any + * possible QOS", more exactly, "at the QOS which has highest limits". + * bsdthread_ctl_qos_max_parallelism doesn't accept unspecified, + * though, so let's say USER_INTERACTIVE assuming the highest QOS + * will be the least limited one. + * + */ + return _dispatch_apply_calc_thread_count(attr, 0, DISPATCH_QOS_USER_INTERACTIVE, false); + } + } +} diff --git a/src/benchmark.c b/src/benchmark.c index 15e9f5535..259a67ca5 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -60,14 +60,14 @@ _dispatch_benchmark_init(void *context) } while (i < cnt); delta = _dispatch_uptime() - start; - lcost = delta; + lcost = (typeof(lcost)) delta; #if HAVE_MACH_ABSOLUTE_TIME lcost *= bdata->tbi.numer; lcost /= bdata->tbi.denom; #endif lcost /= cnt; - bdata->loop_cost = lcost > UINT64_MAX ? UINT64_MAX : (uint64_t)lcost; + bdata->loop_cost = (uint64_t) lcost > UINT64_MAX ? 
UINT64_MAX : (uint64_t)lcost; } #ifdef __BLOCKS__ @@ -113,7 +113,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, } while (i < count); delta = _dispatch_uptime() - start; - conversion = delta; + conversion = (typeof(conversion)) delta; #if HAVE_MACH_ABSOLUTE_TIME conversion *= bdata.tbi.numer; big_denom = bdata.tbi.denom; @@ -122,7 +122,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, #endif big_denom *= count; conversion /= big_denom; - ns = conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; + ns = (uint64_t) conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; return ns - bdata.loop_cost; } diff --git a/src/block.cpp b/src/block.cpp index 55f83c27d..3d7432529 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -91,7 +91,7 @@ struct dispatch_block_private_data_s { if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return; if (dbpd_group) { if (!dbpd_performed) dispatch_group_leave(dbpd_group); - _os_object_release(dbpd_group->_as_os_obj); + _os_object_release_without_xref_dispose(dbpd_group->_as_os_obj); } if (dbpd_queue) { _os_object_release_internal_n(dbpd_queue->_as_os_obj, 2); diff --git a/src/data.m b/src/data.m index 2a95d28f2..e0185a0cf 100644 --- a/src/data.m +++ b/src/data.m @@ -150,11 +150,9 @@ - (void)_activate { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(data_empty) - -// Force non-lazy class realization rdar://10640168 -+ (void)load { -} +OS_OBJECT_NONLAZY_CLASS_LOAD - (id)retain { return (id)self; diff --git a/src/data_internal.h b/src/data_internal.h index 1589a793a..9ed12e13b 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -57,10 +57,10 @@ DISPATCH_CLASS_DECL(data, OBJECT); struct dispatch_data_s { #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - const void *do_vtable; + const void *__ptrauth_objc_isa_pointer do_vtable; dispatch_queue_t do_targetq; void *ctxt; - void *finalizer; + dispatch_function_t DISPATCH_FUNCTION_POINTER finalizer; #else DISPATCH_OBJECT_HEADER(data); #endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA diff --git a/src/event/event.c b/src/event/event.c index 98dd87171..b908419d2 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -39,7 +39,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, return DISPATCH_UNOTE_NULL; } - if (dst->dst_mask && !mask) { + if (dst->dst_mask && !dst->dst_allow_empty_mask && !mask) { return DISPATCH_UNOTE_NULL; } @@ -227,7 +227,6 @@ const dispatch_source_type_s _dispatch_source_type_data_add = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -239,7 +238,6 @@ const dispatch_source_type_s _dispatch_source_type_data_or = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -251,7 +249,6 @@ const dispatch_source_type_s _dispatch_source_type_data_replace = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -271,7 +268,6 @@ const dispatch_source_type_s _dispatch_source_type_read = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), 
- .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -289,7 +285,6 @@ const dispatch_source_type_s _dispatch_source_type_write = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -313,7 +308,6 @@ const dispatch_source_type_s _dispatch_source_type_signal = { .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_signal_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -990,7 +984,6 @@ const dispatch_source_type_s _dispatch_source_type_timer = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1004,6 +997,7 @@ const dispatch_source_type_s _dispatch_source_type_timer_with_clock = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), + .dst_strict = true, .dst_create = _dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, diff --git a/src/event/event_config.h b/src/event/event_config.h index 4f4b6e5a3..0b883b035 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -148,8 +148,10 @@ # endif // VQ_FREE_SPACE_CHANGE # if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__) -# define EVFILT_NW_CHANNEL (-16) -# define NOTE_FLOW_ADV_UPDATE 0x1 +# define EVFILT_NW_CHANNEL (-16) +# define NOTE_FLOW_ADV_UPDATE 0x1 +# define NOTE_CHANNEL_EVENT 0x2 +# define NOTE_IF_ADV_UPD 0x4 # endif #else // DISPATCH_EVENT_BACKEND_KEVENT # define EV_ADD 0x0001 @@ -232,12 +234,16 @@ typedef unsigned int mach_msg_priority_t; # define MACH_SEND_SYNC_OVERRIDE 0x00100000 # endif // MACH_SEND_SYNC_OVERRIDE +# ifndef MACH_MSG_STRICT_REPLY +# define MACH_MSG_STRICT_REPLY 0x00000200 +# endif + # ifndef MACH_RCV_SYNC_WAIT # define MACH_RCV_SYNC_WAIT 0x00004000 # endif // MACH_RCV_SYNC_WAIT # define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t) -# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_AV # define DISPATCH_MACH_RCV_OPTIONS ( \ MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index f31d13ee0..e3578a095 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -702,6 +702,12 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ + (void)wlh; +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 14c485ee3..305cf931e 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -99,7 +99,7 @@ typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle #define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0x3ul)) #define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~0x7ul)) -DISPATCH_ENUM(dispatch_unote_timer_flags, uint8_t, 
+DISPATCH_OPTIONS(dispatch_unote_timer_flags, uint8_t, /* DISPATCH_TIMER_STRICT 0x1 */ /* DISPATCH_TIMER_BACKGROUND = 0x2, */ DISPATCH_TIMER_CLOCK_UPTIME = DISPATCH_CLOCK_UPTIME << 2, @@ -130,7 +130,7 @@ typedef uint32_t dispatch_unote_ident_t; #endif #define DISPATCH_UNOTE_CLASS_HEADER() \ - dispatch_source_type_t du_type; \ + dispatch_source_type_t __ptrauth_objc_isa_pointer du_type; \ uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ os_atomic(dispatch_unote_state_t) du_state; \ dispatch_unote_ident_t du_ident; \ @@ -251,7 +251,7 @@ void dispatch_debug_machport(mach_port_t name, const char *str); // layout must match dispatch_source_refs_s struct dispatch_mach_recv_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); - dispatch_mach_handler_function_t dmrr_handler_func; + dispatch_mach_handler_function_t DISPATCH_FUNCTION_POINTER dmrr_handler_func; void *dmrr_handler_ctxt; }; typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; @@ -350,6 +350,7 @@ typedef struct dispatch_source_type_s { dispatch_unote_action_t dst_action; uint8_t dst_per_trigger_qos : 1; uint8_t dst_strict : 1; + uint8_t dst_allow_empty_mask : 1; uint8_t dst_timer_flags; uint16_t dst_flags; #if DISPATCH_EVENT_BACKEND_KEVENT @@ -455,6 +456,34 @@ _dispatch_set_return_to_kernel(void) _dispatch_thread_setspecific(dispatch_r2k_key, (void *)1); } +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_get_quantum_expiry_action(void) +{ + return (uintptr_t) _dispatch_thread_getspecific(dispatch_quantum_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_ack_quantum_expiry_action(void) +{ + return _dispatch_thread_setspecific(dispatch_quantum_key, (void *) 0); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_current_dsc(void *dsc) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, dsc); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_clear_current_dsc(void) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, NULL); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_clear_return_to_kernel(void) @@ -675,6 +704,7 @@ void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state); void _dispatch_event_loop_wait_for_ownership( struct dispatch_sync_context_s *dsc); +void _dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh); void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags); #if DISPATCH_WLH_DEBUG diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 16b69b37d..790d72408 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -323,6 +323,17 @@ _dispatch_kevent_mach_msg_size(dispatch_kevent_t ke) return (mach_msg_size_t)ke->ext[1]; } +static inline bool +_dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke) +{ +#define MACH_ERROR_RCV_SUB 0x4 + mach_error_t kr = (mach_error_t) ke->fflags; + return (err_get_system(kr) == err_mach_ipc) && + (err_get_sub(kr) == MACH_ERROR_RCV_SUB); +#undef MACH_ERROR_RCV_SUB +} + +static inline bool _dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke); static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke); static inline void _dispatch_mach_host_calendar_change_register(void); @@ -548,8 +559,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) // when the process exists but is a zombie. As a workaround, we // simulate an exit event for any EVFILT_PROC with an invalid pid. 
ke->flags = EV_UDATA_SPECIFIC | EV_ONESHOT | EV_DELETE; - ke->fflags = NOTE_EXIT; - ke->data = 0; + ke->fflags = NOTE_EXIT | NOTE_EXITSTATUS; + ke->data = 0; // Fake exit status _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); } else { return _dispatch_kevent_print_error(ke); @@ -560,7 +571,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) } #if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && _dispatch_kevent_mach_msg_size(ke)) { + if (ke->filter == EVFILT_MACHPORT && (_dispatch_kevent_mach_msg_size(ke) || + _dispatch_kevent_has_machmsg_rcv_error(ke))) { return _dispatch_kevent_mach_msg_drain(ke); } #endif @@ -777,8 +789,9 @@ _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, #if DISPATCH_USE_KEVENT_QOS size_t size; if (poll_for_events) { - size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + - DISPATCH_MACH_TRAILER_SIZE; + dispatch_assert(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE <= 32 << 10); + size = 32 << 10; // match WQ_KEVENT_DATA_SIZE buf = alloca(size); avail = &size; } @@ -860,7 +873,6 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, du->du_priority), #endif }; - (void)pp; // if DISPATCH_USE_KEVENT_QOS == 0 } DISPATCH_ALWAYS_INLINE @@ -947,7 +959,7 @@ void _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, uint64_t _Atomic *addr) { -#ifdef NOTE_WL_SYNC_IPC +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF dispatch_kevent_s ke = { .ident = port, .filter = EVFILT_WORKLOOP, @@ -959,18 +971,18 @@ _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, .ext[EV_EXTIDX_WL_VALUE] = (uintptr_t)wlh, }; int rc = _dispatch_kq_immediate_update(wlh, &ke); - if (unlikely(rc)) { + if (unlikely(rc && rc != ENOENT)) { DISPATCH_INTERNAL_CRASH(rc, "Unexpected error from kevent"); } #else (void)wlh; (void)port; (void)addr; -#endif +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF } void _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) { -#ifdef NOTE_WL_SYNC_IPC +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF dispatch_kevent_s ke = { .ident = port, .filter = EVFILT_WORKLOOP, @@ -981,7 +993,7 @@ _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) _dispatch_kq_deferred_update(wlh, &ke); #else (void)wlh; (void)port; -#endif // NOTE_WL_SYNC_IPC +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF } #endif @@ -1286,14 +1298,13 @@ _dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags) enum { DISPATCH_WORKLOOP_ASYNC, DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, - DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC, DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE, DISPATCH_WORKLOOP_ASYNC_LEAVE, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, DISPATCH_WORKLOOP_RETARGET, + DISPATCH_WORKLOOP_SYNC_DISCOVER, DISPATCH_WORKLOOP_SYNC_WAIT, DISPATCH_WORKLOOP_SYNC_WAKE, DISPATCH_WORKLOOP_SYNC_FAKE, @@ -1303,17 +1314,16 @@ enum { static char const * const _dispatch_workloop_actions[] = { [DISPATCH_WORKLOOP_ASYNC] = "async", [DISPATCH_WORKLOOP_ASYNC_FROM_SYNC] = "async (from sync)", - [DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC] = "discover sync", [DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE] = "qos update", [DISPATCH_WORKLOOP_ASYNC_LEAVE] = "leave", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC] = "leave (from sync)", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER] = "leave (from transfer)", - [DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP] = "leave (forced)", [DISPATCH_WORKLOOP_RETARGET] = "retarget", + [DISPATCH_WORKLOOP_SYNC_DISCOVER] = "sync-discover", 
[DISPATCH_WORKLOOP_SYNC_WAIT] = "sync-wait", - [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_WAKE] = "sync-wake", + [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_END] = "sync-end", }; @@ -1392,6 +1402,11 @@ _dispatch_kevent_workloop_priority(dispatch_queue_t dq, int which, qos = DISPATCH_QOS_MAINTENANCE; } pthread_priority_t pp = _dispatch_qos_to_pp(qos); + + if (rq_pri & DISPATCH_PRIORITY_FLAG_COOPERATIVE) { + DISPATCH_INTERNAL_CRASH(rq_pri, "Waking up a kq with cooperative thread request is not supported"); + } + return pp | (rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } @@ -1408,11 +1423,8 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { - case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC: - case DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC: + case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: case DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE: dispatch_assert(_dq_state_is_base_wlh(dq_state)); dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); @@ -1420,21 +1432,16 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ROLE_MASK; mask |= DISPATCH_QUEUE_ENQUEUED; mask |= DISPATCH_QUEUE_MAX_QOS_MASK; - if (which == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC) { - dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); - dispatch_assert(_dq_state_drain_locked(dq_state)); - mask |= DISPATCH_QUEUE_SYNC_TRANSFER; + fflags |= NOTE_WL_IGNORE_ESTALE; + fflags |= NOTE_WL_UPDATE_QOS; + if (_dq_state_in_uncontended_sync(dq_state)) { fflags |= NOTE_WL_DISCOVER_OWNER; - } else { - fflags |= NOTE_WL_IGNORE_ESTALE; + mask |= DISPATCH_QUEUE_UNCONTENDED_SYNC; } - fflags |= NOTE_WL_UPDATE_QOS; pp = _dispatch_kevent_workloop_priority(dq, which, qos); break; case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER: fflags |= NOTE_WL_IGNORE_ESTALE; /* FALLTHROUGH */ @@ -1444,18 +1451,6 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ENQUEUED; break; - case DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP: - // 0 is never a valid queue state, so the knote attach will fail due to - // the debounce. However, NOTE_WL_END_OWNERSHIP is always observed even - // when ESTALE is returned, which is the side effect we're after here. 
- fflags |= NOTE_WL_END_OWNERSHIP; - fflags |= NOTE_WL_IGNORE_ESTALE; - action = EV_ADD | EV_ENABLE; - mask = ~0ull; - dq_state = 0; - pp = _dispatch_kevent_workloop_priority(dq, which, qos); - break; - case DISPATCH_WORKLOOP_RETARGET: action = EV_ADD | EV_DELETE | EV_ENABLE; fflags |= NOTE_WL_END_OWNERSHIP; @@ -1507,6 +1502,16 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { + case DISPATCH_WORKLOOP_SYNC_DISCOVER: + dispatch_assert(_dq_state_received_sync_wait(dq_state)); + dispatch_assert(_dq_state_in_uncontended_sync(dq_state)); + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_DISCOVER_OWNER | + NOTE_WL_IGNORE_ESTALE; + mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + break; + case DISPATCH_WORKLOOP_SYNC_WAIT: action = EV_ADD | EV_DISABLE; fflags = NOTE_WL_SYNC_WAIT; @@ -1514,10 +1519,6 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, if (_dispatch_qos_from_pp(pp) == 0) { pp = _dispatch_qos_to_pp(DISPATCH_QOS_DEFAULT); } - if (_dq_state_received_sync_wait(dq_state)) { - fflags |= NOTE_WL_DISCOVER_OWNER; - mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - } break; case DISPATCH_WORKLOOP_SYNC_FAKE: @@ -1640,9 +1641,6 @@ _dispatch_event_loop_get_action_for_state(uint64_t dq_state) if (!_dq_state_drain_locked(dq_state)) { return DISPATCH_WORKLOOP_ASYNC; } - if (!_dq_state_in_sync_transfer(dq_state)) { - return DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC; - } return DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; } @@ -1653,6 +1651,13 @@ _dispatch_kevent_workloop_poke_drain(dispatch_kevent_t ke) dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); dispatch_wlh_t wlh = (dispatch_wlh_t)ke->udata; +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF + if (ke->fflags & NOTE_WL_SYNC_IPC) { + dispatch_assert((ke->flags & EV_ERROR) && ke->data == ENOENT); + return _dispatch_kevent_wlh_debug("ignoring", ke); + } +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF + dispatch_assert(ke->fflags & NOTE_WL_THREAD_REQUEST); if (ke->flags & EV_ERROR) { uint64_t dq_state = ke->ext[EV_EXTIDX_WL_VALUE]; @@ -1709,42 +1714,11 @@ _dispatch_kevent_workloop_poke(dispatch_wlh_t wlh, uint64_t dq_state, dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); dispatch_assert(!_dq_state_is_enqueued_on_manager(dq_state)); action = _dispatch_event_loop_get_action_for_state(dq_state); -override: _dispatch_kq_fill_workloop_event(&ke, action, wlh, dq_state); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { - _dispatch_kevent_workloop_drain_error(&ke, - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE); - dispatch_assert(action == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC); - dq_state = ke.ext[EV_EXTIDX_WL_VALUE]; - // - // There are 4 things that can cause an ESTALE for DISCOVER_SYNC: - // - the queue role changed, we don't want to redrive - // - the queue is no longer enqueued, we don't want to redrive - // - the max QoS changed, whoever changed it is doing the same - // transition, so we don't need to redrive - // - the DISPATCH_QUEUE_IN_SYNC_TRANFER bit got set - // - // The interesting case is the last one, and will only happen in the - // following chain of events: - // 1. uncontended dispatch_sync() - // 2. contended dispatch_sync() - // 3. contended dispatch_async() - // - // And this code is running because of (3). 
It is possible that (1) - // hands off to (2) while this call is being made, causing the - // DISPATCH_QUEUE_IN_TRANSFER_SYNC to be set, and we don't need to tell - // the kernel about the owner anymore. However, the async in that case - // will have set a QoS on the queue (since dispatch_sync()s don't but - // dispatch_async()s always do), and we need to redrive to tell it - // to the kernel. - // - if (_dq_state_is_base_wlh(dq_state) && - _dq_state_is_enqueued_on_target(dq_state) && - _dq_state_in_sync_transfer(dq_state)) { - action = DISPATCH_WORKLOOP_ASYNC; - goto override; - } + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); } if (!(flags & DISPATCH_EVENT_LOOP_OVERRIDE)) { @@ -1853,7 +1827,6 @@ _dispatch_kevent_workloop_poke_self(dispatch_deferred_items_t ddi, // will continue to apply the overrides in question until we acknowledge // them, so there's no rush. // - ddi->ddi_wlh_needs_update = true; if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { _dispatch_release_no_dispose(dq); } else { @@ -1869,6 +1842,7 @@ _dispatch_kevent_workloop_poke_self(dispatch_deferred_items_t ddi, } dispatch_assert(!ddi->ddi_stashed_dou._dq); ddi->ddi_wlh_needs_delete = true; + ddi->ddi_wlh_needs_update = true; ddi->ddi_stashed_rq = upcast(dq->do_targetq)._dgq; ddi->ddi_stashed_dou._dq = dq; ddi->ddi_stashed_qos = _dq_state_max_qos(dq_state); @@ -2023,11 +1997,25 @@ _dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, uint64_t dq_state) { #if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_received_sync_wait(dq_state)) { + dispatch_tid tid = _dq_state_drain_owner(dq_state); + int slot = _dispatch_kq_deferred_find_slot(ddi, EVFILT_WORKLOOP, + (uint64_t)ddi->ddi_wlh, tid); + if (slot == ddi->ddi_nevents) { + dispatch_assert(slot < DISPATCH_DEFERRED_ITEMS_EVENT_COUNT); + ddi->ddi_nevents++; + } + _dispatch_kq_fill_workloop_sync_event(&ddi->ddi_eventlist[slot], + DISPATCH_WORKLOOP_SYNC_DISCOVER, ddi->ddi_wlh, + dq_state, _dq_state_drain_owner(dq_state)); + } + int action = _dispatch_event_loop_get_action_for_state(dq_state); dispatch_assert(ddi->ddi_wlh_needs_delete); ddi->ddi_wlh_needs_delete = false; ddi->ddi_wlh_needs_update = false; _dispatch_kq_fill_ddi_workloop_event(ddi, action, ddi->ddi_wlh, dq_state); + #else (void)ddi; (void)dq_state; #endif // DISPATCH_USE_KEVENT_WORKLOOP @@ -2041,11 +2029,24 @@ _dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke; +again: _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_END, wlh, 0, dsc->dsc_waiter); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { _dispatch_kevent_workloop_drain_error(&ke, dsc->dsc_waiter_needs_cancel ? 0 : DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT); + // + // quick hack for 78288114 + // + // Something with DISPATCH_WORKLOOP_SYNC_FAKE is not quite right; + // we can at least make the thread in the way finish the syscall + // it's trying to make with directed handoffs. + // + // It's inefficient, but it doesn't have a priority inversion. + // + _dispatch_preemption_yield_to(dsc->dsc_waiter, 1); + goto again; + // // Our deletion attempt is opportunistic as in most cases we will find // the matching knote and break the waiter out.
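The retry added to _dispatch_event_loop_cancel_waiter() above has the following shape (a sketch with hypothetical names: try_delete_waiter_knote() and yield_to() stand in for the SYNC_END kevent deletion and _dispatch_preemption_yield_to(), which is what the patch actually calls):

	for (;;) {
		if (try_delete_waiter_knote(wlh, waiter_tid)) {
			break;            // knote found and deleted, waiter broken out
		}
		yield_to(waiter_tid); // directed handoff to the thread in the way
	}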
@@ -2079,6 +2080,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, int action, n = 0; dispatch_assert(_dq_state_drain_locked_by(new_state, dsc->dsc_waiter)); + dispatch_assert(!dsc->dsc_wlh_self_wakeup); if (wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == wlh) { dispatch_assert(ddi->ddi_wlh_needs_delete); @@ -2087,8 +2089,8 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, if (wlh == waiter_wlh) { // async -> sync handoff dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); - dispatch_assert(_dq_state_in_sync_transfer(new_state)); + dispatch_assert(!_dq_state_in_uncontended_sync(old_state)); + dispatch_assert(!_dq_state_in_uncontended_sync(new_state)); if (_dq_state_is_enqueued_on_target(new_state)) { action = DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; @@ -2111,7 +2113,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); - dispatch_assert(_dq_state_in_sync_transfer(new_state)); + dispatch_assert(!_dq_state_in_uncontended_sync(new_state)); // During the handoff, the waiter noticed there was no work *after* // that last work item, so we want to kill the thread request while // there's an owner around to avoid races between knote_process() and _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, wlh, new_state); } - if (_dq_state_in_sync_transfer(new_state)) { + if (_dq_state_is_base_wlh(new_state)) { // Even when waiter_wlh != wlh we can pretend we got woken up // which is a knote we will be able to delete later with a SYNC_END. // This allows rectifying incorrect ownership sooner, and also happens _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_WAKE, wlh, new_state, dsc->dsc_waiter); } - if (_dq_state_in_sync_transfer(old_state)) { + if (!dsc->dsc_from_async && _dq_state_is_base_wlh(old_state) && + !_dq_state_in_uncontended_sync(old_state)) { + // Note: when coming from dispatch_resume despite having work items + // the caller has an "uncontended sync" ownership dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], - DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); + DISPATCH_WORKLOOP_SYNC_END, wlh, old_state, tid); } // // Past this call it is not safe to look at `wlh` anymore as the callers @@ -2162,7 +2167,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) int i, n = 0; dq_state = os_atomic_load2o((dispatch_queue_t)wlh, dq_state, relaxed); - if (dsc->dsc_wlh_was_first && !_dq_state_drain_locked(dq_state) && + if (!_dq_state_drain_locked(dq_state) && _dq_state_is_enqueued_on_target(dq_state)) { // // @@ -2183,8 +2188,19 @@ // lower priority thread, so we need to drive it once to avoid priority // inversions. // + // + // + // Also, it is possible that a low priority async is ahead of us, + // and hasn't made its thread request yet. If this waiter is high + // priority this is a priority inversion, and we need to redrive the + // async.
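+ //
+ // (Illustrative sequence, with assumed QoS values: a UTILITY-QoS
+ // dispatch_async enqueues first and has not yet made its thread
+ // request; a USER_INITIATED dispatch_sync waiter then blocks here.
+ // Filling the DISPATCH_WORKLOOP_ASYNC event below re-issues the
+ // thread request at a priority derived from dq_state's max QoS,
+ // which reflects the waiter, so the pending async cannot pin us
+ // behind a UTILITY-level wakeup.)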
+ // _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC, wlh, dq_state); + } else if (_dq_state_received_sync_wait(dq_state)) { + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_DISCOVER, wlh, dq_state, + _dq_state_drain_owner(dq_state)); } again: @@ -2194,8 +2210,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) for (i = 0; i < n; i++) { long flags = 0; if (ke[i].fflags & NOTE_WL_SYNC_WAIT) { - flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR | - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE; + flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR; } _dispatch_kevent_workloop_drain_error(&ke[i], flags); } @@ -2216,6 +2231,25 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_tid tid = _dispatch_tid_self(); + dispatch_kevent_s ke; + + _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_WAKE, + wlh, tid, tid); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } +#else + (void)wlh; +#endif +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) @@ -2223,7 +2257,6 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, #if DISPATCH_USE_KEVENT_WORKLOOP uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke[2]; - bool needs_forceful_end_ownership = false; int n = 0; dispatch_assert(_dq_state_is_base_wlh(new_state)); @@ -2231,50 +2264,15 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, wlh, new_state); } else if (_dq_state_is_enqueued_on_target(old_state)) { - // - // Because the thread request knote may not - // have made it, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC may silently - // turn into a no-op. - // - // However, the kernel may know about our ownership anyway, so we need - // to make sure it is forcefully ended. - // - needs_forceful_end_ownership = true; dispatch_assert(_dq_state_is_suspended(new_state)); _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, wlh, new_state); - } else if (_dq_state_received_sync_wait(old_state)) { - // - // This case happens when the current workloop got waited on by some - // thread calling _dispatch_event_loop_wait_for_ownership. - // - // When the workloop became IDLE, it didn't find the sync waiter - // continuation, didn't have a thread request to cancel either, and so - // we need the kernel to forget about the current thread ownership - // of the workloop. - // - // To forget this ownership, we create a fake WAKE knote that can not - // coalesce with any meaningful one, just so that we can EV_DELETE it - // with the NOTE_WL_END_OWNERSHIP. - // - // This is a gross hack, but this will really only ever happen for - // cases where a sync waiter started to wait on a workloop, but his part - // of the graph got mutated and retargeted onto a different workloop. - // In doing so, that sync waiter has snitched to the kernel about - // ownership, and the workloop he's bogusly waiting on will go through - // this codepath. 
- // - needs_forceful_end_ownership = true; } - if (_dq_state_in_sync_transfer(old_state)) { + if (!_dq_state_in_uncontended_sync(old_state)) { dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); - } else if (needs_forceful_end_ownership) { - kev_flags |= KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; - _dispatch_kq_fill_workloop_event(&ke[n++], - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, wlh, new_state); } if (_dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags)) { @@ -2418,7 +2416,6 @@ const dispatch_source_type_s _dispatch_source_type_proc = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_proc_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2439,7 +2436,6 @@ const dispatch_source_type_s _dispatch_source_type_vnode = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2457,6 +2453,9 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { #if HAVE_DECL_VQ_VERYLOWDISK |VQ_VERYLOWDISK #endif +#if HAVE_DECL_VQ_SERVEREVENT + |VQ_SERVEREVENT +#endif #if HAVE_DECL_VQ_QUOTA |VQ_QUOTA #endif @@ -2472,7 +2471,6 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_without_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2498,7 +2496,6 @@ const dispatch_source_type_s _dispatch_source_type_sock = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2510,10 +2507,9 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { .dst_kind = "nw_channel", .dst_filter = EVFILT_NW_CHANNEL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, - .dst_mask = NOTE_FLOW_ADV_UPDATE, + .dst_mask = NOTE_FLOW_ADV_UPDATE|NOTE_CHANNEL_EVENT|NOTE_IF_ADV_UPD, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2572,6 +2568,8 @@ _dispatch_memorypressure_handler(void *context) } } +DISPATCH_STATIC_GLOBAL(dispatch_source_t _dispatch_memorypressure_source); + static void _dispatch_memorypressure_init(void) { @@ -2580,6 +2578,7 @@ _dispatch_memorypressure_init(void) DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_mgr_q._as_dq); dispatch_set_context(ds, ds); dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); + _dispatch_memorypressure_source = ds; dispatch_activate(ds); } #endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE @@ -2593,7 +2592,7 @@ _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) if (!e) return; _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); if (_dispatch_ios_simulator_memory_warnings_fd == -1) { - (void)dispatch_assume_zero(errno); + DISPATCH_INTERNAL_CRASH(errno, "Failed to create fd to simulator memory pressure file"); } } @@ -2610,8 +2609,12 @@ _dispatch_source_memorypressure_create(dispatch_source_type_t dst, dst = 
&_dispatch_source_type_vnode; handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; + if ((int)handle < 0) { // handle holds an int fd; an unsigned compare to 0 is always false + return DISPATCH_UNOTE_NULL; + } mask = NOTE_ATTRIB; + dispatch_unote_t du = dux_create(dst, handle, mask); if (du._du) { du._du->du_memorypressure_override = true; @@ -2631,7 +2634,6 @@ const dispatch_source_type_s _dispatch_source_type_memorypressure = { |NOTE_MEMORYSTATUS_MSL_STATUS, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, #if TARGET_OS_SIMULATOR .dst_create = _dispatch_source_memorypressure_create, @@ -2662,7 +2664,6 @@ const dispatch_source_type_s _dispatch_source_type_vm = { .dst_mask = NOTE_VM_PRESSURE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_vm_create, // redirected to _dispatch_source_type_memorypressure @@ -2682,6 +2683,7 @@ static void _dispatch_mach_host_notify_update(void *context); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_notify_port_pred); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_calendar_pred); DISPATCH_STATIC_GLOBAL(mach_port_t _dispatch_mach_notify_port); +DISPATCH_STATIC_GLOBAL(dispatch_unote_t _dispatch_mach_notify_unote); static void _dispatch_timers_calendar_change(void) @@ -2713,6 +2715,18 @@ _dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr) return audit_tlr; } +bool +_dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr) +{ + mach_msg_audit_trailer_t *tlr; + tlr = _dispatch_mach_msg_get_audit_trailer(hdr); + if (!tlr) { + DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); + } + + return tlr->msgh_audit.val[DISPATCH_MACH_AUDIT_TOKEN_PID] == 0; +} + DISPATCH_NOINLINE static void _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, @@ -2721,18 +2735,12 @@ _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, pthread_priority_t ovr_pp DISPATCH_UNUSED) { mig_reply_error_t reply; - mach_msg_audit_trailer_t *tlr = NULL; dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); dispatch_assert(sizeof(mig_reply_error_t) < DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE); - tlr = _dispatch_mach_msg_get_audit_trailer(hdr); - if (!tlr) { - DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); - } if (hdr->msgh_id <= MACH_NOTIFY_LAST && - dispatch_assume_zero(tlr->msgh_audit.val[ - DISPATCH_MACH_AUDIT_TOKEN_PID])) { + !dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) { mach_msg_destroy(hdr); goto out; } @@ -2785,6 +2793,7 @@ _dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) dispatch_assume(_dispatch_unote_register(du, DISPATCH_WLH_ANON, DISPATCH_PRIORITY_FLAG_MANAGER)); + _dispatch_mach_notify_unote = du; } static void @@ -3076,7 +3085,6 @@ const dispatch_source_type_s _dispatch_source_type_mach_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3104,7 +3112,6 @@ const dispatch_source_type_s _dispatch_mach_type_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_send_refs_s), - .dst_strict = false,
.dst_create = _dispatch_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3143,20 +3150,27 @@ static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) { mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz = _dispatch_kevent_mach_msg_size(ke); dispatch_unote_t du = _dispatch_kevent_get_unote(ke); pthread_priority_t msg_pp = (pthread_priority_t)(ke->ext[2] >> 32); pthread_priority_t ovr_pp = (pthread_priority_t)ke->qos; uint32_t flags = ke->flags; - mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - if (unlikely(!hdr)) { - DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); - } - if (likely(!kr)) { - return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); - } - if (kr != MACH_RCV_TOO_LARGE) { + if (unlikely(kr == MACH_RCV_TOO_LARGE)) { + if (unlikely(!siz)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message size"); + } + } else if (unlikely(kr == MACH_RCV_INVALID_DATA)) { + dispatch_assert(siz == 0); + DISPATCH_CLIENT_CRASH(kr, "Unable to copyout msg, possible port leak"); + } else { + if (unlikely(!hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); + } + if (likely(!kr)) { + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + } goto out; } @@ -3167,9 +3181,14 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) DISPATCH_INTERNAL_CRASH(ke->ext[1], "EVFILT_MACHPORT with overlarge message"); } + + mach_msg_options_t extra_options = 0; + if (du._du->du_fflags & MACH_MSG_STRICT_REPLY) { + extra_options |= MACH_MSG_STRICT_REPLY; + } const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + MACH_RCV_TIMEOUT | extra_options) & ~MACH_RCV_LARGE); + siz += DISPATCH_MACH_TRAILER_SIZE; hdr = malloc(siz); // mach_msg will return TOO_LARGE if hdr/siz is NULL/0 kr = mach_msg(hdr, options, 0, dispatch_assume(hdr) ? 
siz : 0, (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); @@ -3199,15 +3218,20 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, .dst_fflags = 0, + .dst_mask = 0 +#ifdef MACH_RCV_SYNC_PEEK + | MACH_RCV_SYNC_PEEK +#endif + , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_source_merge_evt, .dst_merge_msg = NULL, // never receives messages directly .dst_per_trigger_qos = true, + .dst_allow_empty_mask = true, }; static void @@ -3221,10 +3245,9 @@ const dispatch_source_type_s _dispatch_mach_type_notification = { .dst_kind = "mach_notification", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_unote_class_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_notification_event, @@ -3238,7 +3261,7 @@ _dispatch_mach_recv_direct_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp) { if (flags & EV_VANISHED) { - DISPATCH_CLIENT_CRASH(du._du->du_ident, + DISPATCH_CLIENT_CRASH(0, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } return _dispatch_source_merge_evt(du, flags, data, pp); @@ -3251,7 +3274,6 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_recv_refs_s), - .dst_strict = false, // without handle because the mach code will set the ident after connect .dst_create = _dispatch_unote_create_without_handle, @@ -3263,21 +3285,28 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { DISPATCH_NORETURN static void -_dispatch_mach_reply_merge_evt(dispatch_unote_t du, - uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, +_dispatch_mach_reply_merge_evt(dispatch_unote_t du DISPATCH_UNUSED, + uint32_t flags, uintptr_t data, pthread_priority_t pp DISPATCH_UNUSED) { - DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(0, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } +#if __LP64__ + data = (uintptr_t)(kern_return_t)data; + data |= (uintptr_t)flags << 32; +#endif + DISPATCH_INTERNAL_CRASH(data, "Unexpected event"); } const dispatch_source_type_s _dispatch_mach_type_reply = { .dst_kind = "mach reply", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_reply_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_reply_merge_evt, @@ -3293,7 +3322,6 @@ const dispatch_source_type_s _dispatch_xpc_type_sigterm = { .dst_fflags = 0, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_xpc_term_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_xpc_sigterm_merge_evt, diff --git 
a/src/event/event_windows.c b/src/event/event_windows.c index ce322258a..94674a3bf 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -359,12 +359,11 @@ _dispatch_socket_callback(PTP_CALLBACK_INSTANCE inst, void *context, DWORD dwBytesAvailable = 1; if (lNetworkEvents & FD_CLOSE) { dwBytesAvailable = 0; - // Post to all registered read and write handlers - lNetworkEvents |= FD_READ | FD_WRITE; } else if (lNetworkEvents & FD_READ) { ioctlsocket(sock, FIONREAD, &dwBytesAvailable); } - if (lNetworkEvents & FD_READ) { + if ((lNetworkEvents & FD_CLOSE) || + ((lNetworkEvents & FD_READ) && (dwBytesAvailable > 0))) { _dispatch_muxnote_retain(dmn); if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, (ULONG_PTR)DISPATCH_PORT_SOCKET_READ, (LPOVERLAPPED)dmn)) { @@ -372,9 +371,10 @@ _dispatch_socket_callback(PTP_CALLBACK_INSTANCE inst, void *context, "PostQueuedCompletionStatus"); } } - if (lNetworkEvents & FD_WRITE) { + if ((lNetworkEvents & FD_CLOSE) || (lNetworkEvents & FD_WRITE)) { _dispatch_muxnote_retain(dmn); - if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, + if (!PostQueuedCompletionStatus(hPort, + lNetworkEvents & FD_CLOSE ? 0 : 1, (ULONG_PTR)DISPATCH_PORT_SOCKET_WRITE, (LPOVERLAPPED)dmn)) { DISPATCH_INTERNAL_CRASH(GetLastError(), "PostQueuedCompletionStatus"); diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 28f167517..afc82c02a 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -243,7 +243,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) int i, target_runnable = (int)dispatch_hw_config(active_cpus); foreach_qos_bucket_reverse(i) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), false); + mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), 0); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; diff --git a/src/eventlink.c b/src/eventlink.c new file mode 100644 index 000000000..ffba90002 --- /dev/null +++ b/src/eventlink.c @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include + +#if OS_EVENTLINK_USE_MACH_EVENTLINK + +OS_OBJECT_CLASS_DECL(os_eventlink); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_eventlink, + (void (*)(_os_object_t))_os_eventlink_xref_dispose, + (void (*)(_os_object_t))_os_eventlink_dispose); +#endif // USE_OBJC +#define EVENTLINK_CLASS OS_OBJECT_VTABLE(os_eventlink) + +/* Convenience macros for accessing into the struct os_eventlink_s */ +#define ev_local_port port_pair.pair[0] +#define ev_remote_port port_pair.pair[1] +#define ev_port_pair port_pair.desc + +#pragma mark Internal functions + +void +_os_eventlink_xref_dispose(os_eventlink_t ev) { + return _os_object_release_internal(ev->_as_os_obj); +} + +void +_os_eventlink_dispose(os_eventlink_t ev) { + if (ev->ev_state & OS_EVENTLINK_LABEL_NEEDS_FREE) { + free((void *) ev->name); + } + + if (MACH_PORT_VALID(ev->ev_local_port)) { + mach_port_deallocate(mach_task_self(), ev->ev_local_port); + } + if (MACH_PORT_VALID(ev->ev_remote_port)) { + mach_port_deallocate(mach_task_self(), ev->ev_remote_port); + } +} + +static inline os_eventlink_t +_os_eventlink_create_internal(const char *name) +{ + os_eventlink_t ev = NULL; + ev = (os_eventlink_t) _os_object_alloc(EVENTLINK_CLASS, + sizeof(struct os_eventlink_s)); + if (ev == NULL) { + errno = ENOMEM; + return NULL; + } + + if (name) { + const char *tmp = _dispatch_strdup_if_mutable(name); + if (tmp != name) { + ev->ev_state |= OS_EVENTLINK_LABEL_NEEDS_FREE; + } + ev->name = tmp; + } + + return ev; +} + +static inline int +_mach_error_to_errno(kern_return_t kr) +{ + int ret = 0; + + switch (kr) { + case KERN_NAME_EXISTS: + ret = EALREADY; + break; + case KERN_INVALID_ARGUMENT: + ret = EINVAL; + break; + case KERN_OPERATION_TIMED_OUT: + ret = ETIMEDOUT; + break; + case KERN_INVALID_NAME: + /* This is most likely due to waiting on a cancelled eventlink but also + * possible to hit this if there is a bug and a double free of the port. 
*/ + case KERN_TERMINATED: /* Other side died */ + ret = ECANCELED; + break; + case KERN_ABORTED: + ret = ECONNABORTED; + break; + case KERN_SUCCESS: + ret = 0; + break; + default: + return -1; + } + + errno = ret; + return ret; +} + +static uint64_t +_os_clockid_normalize_to_machabs(os_clockid_t inclock, uint64_t intimeout) +{ + uint64_t timeout = 0; + + switch (inclock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + timeout = intimeout; + break; + } + + return timeout; +} + +static int +os_eventlink_wait_until_internal(os_eventlink_t ev, os_clockid_t clock, + uint64_t deadline, uint64_t *signals_consumed_out) +{ + int ret = 0; + os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME); + + // These checks are racy but allow us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + kern_return_t kr = KERN_SUCCESS; + uint64_t count_to_exceed = ev->local_count; + + kr = mach_eventlink_wait_until(ev->ev_local_port, &ev->local_count, + MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline); + if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) { + *signals_consumed_out = ev->local_count - count_to_exceed; + } else if (kr == KERN_INVALID_NAME) { + /* This means that the eventlink got cancelled after the cancel check + * above but before we waited --> assert that that is indeed the case */ + os_assert(_os_eventlink_is_cancelled(ev->ev_state)); + } + + return _mach_error_to_errno(kr); +} + +static int +os_eventlink_signal_and_wait_until_internal(os_eventlink_t ev, os_clockid_t clock, + uint64_t deadline, uint64_t * _Nullable signals_consumed_out) +{ + int ret = 0; + kern_return_t kr = KERN_SUCCESS; + os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME); + + // These checks are racy but allow us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + uint64_t count_to_exceed = ev->local_count; + kr = mach_eventlink_signal_wait_until(ev->ev_local_port, &ev->local_count, 0, + MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline); + + if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) { + *signals_consumed_out = ev->local_count - count_to_exceed; + } else if (kr == KERN_INVALID_NAME) { + /* This means that the eventlink got cancelled after the cancel check + * above but before we signaled and waited --> assert that that is indeed + * the case */ + os_assert(_os_eventlink_is_cancelled(ev->ev_state)); + } + + return _mach_error_to_errno(kr); +} + + +#pragma mark Private functions + +os_eventlink_t +os_eventlink_create(const char *name) +{ + return _os_eventlink_create_internal(name); +} + +int +os_eventlink_activate(os_eventlink_t ev) +{ + int ret = 0; + + // These checks are racy but allow us to shortcircuit before we make the syscall + if (MACH_PORT_VALID(ev->ev_local_port)) { + return ret; + } + + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + struct os_eventlink_s tmp_ev; + bzero(&tmp_ev, sizeof(tmp_ev)); + + kern_return_t kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN, &tmp_ev.ev_local_port); + if (kr == KERN_SUCCESS) { + // Only atomically store the new ports if we have + // EVENTLINK_INACTIVE_PORT there. The only reason this would fail is + // because it was concurrently activated.
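The store that follows is the interesting part of activation: the local and remote ports live in a union with a single 64-bit view (ev_port_pair), so one compare-and-swap publishes both at once, and a failed swap is an unambiguous sign of a concurrent activation. A minimal standalone sketch of that shape, using C11 atomics and hypothetical stand-in types (fake_port_t and port_pair are illustrative, assuming 32-bit port names as on Darwin, not the real libdispatch types):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t fake_port_t; // stand-in for mach_port_t

    union port_pair {
        fake_port_t pair[2]; // [0] local, [1] remote
        uint64_t desc;       // both ports viewed as one CAS-able word
    };

    int main(void) {
        _Atomic uint64_t slot = 0; // 0 plays the role of EVENTLINK_INACTIVE_PORT
        union port_pair fresh = { .pair = { 0x1103, 0x1207 } };
        uint64_t expected = 0;

        // Publish both ports only if the pair is still inactive; losing the
        // race means another thread activated first and we must clean up.
        if (atomic_compare_exchange_strong(&slot, &expected, fresh.desc)) {
            puts("activated");
        } else {
            puts("lost the race; deallocate the freshly created ports");
        }

        union port_pair now = { .desc = atomic_load(&slot) };
        printf("local=0x%x remote=0x%x\n", now.pair[0], now.pair[1]);
        return 0;
    }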
+ uint64_t dummy; + bool success = os_atomic_cmpxchgv(&ev->ev_port_pair, EVENTLINK_INACTIVE_PORT, tmp_ev.ev_port_pair, &dummy, relaxed); + if (!success) { + // tmp_ev still has valid ports that need to be released + if (MACH_PORT_VALID(tmp_ev.ev_local_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_local_port); + } + if (MACH_PORT_VALID(tmp_ev.ev_remote_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_remote_port); + } + return EINVAL; + } + } + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_extract_remote_port(os_eventlink_t ev, mach_port_t *port_out) +{ + int ret = 0; + + // These checks are racy but allow us to shortcircuit and give the right + // errors + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + /* We're giving away our +1 to the remote port */ + mach_port_t port = os_atomic_xchg(&ev->ev_remote_port, EVENTLINK_CLEARED_PORT, relaxed); + if (!MACH_PORT_VALID(port)) { + errno = ret = EINVAL; + return ret; + } + *port_out = port; + + return ret; +} + +os_eventlink_t +os_eventlink_create_with_port(const char *name, mach_port_t port) +{ + os_eventlink_t ev = _os_eventlink_create_internal(name); + if (ev == NULL) { + return NULL; + } + /* Take our own +1 on the port */ + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + + os_assert(ev->ev_local_port == EVENTLINK_INACTIVE_PORT); + ev->ev_local_port = port; + return ev; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t template) +{ + mach_port_t mp; + int ret = os_eventlink_extract_remote_port(template, &mp); + if (ret) { + errno = ret; + return NULL; + } + + os_eventlink_t ev = os_eventlink_create_with_port(name, mp); + + /* os_eventlink_create_with_port doesn't consume the right it was given, so we + * should release our reference */ + mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_SEND, -1); + + return ev; +} + +int +os_eventlink_associate(os_eventlink_t ev, os_eventlink_associate_options_t + options) +{ + int ret = 0; + + // These checks are racy but allow us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + mach_eventlink_associate_option_t mela_options; + mela_options = (options == OE_ASSOCIATE_ON_WAIT) ? + MELA_OPTION_ASSOCIATE_ON_WAIT : MELA_OPTION_NONE; + mach_port_t thread_port = (options == OE_ASSOCIATE_ON_WAIT) ? MACH_PORT_NULL : _dispatch_thread_port(); + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_associate(ev->ev_local_port, thread_port, 0, 0, 0, 0, mela_options); + return _mach_error_to_errno(kr); +} + +int +os_eventlink_disassociate(os_eventlink_t ev) +{ + int ret = 0; + + // These checks are racy but allow us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + /* Don't bother calling mach_eventlink_disassociate since the backing + * eventlink object in the kernel will be gone */ + return ret; + } + + /* TODO: Track the associated thread in the eventlink object and error out + * in user space if the thread calling disassociate isn't the same thread.
+ * The kernel doesn't enforce this */ + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_disassociate(ev->ev_local_port, MELD_OPTION_NONE); + + if (kr == KERN_TERMINATED) { + /* Absorb this error in libdispatch, knowing that the other side died + * first is not helpful here */ + return 0; + } + + return _mach_error_to_errno(kr); +} + + +int +os_eventlink_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t *signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Convert timeout to deadline */ + return os_eventlink_wait_until_internal(ev, clock, mach_absolute_time() + machabs_timeout, + signals_consumed_out); +} + +int +os_eventlink_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal(os_eventlink_t ev) +{ + int ret = 0; + + // This is racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_signal(ev->ev_local_port, 0); + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_signal_and_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_signal_and_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Converts timeout to deadline */ + return os_eventlink_signal_and_wait_until_internal(ev, clock, mach_absolute_time() + machabs_timeout, + signals_consumed_out); +} + +void +os_eventlink_cancel(os_eventlink_t ev) +{ + if (_os_eventlink_is_cancelled(ev->ev_state)) { + return; + } + + os_atomic_or(&ev->ev_state, OS_EVENTLINK_CANCELLED, relaxed); + + + mach_port_t p = ev->ev_local_port; + if (MACH_PORT_VALID(p)) { + /* mach_eventlink_destroy consumes a ref on the ports. We therefore take + * +1 on the local port so that other threads using the ev_local_port have valid + * ports even if it isn't backed by an eventlink object. The last ref of + * the port in the eventlink object will be dropped in xref dispose */ + kern_return_t kr = mach_port_mod_refs(mach_task_self(), p, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + mach_eventlink_destroy(p); + } + + // If the remote port was valid, then we already called destroy on the + // local port and we don't need to call it again on the remote port. 
We keep + // the reference we already have on the remote port (if any) and deallocate + // it in xref dispose + +} + +#else /* OS_EVENTLINK_USE_MACH_EVENTLINK */ +#pragma mark Simulator + +void +_os_eventlink_dispose(os_eventlink_t __unused ev) { +} + +os_eventlink_t +os_eventlink_create(const char * __unused name) +{ + return NULL; +} + +int +os_eventlink_activate(os_eventlink_t __unused ev) +{ + return ENOTSUP; +} + +int +os_eventlink_extract_remote_port(os_eventlink_t __unused eventlink, mach_port_t *port_out) +{ + *port_out = MACH_PORT_NULL; + return ENOTSUP; +} + +os_eventlink_t +os_eventlink_create_with_port(const char * __unused name, mach_port_t __unused mach_port) +{ + errno = ENOTSUP; + return NULL; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char * __unused name, os_eventlink_t __unused eventlink) +{ + errno = ENOTSUP; + return NULL; +} + +int +os_eventlink_associate(os_eventlink_t __unused eventlink, os_eventlink_associate_options_t __unused options) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_disassociate(os_eventlink_t __unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal(os_eventlink_t __unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_signal_and_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +void +os_eventlink_cancel(os_eventlink_t __unused ev) +{ +} + +#endif /* OS_EVENTLINK_USE_MACH_EVENTLINK */ diff --git a/src/eventlink_internal.h b/src/eventlink_internal.h new file mode 100644 index 000000000..4c8f0d288 --- /dev/null +++ b/src/eventlink_internal.h @@ -0,0 +1,67 @@ +// +// eventlink_internal.h +// libdispatch +// +// Created by Rokhini Prabhu on 12/13/19. +// + +#ifndef __OS_EVENTLINK_INTERNAL__ +#define __OS_EVENTLINK_INTERNAL__ + +#if OS_EVENTLINK_USE_MACH_EVENTLINK +#include +#endif + +#define OS_EVENTLINK_LABEL_NEEDS_FREE 0x1ull +#define OS_EVENTLINK_CANCELLED 0x2ull + +union eventlink_internal { + mach_port_t pair[2]; + uint64_t desc; +}; + +struct os_eventlink_s { + struct _os_object_s _as_os_obj[0]; + OS_OBJECT_STRUCT_HEADER(eventlink); + + const char *name; + uint64_t ev_state; + + /* Note: We use the union which allows us to write to both local and remote + * port atomically during activate and cancellation APIs. The combination of + * the state of the local_port as well as the ev_state tells us the state of + * the eventlink + * + * local_port = EVENTLINK_INACTIVE_PORT means that it hasn't been created yet. + * local_port = a valid mach port means that it has been created. + * + * If the OS_EVENTLINK_CANCELLED bit is set, that means that the port does + * not point to a valid kernel eventlink object. 
+ * + * The refs on the ports are only dropped when the last external ref is + * dropped. + */ + union eventlink_internal port_pair; + + uint64_t local_count; +}; + +#define EVENTLINK_INACTIVE_PORT ((uint64_t) 0) +#define EVENTLINK_CLEARED_PORT ((uint64_t) 0) + +static inline bool +_os_eventlink_inactive(mach_port_t port) +{ + return port == EVENTLINK_INACTIVE_PORT; +} + +static inline bool +_os_eventlink_is_cancelled(uint64_t ev_state) +{ + return (ev_state & OS_EVENTLINK_CANCELLED) == OS_EVENTLINK_CANCELLED; +} + +void _os_eventlink_xref_dispose(os_eventlink_t ev); +void _os_eventlink_dispose(os_eventlink_t ev); + +#endif /* __OS_EVENTLINK_INTERNAL */ diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index 83d46ef03..0f62d3adb 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -28,6 +28,7 @@ serverprefix firehose_server_; userprefix firehose_send_; UseSpecialReplyPort 1; +ConsumeOnSendError Timeout; simpleroutine register( server_port : mach_port_t; diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 4631755d1..a79053c9d 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -18,7 +18,19 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +#include #include // VM_MEMORY_GENEALOGY + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + #ifdef KERNEL #define OS_VOUCHER_ACTIVITY_SPI_TYPES 1 @@ -26,8 +38,12 @@ #define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 #define DISPATCH_PURE_C 1 +#ifndef os_likely #define os_likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef os_unlikely #define os_unlikely(x) __builtin_expect(!!(x), 0) +#endif #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) @@ -74,7 +90,6 @@ static void _dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include -#include // os/internal/atomic.h #include // #include // #include // @@ -295,7 +310,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) if (old.fbs_atomic_state == new.fbs_atomic_state) { return; } - os_atomic_add2o(&fb->fb_header, fbh_bank.fbb_state.fbs_atomic_state, + os_atomic_add(&fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, new.fbs_atomic_state - old.fbs_atomic_state, relaxed); } #endif // !KERNEL @@ -511,11 +526,11 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, #endif } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_mem_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_mem_flushed, reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) { mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos); } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_io_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_io_flushed, reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) { io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos); } @@ -527,14 +542,14 @@ if (!mem_delta && !io_delta) { if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; } __firehose_critical_region_enter(); - os_atomic_rmw_loop2o(fbh,
fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fbh->fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; // overflow handles the generation wraps @@ -544,18 +559,18 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_add2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + state.fbs_atomic_state = os_atomic_add( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); __firehose_critical_region_leave(); if (state_out) *state_out = state; if (async_notif) { if (io_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_io_notifs, 1u, relaxed); } if (mem_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_mem_notifs, relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_mem_notifs, 1u, relaxed); } } } @@ -676,8 +691,8 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; @@ -689,9 +704,9 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (for_io) { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_io_sync_pushes, relaxed); } else { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_mem_sync_pushes, relaxed); } // TODO // @@ -808,7 +823,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, stamp_and_len = stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)flp_size << 48; - os_atomic_store2o(*lft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&(*lft)->ft_stamp_and_length, stamp_and_len, relaxed); (*lft)->ft_thread = thread; // not really meaningful @@ -828,7 +843,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, // write the length before making the chunk visible stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; - os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&ft->ft_stamp_and_length, stamp_and_len, relaxed); ft->ft_thread = thread; @@ -863,7 +878,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, bool installed = false; firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); - if (fc->fc_pos.fcp_atomic_pos) { + if (os_atomic_load(&fc->fc_pos.fcp_atomic_pos, relaxed)) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the // observers will see memory effects in program (asm) order. @@ -880,7 +895,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, dispatch_compiler_barrier(); if (ask->stream == firehose_stream_metadata) { - os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, 1ULL << ref, + os_atomic_or(&fbh->fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); } @@ -898,13 +913,13 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, // event needs to be placed at the beginning of the chunk in addition to // the first actual tracepoint. 
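The ft_stamp_and_length stores above pack two fields into one word so that a single atomic store publishes a complete tracepoint header: the low 48 bits carry the stamp's delta from the chunk's base timestamp, the top 16 bits the public length. A toy pack/unpack of that layout (the explicit mask is added here for clarity; the real code relies on the delta fitting in 48 bits):

    #include <stdint.h>
    #include <stdio.h>

    #define STAMP_BITS 48
    #define STAMP_MASK ((1ULL << STAMP_BITS) - 1)

    int main(void) {
        uint64_t fc_timestamp = 1000000; // chunk base timestamp
        uint64_t stamp = 1003500;        // tracepoint stamp
        uint16_t pubsize = 96;           // public payload length

        uint64_t stamp_and_len = (stamp - fc_timestamp) & STAMP_MASK;
        stamp_and_len |= (uint64_t)pubsize << STAMP_BITS;

        // Reader side: unpack after observing the published word.
        uint64_t delta = stamp_and_len & STAMP_MASK;
        uint16_t len = (uint16_t)(stamp_and_len >> STAMP_BITS);
        printf("stamp=%llu len=%u\n",
                (unsigned long long)(fc_timestamp + delta), len);
        return 0;
    }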
state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (likely(!state.fss_loss)) { ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, NULL, 0); // release to publish the chunk init - installed = os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + installed = os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { if (state.fss_loss) { os_atomic_rmw_loop_give_up(break); @@ -921,14 +936,14 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, uint64_t loss_start, loss_end; // ensure we can see the start stamp - (void)os_atomic_load2o(fbs, fbs_state.fss_atomic_state, acquire); + (void)os_atomic_load(&fbs->fbs_state.fss_atomic_state, acquire); loss_start = fbs->fbs_loss_start; fbs->fbs_loss_start = 0; // reset under fss_gate loss_end = mach_continuous_time(); ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, &lft, loss_start); - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { // no giving up this time! new_state = (firehose_stream_state_u){ @@ -952,19 +967,19 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, } }; // publish the contents of the loss tracepoint - os_atomic_store2o(lft, ft_id.ftid_atomic_value, ftid.ftid_value, + os_atomic_store(&lft->ft_id.ftid_atomic_value, ftid.ftid_value, release); } } else { // the allocator gave up - just clear the allocator and waiter bits and // increment the loss count state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ @@ -975,7 +990,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ @@ -1004,9 +1019,9 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, firehose_buffer_update_limits(fb); } - if (unlikely(os_atomic_load2o(fbh, fbh_quarantined_state, relaxed) == + if (unlikely(os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == FBH_QUARANTINE_PENDING)) { - if (os_atomic_cmpxchg2o(fbh, fbh_quarantined_state, + if (os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_PENDING, FBH_QUARANTINE_STARTED, relaxed)) { firehose_client_start_quarantine(fb); } @@ -1190,7 +1205,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_chunk_t fc; bool for_io; - os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fb->fb_header.fbh_ring_tail.frp_atomic_tail, old.frp_atomic_tail, pos.frp_atomic_tail, relaxed, { pos = old; if (likely(old.frp_mem_tail != old.frp_mem_flushed)) { @@ -1228,13 +1243,13 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) fc = firehose_buffer_ref_to_chunk(fb, ref); if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { - os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + 
os_atomic_and(&fb->fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + os_atomic_store(&fc->fc_pos.fcp_atomic_pos, FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); - os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); + os_atomic_store(&fbh_ring[tail], gen, relaxed); return ref; } @@ -1256,7 +1271,7 @@ firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t f // first wait for our bank to have space, if needed if (unlikely(!ask->is_bank_ok)) { state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); while (!firehose_buffer_bank_try_reserve_slot(fb, for_io, &state)) { if (ask->quarantined) { __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, for_io, @@ -1334,7 +1349,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, #endif // KERNEL state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); reserved = firehose_buffer_bank_try_reserve_slot(fb, for_io, &state); #ifndef KERNEL @@ -1415,13 +1430,26 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void +bool __firehose_merge_updates(firehose_push_reply_t update) { firehose_buffer_t fb = kernel_firehose_buffer; + bool has_more = false; + uint16_t head; + if (likely(fb)) { + firehose_buffer_header_t fbh = &fb->fb_header; firehose_client_merge_updates(fb, true, update, false, NULL); + head = os_atomic_load(&fbh->fbh_ring_io_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_io_flushed, relaxed)) { + has_more = true; + } + head = os_atomic_load(&fbh->fbh_ring_mem_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_mem_flushed, relaxed)) { + has_more = true; + } } + return has_more; } int diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index a2c80c2b7..ea7632801 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -26,14 +26,14 @@ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #endif -#define firehose_atomic_maxv2o(p, f, v, o, m) \ - os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ +#define firehose_atomic_maxv(p, v, o, m) \ + os_atomic_rmw_loop(p, *(o), (v), m, { \ if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ }) -#define firehose_atomic_max2o(p, f, v, m) ({ \ - _os_atomic_basetypeof(&(p)->f) _old; \ - firehose_atomic_maxv2o(p, f, v, &_old, m); \ +#define firehose_atomic_max(p, v, m) ({ \ + _os_atomic_basetypeof(p) _old; \ + firehose_atomic_maxv(p, v, &_old, m); \ }) #ifndef KERNEL @@ -134,6 +134,7 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, } if (unlikely(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { // destroy the request - but not the reply port + // (MIG moved it into the msg_reply). 
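Throughout these hunks the 2o field-offset macros give way to plain pointer forms, and the rewritten firehose_atomic_maxv at the top of firehose_inline_internal.h is the clearest of them: a monotonic-max CAS loop that gives up once the stored value is already at least the candidate. A self-contained C11 equivalent of that retry shape (the function name and demo values are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    // Returns true (and the previous value) if it raised the maximum,
    // false if the stored value was already >= v: the give-up path.
    static bool atomic_max_u64(_Atomic uint64_t *p, uint64_t v, uint64_t *old_out) {
        uint64_t old = atomic_load_explicit(p, memory_order_relaxed);
        for (;;) {
            if (old >= v) { *old_out = old; return false; }
            if (atomic_compare_exchange_weak_explicit(p, &old, v,
                    memory_order_relaxed, memory_order_relaxed)) {
                *old_out = old; return true;
            }
            // CAS failure reloaded `old`; the loop re-checks it against v.
        }
    }

    int main(void) {
        _Atomic uint64_t flushed_pos; atomic_init(&flushed_pos, 10);
        uint64_t old;
        printf("raised=%d old=%llu\n", atomic_max_u64(&flushed_pos, 42, &old),
                (unsigned long long)old);
        printf("raised=%d old=%llu\n", atomic_max_u64(&flushed_pos, 7, &old),
                (unsigned long long)old);
        return 0;
    }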
hdr->msgh_remote_port = 0; mach_msg_destroy(hdr); } @@ -168,6 +169,7 @@ firehose_buffer_ref_to_chunk(firehose_buffer_t fb, firehose_chunk_ref_t ref) #ifndef FIREHOSE_SERVER #if DISPATCH_PURE_C +#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) @@ -180,7 +182,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) long result; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away @@ -206,11 +208,11 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) // allocators know how to handle in the first place new_state = old_state; new_state.fss_current = 0; - (void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state, + (void)os_atomic_cmpxchg(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); } -/** +/*! * @function firehose_buffer_tracepoint_reserve * * @abstract @@ -262,9 +264,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, long result; firehose_chunk_ref_t ref; - // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store + // cannot use os_atomic_rmw_loop, _page_try_reserve does a store old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); for (;;) { new_state = old_state; @@ -297,7 +299,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, new_state.fss_loss = MIN(old_state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX); - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (success) { @@ -318,7 +320,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, waited = true; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); #else if (likely(reliable)) { new_state.fss_allocator |= FIREHOSE_GATE_RELIABLE_WAITERS_BIT; @@ -328,8 +330,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, bool already_equal = (new_state.fss_atomic_state == old_state.fss_atomic_state); - success = already_equal || os_atomic_cmpxchgv2o(fbs, - fbs_state.fss_atomic_state, old_state.fss_atomic_state, + success = already_equal || os_atomic_cmpxchgv( + &fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (success) { @@ -341,8 +343,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, DLOCK_LOCK_DATA_CONTENTION); waited = true; - old_state.fss_atomic_state = os_atomic_load2o(fbs, - fbs_state.fss_atomic_state, relaxed); + old_state.fss_atomic_state = os_atomic_load( + &fbs->fbs_state.fss_atomic_state, relaxed); } #endif continue; @@ -354,11 +356,11 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, // firehose_buffer_stream_chunk_install()) __firehose_critical_region_enter(); #if KERNEL - new_state.fss_allocator = (uint32_t)cpu_number(); + new_state.fss_allocator = 
1; #else new_state.fss_allocator = _dispatch_lock_value_for_self(); #endif - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (likely(success)) { @@ -388,7 +390,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); } -/** +/*! * @function firehose_buffer_tracepoint_flush * * @abstract @@ -440,7 +442,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, new_state = old_state; new_state.fbs_banks[for_io]--; - success = os_atomic_cmpxchgvw(&fbb->fbb_state.fbs_atomic_state, + success = os_atomic_cmpxchgv(&fbb->fbb_state.fbs_atomic_state, old_state.fbs_atomic_state, new_state.fbs_atomic_state, &old_state.fbs_atomic_state, acquire); } while (unlikely(!success)); @@ -448,6 +450,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, *state_in_out = new_state; return true; } +#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY #ifndef KERNEL OS_ALWAYS_INLINE @@ -459,17 +462,18 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, @@ -477,12 +481,13 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, @@ -506,7 +511,7 @@ firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed); + orig_flags = os_atomic_and_orig(&fbb->fbb_flags, ~bits, relaxed); if (orig_flags != (orig_flags & ~bits)) { firehose_buffer_update_limits(fb); } @@ -519,7 +524,7 @@ firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed); + orig_flags = os_atomic_or_orig(&fbb->fbb_flags, bits, relaxed); if (orig_flags != (orig_flags | bits)) { firehose_buffer_update_limits(fb); } @@ -530,7 +535,7 @@ static inline void firehose_buffer_bank_relinquish_slot(firehose_buffer_t fb, bool for_io) { firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; - os_atomic_add2o(fbb, fbb_state.fbs_atomic_state, 
FIREHOSE_BANK_INC(for_io), + os_atomic_add(&fbb->fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); } #endif // !KERNEL diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs index caef7b43e..b5737c030 100644 --- a/src/firehose/firehose_reply.defs +++ b/src/firehose/firehose_reply.defs @@ -27,6 +27,7 @@ subsystem firehoseReply 11700; serverprefix firehose_client_; userprefix firehose_send_; +ConsumeOnSendError Timeout; skip; // firehose_register diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index a674c8fc8..11e3f3fa8 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -41,6 +41,7 @@ static struct firehose_server_s { dispatch_mach_t fs_mach_channel; dispatch_queue_t fs_snapshot_gate_queue; dispatch_queue_t fs_io_drain_queue; + dispatch_workloop_t fs_io_wl; dispatch_queue_t fs_mem_drain_queue; firehose_handler_t fs_handler; @@ -212,9 +213,9 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) }; kern_return_t kr; - firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos, + firehose_atomic_max(&fc->fc_mem_sent_flushed_pos, push_reply.fpr_mem_flushed_pos, relaxed); - firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, + firehose_atomic_max(&fc->fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); if (!fc->fc_pid) { @@ -291,6 +292,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags firehose_buffer_t fb = fc->fc_buffer; firehose_chunk_t fbc; firehose_event_t evt; + firehose_snapshot_event_t sevt; uint16_t volatile *fbh_ring; uint16_t flushed, count = 0; firehose_chunk_ref_t ref; @@ -300,6 +302,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (for_io) { evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER; _Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, ""); fbh_ring = fb->fb_header.fbh_io_ring; @@ -308,6 +311,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (fc->fc_needs_io_snapshot) snapshot = server_config.fs_snapshot; } else { evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER; _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, ""); fbh_ring = fb->fb_header.fbh_mem_ring; @@ -353,7 +357,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags } server_config.fs_handler(fc, evt, fbc, fc_pos); if (unlikely(snapshot)) { - snapshot->handler(fc, evt, fbc, fc_pos); + snapshot->handler(fc, sevt, fbc, fc_pos); } if (fc_pos.fcp_stream == firehose_stream_metadata) { os_unfair_lock_unlock(&fc->fc_lock); @@ -491,7 +495,7 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; - _os_object_release(&fc->fc_as_os_object); + _os_object_release_without_xref_dispose(&fc->fc_object_header); } OS_NOINLINE @@ -614,15 +618,15 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, break; } - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { - _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", - firehose_client_get_unique_pid(fc, NULL)); - for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { - dispatch_mach_cancel(fc->fc_mach_channel[i]); - } + mach_msg_destroy(dispatch_mach_msg_get_msg(dmsg, NULL)); + break; + + 
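The DISPATCH_MACH_NO_SENDERS case just below cancels every push-port channel. Each cancellation presumably comes back later as its own DISPATCH_MACH_DISCONNECTED event, and fc_mach_channel_refcnt (initialized to FIREHOSE_BUFFER_NPUSHPORTS further down in this patch) would let the last disconnect finalize the client. A sketch of that fan-in under those assumptions (the names and the finalize hook are hypothetical, not the firehose server's actual teardown path):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NPUSHPORTS 2 // stand-in for FIREHOSE_BUFFER_NPUSHPORTS

    static _Atomic int channel_refcnt;

    static void channel_disconnected(int which) {
        // fetch_sub returning 1 means this was the last live channel.
        if (atomic_fetch_sub(&channel_refcnt, 1) == 1) {
            printf("channel %d disconnected last: finalize client\n", which);
        } else {
            printf("channel %d disconnected\n", which);
        }
    }

    int main(void) {
        atomic_init(&channel_refcnt, NPUSHPORTS);
        for (int i = 0; i < NPUSHPORTS; i++) channel_disconnected(i);
        return 0;
    }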
case DISPATCH_MACH_NO_SENDERS: + _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", + firehose_client_get_unique_pid(fc, NULL)); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_cancel(fc->fc_mach_channel[i]); } - mach_msg_destroy(msg_hdr); break; case DISPATCH_MACH_DISCONNECTED: @@ -760,7 +764,8 @@ firehose_client_create(firehose_buffer_t fb, firehose_token_t token, server_config.fs_mem_drain_queue, server_config.fs_io_drain_queue }; - fc->fc_mach_channel_refcnt = FIREHOSE_BUFFER_NPUSHPORTS; + + os_atomic_init(&fc->fc_mach_channel_refcnt, FIREHOSE_BUFFER_NPUSHPORTS); for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { fc->fc_recvp[i] = recvp[i]; firehose_mach_port_guard(fc->fc_recvp[i], true, &fc->fc_recvp[i]); @@ -829,13 +834,6 @@ _firehose_client_dispose(firehose_client_t fc) (firehose_chunk_pos_u){ .fcp_pos = 0 }); } -void -_firehose_client_xref_dispose(firehose_client_t fc) -{ - _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", - firehose_client_get_unique_pid(fc, NULL)); -} - uint64_t firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) { @@ -911,7 +909,8 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) { struct firehose_server_s *fs = &server_config; dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL; - dispatch_queue_attr_t attr_inactive, attr_utility_inactive; + dispatch_queue_attr_t attr_inactive = + dispatch_queue_attr_make_initially_inactive(attr); dispatch_mach_t dm; dispatch_source_t ds; @@ -921,14 +920,12 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( "com.apple.firehose.snapshot-gate", attr, NULL); - attr_inactive = dispatch_queue_attr_make_initially_inactive(attr); - attr_utility_inactive = dispatch_queue_attr_make_with_qos_class( - attr_inactive, QOS_CLASS_UTILITY, 0); + fs->fs_io_wl = dispatch_workloop_create_inactive("com.apple.firehose.io-wl"); + dispatch_set_qos_class_fallback(fs->fs_io_wl, QOS_CLASS_UTILITY); + dispatch_activate(fs->fs_io_wl); fs->fs_io_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-io", attr_utility_inactive, NULL); - dispatch_set_qos_class_fallback(fs->fs_io_drain_queue, QOS_CLASS_UTILITY); - dispatch_activate(fs->fs_io_drain_queue); + "com.apple.firehose.drain-io", attr, (dispatch_queue_t)fs->fs_io_wl); fs->fs_mem_drain_queue = dispatch_queue_create_with_target( "com.apple.firehose.drain-mem", attr_inactive, NULL); @@ -1058,6 +1055,9 @@ firehose_server_copy_queue(firehose_server_queue_t which) case FIREHOSE_SERVER_QUEUE_MEMORY: dq = server_config.fs_mem_drain_queue; break; + case FIREHOSE_SERVER_QUEUE_IO_WL: + dq = (dispatch_queue_t)server_config.fs_io_wl; + break; default: DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); } @@ -1337,20 +1337,6 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, return KERN_INVALID_VALUE; } - /* - * Request a MACH_NOTIFY_NO_SENDERS notification for the mem_recvp. That - * should indicate the client going away. 
- */ - mach_port_t previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), comm_mem_recvp, - MACH_NOTIFY_NO_SENDERS, 0, comm_mem_recvp, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - return KERN_FAILURE; - } - dispatch_assert(previous == MACH_PORT_NULL); - /* Map the memory handle into the server address space */ kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0, VM_FLAGS_ANYWHERE, mem_port, 0, FALSE, @@ -1381,6 +1367,12 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, fc = firehose_client_create((firehose_buffer_t)base_addr, (firehose_token_t)&atoken, comm_mem_recvp, comm_io_recvp, comm_sendp); + /* + * Request a no senders notification for the memory channel. + * That should indicate the client going away. + */ + dispatch_mach_notify_no_senders( + fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM], true); firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 571cc2a0e..c3ea87982 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -31,10 +31,7 @@ struct firehose_snapshot_s { }; struct firehose_client_s { - union { - _OS_OBJECT_HEADER(void *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); - struct _os_object_s fc_as_os_object; - }; + struct _os_object_s fc_object_header; TAILQ_ENTRY(firehose_client_s) fc_entry; struct firehose_client_s *volatile fc_next[2]; @@ -80,8 +77,6 @@ struct firehose_client_s { bool volatile fc_quarantined; } DISPATCH_ATOMIC64_ALIGN; -void -_firehose_client_xref_dispose(struct firehose_client_s *fc); void _firehose_client_dispose(struct firehose_client_s *fc); diff --git a/src/firehose/firehose_server_object.m b/src/firehose/firehose_server_object.m index 6965ca0f5..c5243c149 100644 --- a/src/firehose/firehose_server_object.m +++ b/src/firehose/firehose_server_object.m @@ -24,20 +24,15 @@ #error the firehose server requires the objc-runtime, no ARC #endif +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(firehose_client) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -+ (void)load { } -- (void)_xref_dispose -{ - _firehose_client_xref_dispose((struct firehose_client_s *)self); - [super _xref_dispose]; -} - -- (void)_dispose +- (void)dealloc { _firehose_client_dispose((struct firehose_client_s *)self); - [super _dispose]; + [super dealloc]; } - (NSString *)debugDescription diff --git a/src/firehose/firehose_types.defs b/src/firehose/firehose_types.defs index 9462fd808..56f60957b 100644 --- a/src/firehose/firehose_types.defs +++ b/src/firehose/firehose_types.defs @@ -21,6 +21,7 @@ #include #include +import ; import ; import ; diff --git a/src/init.c b/src/init.c index f4c3bae4c..08f790828 100644 --- a/src/init.c +++ b/src/init.c @@ -40,7 +40,6 @@ #pragma mark - #pragma mark dispatch_init - #if USE_LIBDISPATCH_INIT_CONSTRUCTOR DISPATCH_NOTHROW __attribute__((constructor)) void @@ -67,6 +66,7 @@ void dispatch_atfork_parent(void) { _os_object_atfork_parent(); + _voucher_atfork_parent(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -142,8 +142,11 @@ pthread_key_t dispatch_bcounter_key; pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; +pthread_key_t dispatch_enqueue_key; +pthread_key_t os_workgroup_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE + #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t 
_voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; @@ -157,6 +160,10 @@ uint64_t _voucher_unique_pid; voucher_activity_hooks_t _voucher_libtrace_hooks; dispatch_mach_t _voucher_activity_debug_channel; #endif + +dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +bool _voucher_process_can_use_arbitrary_personas = false; + #if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG bool _dispatch_set_qos_class_enabled; #endif @@ -166,7 +173,7 @@ bool _dispatch_kevent_workqueue_enabled = 1; DISPATCH_HW_CONFIG(); uint8_t _dispatch_unsafe_fork; -uint8_t _dispatch_mode; +uint8_t _dispatch_mode = DISPATCH_MODE_NO_FAULTS; bool _dispatch_child_of_unsafe_fork; #if DISPATCH_USE_MEMORYPRESSURE_SOURCE bool _dispatch_memory_warn; @@ -300,10 +307,12 @@ static struct dispatch_pthread_root_queue_context_s // renaming this symbol struct dispatch_queue_global_s _dispatch_root_queues[] = { #define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ - ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ + (((flags) & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ - DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + (((flags) & DISPATCH_PRIORITY_FLAG_COOPERATIVE) ? \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_COOPERATIVE : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)) +#define _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(n, flags, ...) \ [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \ .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ @@ -314,83 +323,127 @@ struct dispatch_queue_global_s _dispatch_root_queues[] = { _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ __VA_ARGS__ \ } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_cooperative), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = NULL, \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + /* We initialize the rest of the fields in + * _dispatch_cooperative_root_queue_init_fallback */ +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + .do_vtable = DISPATCH_VTABLE(queue_concurrent), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
\ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, .dq_label = "com.apple.root.maintenance-qos", .dq_serialnum = 4, ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_serialnum = 5, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, - .dq_label = "com.apple.root.background-qos", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.maintenance-qos.cooperative", .dq_serialnum = 6, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.background-qos.overcommit", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, 0, + .dq_label = "com.apple.root.background-qos", .dq_serialnum = 7, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, - .dq_label = "com.apple.root.utility-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.background-qos.overcommit", .dq_serialnum = 8, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.utility-qos.overcommit", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.background-qos.cooperative", .dq_serialnum = 9, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, - .dq_label = "com.apple.root.default-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, 0, + .dq_label = "com.apple.root.utility-qos", .dq_serialnum = 10, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.utility-qos.overcommit", + .dq_serialnum = 11, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.utility-qos.cooperative", + .dq_serialnum = 12, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos", + .dq_serialnum = 13, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", - .dq_serialnum = 11, + .dq_serialnum = 14, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_COOPERATIVE | DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos.cooperative", + .dq_serialnum = 15, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, .dq_label = "com.apple.root.user-initiated-qos", - .dq_serialnum = 12, + .dq_serialnum = 16, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_serialnum = 13, + .dq_serialnum = 17, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-initiated-qos.cooperative", + .dq_serialnum = 18, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, .dq_label = 
"com.apple.root.user-interactive-qos", - .dq_serialnum = 14, + .dq_serialnum = 19, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_serialnum = 15, + .dq_serialnum = 20, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-interactive-qos.cooperative", + .dq_serialnum = 21, ), }; -unsigned long volatile _dispatch_queue_serial_numbers = - DISPATCH_QUEUE_SERIAL_NUMBER_INIT; - +__dispatch_is_array(_dispatch_root_queues); +_Static_assert(sizeof(_dispatch_root_queues) == + sizeof(struct dispatch_queue_global_s) * DISPATCH_ROOT_QUEUE_COUNT, + "_dispatch_root_queues array size mismatch"); -dispatch_queue_global_t -dispatch_get_global_queue(intptr_t priority, uintptr_t flags) -{ - dispatch_assert(countof(_dispatch_root_queues) == - DISPATCH_ROOT_QUEUE_COUNT); +const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; - if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return DISPATCH_BAD_INPUT; - } - dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); -#if !HAVE_PTHREAD_WORKQUEUE_QOS - if (qos == QOS_CLASS_MAINTENANCE) { - qos = DISPATCH_QOS_BACKGROUND; - } else if (qos == QOS_CLASS_USER_INTERACTIVE) { - qos = DISPATCH_QOS_USER_INITIATED; - } -#endif - if (qos == DISPATCH_QOS_UNSPECIFIED) { - return DISPATCH_BAD_INPUT; - } - return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); -} +unsigned long volatile _dispatch_queue_serial_numbers = + DISPATCH_QUEUE_SERIAL_NUMBER_INIT; dispatch_queue_t dispatch_get_current_queue(void) @@ -646,8 +699,7 @@ DISPATCH_VTABLE_INSTANCE(disk, DISPATCH_NOINLINE static void -_dispatch_queue_no_activate(dispatch_queue_class_t dqu, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_queue_no_activate(dispatch_queue_class_t dqu) { DISPATCH_INTERNAL_CRASH(dx_type(dqu._dq), "dq_activate called"); } @@ -706,6 +758,17 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane, .dq_push = _dispatch_root_queue_push, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_cooperative, lane, + .do_type = DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_root_queue_wakeup, + .dq_push = _dispatch_root_queue_push, +); + #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane, .do_type = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE, @@ -769,6 +832,17 @@ DISPATCH_VTABLE_INSTANCE(source, .dq_push = _dispatch_lane_push, ); +DISPATCH_VTABLE_INSTANCE(channel, + .do_type = DISPATCH_CHANNEL_TYPE, + .do_dispose = _dispatch_channel_dispose, + .do_debug = _dispatch_channel_debug, + .do_invoke = _dispatch_channel_invoke, + + .dq_activate = _dispatch_lane_activate, + .dq_wakeup = _dispatch_channel_wakeup, + .dq_push = _dispatch_lane_push, +); + #if HAVE_MACH 
DISPATCH_VTABLE_INSTANCE(mach, .do_type = DISPATCH_MACH_CHANNEL_TYPE, @@ -1045,7 +1119,11 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du) _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", "BUG in libdispatch client: %s, monitored resource vanished before " "the source cancel handler was invoked " +#if !defined(_WIN32) + "{ %p[%s], ident: %d / 0x%x, handler: %p }", +#else // !defined(_WIN32) "{ %p[%s], ident: %" PRIdPTR " / 0x%" PRIxPTR ", handler: %p }", +#endif // !defined(_WIN32) dux_type(du._du)->dst_kind, dou._dq, dou._dq->dq_label ? dou._dq->dq_label : "", du._du->du_ident, du._du->du_ident, func); @@ -1220,31 +1298,31 @@ _dispatch_vsyslog(const char *msg, va_list ap) static inline void _dispatch_syslog(const char *msg) { - OutputDebugStringA(msg); + OutputDebugStringA(msg); } static inline void _dispatch_vsyslog(const char *msg, va_list ap) { - va_list argp; + va_list argp; - va_copy(argp, ap); + va_copy(argp, ap); - int length = _vscprintf(msg, ap); - if (length == -1) - return; + int length = _vscprintf(msg, ap); + if (length == -1) + return; - char *buffer = malloc((size_t)length + 1); - if (buffer == NULL) - return; + char *buffer = malloc((size_t)length + 1); + if (buffer == NULL) + return; - _vsnprintf(buffer, (size_t)length + 1, msg, argp); + _vsnprintf(buffer, (size_t)length + 1, msg, argp); - va_end(argp); + va_end(argp); - _dispatch_syslog(buffer); + _dispatch_syslog(buffer); - free(buffer); + free(buffer); } #else // DISPATCH_USE_SIMPLE_ASL static inline void @@ -1388,7 +1466,7 @@ _dispatch_calloc(size_t num_items, size_t size) return buf; } -/** +/* * If the source string is mutable, allocates memory and copies the contents. * Otherwise returns the source string. */ @@ -1514,6 +1592,20 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +DISPATCH_NOINLINE +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (likely(!u)) return f(ctxt, i, w); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, i, w); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -1545,7 +1637,7 @@ _os_object_t _os_object_alloc(const void *cls, size_t size) { if (!cls) cls = &_os_object_vtable; - return _os_object_alloc_realized(cls, size); + return _os_object_alloc_realized((const void * _Nonnull) cls, size); } void diff --git a/src/inline_internal.h b/src/inline_internal.h index 67ecfc922..a78e50277 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -48,6 +48,11 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_handler_function_t f); #endif // HAVE_MACH +typedef void (*dispatch_apply_attr_function_t)(void *, size_t, size_t); + +DISPATCH_NOTHROW void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f); + #else // !DISPATCH_USE_CLIENT_CALLOUT DISPATCH_ALWAYS_INLINE @@ -83,6 +88,13 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t, size_t)) +{ + return f(ctxt, i, w); +} + #endif // !DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -191,6 +203,16 @@ _dispatch_object_is_sync_waiter(dispatch_object_t dou) return (dou._dc->dc_flags & DC_FLAG_SYNC_WAITER); }
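The _dispatch_client_callout3_a wrapper added above follows the existing _dispatch_client_callout* discipline: capture the unwind TSD, run the client function with the slot cleared, and restore it only on a normal return, so an exception unwinding out of client code can be detected. Below is a minimal standalone sketch of that save/clear/invoke/restore pattern; the thread-local unwind_slot and the helper name are illustrative stand-ins for the real _dispatch_get_unwind_tsd()/_dispatch_set_unwind_tsd() accessors, not libdispatch API.

#include <stddef.h>

/* Sketch only: a plain C11 thread-local stands in for the real unwind TSD slot. */
static _Thread_local void *unwind_slot;

typedef void (*apply_attr_fn_t)(void *ctxt, size_t iteration, size_t worker);

static void
callout3_a_sketch(void *ctxt, size_t i, size_t w, apply_attr_fn_t f)
{
	void *u = unwind_slot;      /* capture the caller's unwind state */
	if (!u) {
		f(ctxt, i, w);          /* fast path: nothing to save */
		return;
	}
	unwind_slot = NULL;         /* clear the slot so f() starts clean */
	f(ctxt, i, w);
	unwind_slot = u;            /* restored only on a normal return; an
	                             * unwind out of f() leaves it cleared,
	                             * which is what makes it detectable */
}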
+DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_channel_item(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return (dou._dc->dc_flags & DC_FLAG_CHANNEL_ITEM); +} + DISPATCH_ALWAYS_INLINE static inline bool _dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) @@ -749,7 +771,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_workloop_t _dispatch_wlh_to_workloop(dispatch_wlh_t wlh) { - if (wlh == DISPATCH_WLH_ANON) { + if (wlh == NULL || wlh == DISPATCH_WLH_ANON) { return NULL; } if (dx_metatype((dispatch_workloop_t)wlh) == _DISPATCH_WORKLOOP_TYPE) { @@ -901,7 +923,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_suspended(uint64_t dq_state) { - return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; + return dq_state & DISPATCH_QUEUE_SUSPEND_BITS_MASK; } #define DISPATCH_QUEUE_IS_SUSPENDED(x) \ _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) @@ -910,14 +932,24 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_inactive(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_INACTIVE; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_INACTIVE; } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_needs_activation(uint64_t dq_state) +_dq_state_is_activated(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_activating(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATING; } DISPATCH_ALWAYS_INLINE @@ -992,9 +1024,9 @@ _dq_state_is_enqueued_on_manager(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_in_sync_transfer(uint64_t dq_state) +_dq_state_in_uncontended_sync(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER; + return dq_state & DISPATCH_QUEUE_UNCONTENDED_SYNC; } DISPATCH_ALWAYS_INLINE @@ -1013,6 +1045,18 @@ _dq_state_received_sync_wait(uint64_t dq_state) (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_needs_ensure_ownership(uint64_t dq_state) +{ + if (_dq_state_is_base_wlh(dq_state) && + _dq_state_in_uncontended_sync(dq_state)) { + return dq_state & (DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_ENQUEUED); + } + return false; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dq_state_max_qos(uint64_t dq_state) @@ -1090,6 +1134,20 @@ _dq_state_is_runnable(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool _dq_state_should_override(uint64_t dq_state) +{ + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_enqueued_on_manager(dq_state)) { + return false; + } + if (_dq_state_is_enqueued_on_target(dq_state)) { + return true; + } + return _dq_state_drain_locked(dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_should_override_for_waiter(uint64_t dq_state) { if (_dq_state_is_suspended(dq_state) || _dq_state_is_enqueued_on_manager(dq_state)) { @@ -1099,6 +1157,11 @@ _dq_state_should_override(uint64_t dq_state) return true; } if (_dq_state_is_base_wlh(dq_state)) { + // _dq_state_should_override is called only when the enqueued bit + // hasn't changed. For kqworkloop based code, if there's no thread + // request, then we should not try to assign a QoS/kevent override + // at all, because turnstiles are the only thing needed to resolve + // priority inversions. 
return false; } return _dq_state_drain_locked(dq_state); @@ -1118,6 +1181,19 @@ static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp) #if DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(_dq_state_is_inactive(dq_state))) return; +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); +} + // Note to later developers: ensure that any initialization changes are // made for statically allocated queues (i.e. _dispatch_main_q). static inline dispatch_queue_class_t @@ -1131,14 +1207,13 @@ _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, DISPATCH_QUEUE_INACTIVE)) == 0); if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) { - dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { dq->do_ref_cnt++; // released when DSF_DELETED is set } } - dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK); + dq_state |= initial_state_bits; dq->do_next = DISPATCH_OBJECT_LISTLESS; dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); @@ -1285,7 +1360,11 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) if (unlikely(_dq_state_is_suspended(old_state))) { new_state &= ~DISPATCH_QUEUE_ENQUEUED; } else if (unlikely(_dq_state_drain_locked(old_state))) { - os_atomic_rmw_loop_give_up(break); + if (_dq_state_in_uncontended_sync(old_state)) { + new_state |= DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + } else { + os_atomic_rmw_loop_give_up(break); + } } else { new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; new_state |= lock_bits; @@ -1324,6 +1403,7 @@ _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | _dispatch_lock_value_from_tid(tid) | + DISPATCH_QUEUE_UNCONTENDED_SYNC | (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL); uint64_t old_state, new_state; @@ -1506,6 +1586,27 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) return true; } +DISPATCH_ALWAYS_INLINE +static inline +dispatch_swift_job_invoke_flags_t +_dispatch_invoke_flags_to_swift_invoke_flags(dispatch_invoke_flags_t invoke_flags) +{ + return (invoke_flags & DISPATCH_INVOKE_COOPERATIVE_DRAIN) ? 
+ DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE : DISPATCH_SWIFT_JOB_INVOKE_NONE; +} + +/* + * Clears UNCONTENDED_SYNC and RECEIVED_SYNC_WAIT + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) +{ + uint64_t clearbits = DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + os_atomic_and2o(dq, dq_state, ~clearbits, relaxed); +} + #pragma mark - #pragma mark os_mpsc_queue @@ -1524,6 +1625,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ os_mpsc_node_type(Q) _tl = (tail); \ os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ + _dispatch_set_enqueuer_for(_os_mpsc_tail Q); \ os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \ }) @@ -1536,6 +1638,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) } else { \ (void)os_atomic_store(_os_mpsc_head Q, (head), relaxed); \ } \ + _dispatch_clear_enqueuer(); \ }) #define os_mpsc_push_list(Q, head, tail, _o_next) ({ \ @@ -1563,17 +1666,19 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) os_mpsc_node_type(Q) _node; \ _node = os_atomic_load(__n, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)__n); \ + _node = _dispatch_wait_for_enqueuer((void **)__n, \ + (void **) _os_mpsc_tail Q); \ } \ _node; \ }) -#define os_mpsc_get_next(_n, _o_next) ({ \ +#define os_mpsc_get_next(_n, _o_next, tailp) ({ \ __typeof__(_n) __n = (_n); \ _os_atomic_basetypeof(&__n->_o_next) _node; \ _node = os_atomic_load(&__n->_o_next, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next); \ + _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next, \ + (void **) tailp); \ } \ _node; \ }) @@ -1586,7 +1691,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) /* to head above doesn't clobber head from concurrent enqueuer */ \ if (unlikely(!_n && \ !os_atomic_cmpxchg(_os_mpsc_tail Q, _head, NULL, release))) { \ - _n = os_mpsc_get_next(_head, _o_next); \ + _n = os_mpsc_get_next(_head, _o_next, _os_mpsc_tail Q); \ os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ } \ _n; \ @@ -1619,7 +1724,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ __typeof__(head) _head = (head), _tail = (tail), _n = NULL; \ - if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next); \ + if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next, NULL); \ _n; \ }) @@ -1877,6 +1982,15 @@ _dispatch_queue_class_probe(dispatch_lane_class_t dqu) return unlikely(tail != NULL); } +extern const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue; + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +inline bool +_dispatch_is_custom_pri_workloop(dispatch_queue_t dq) +{ + return (dq->do_targetq) == (dispatch_queue_t) _dispatch_custom_workloop_root_queue._as_dq; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) @@ -1885,14 +1999,28 @@ _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) (dqu._dgq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_is_cooperative(dispatch_queue_class_t dqu) +{ + return (dqu._dgq)->dq_priority & DISPATCH_PRIORITY_FLAG_COOPERATIVE; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline 
dispatch_queue_global_t -_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) +_dispatch_get_root_queue(dispatch_qos_t qos, uintptr_t flags) { if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } - return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; + unsigned int add_on = 0; + if (flags & DISPATCH_QUEUE_OVERCOMMIT) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT; + } else if (flags & DISPATCH_QUEUE_COOPERATIVE) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE; + } + + return &_dispatch_root_queues[3 * (qos - 1) + add_on]; } #define _dispatch_get_default_queue(overcommit) \ @@ -2023,7 +2151,7 @@ _dispatch_set_basepri(dispatch_priority_t dq_dbp) dbp = dq_dbp & ~DISPATCH_PRIORITY_OVERRIDE_MASK; } else if (dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK) { dbp &= (DISPATCH_PRIORITY_OVERRIDE_MASK | - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + DISPATCH_PRIORITY_THREAD_TYPE_MASK); dbp |= MAX(old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK, dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK); if (_dispatch_priority_fallback_qos(dq_dbp) > @@ -2138,24 +2266,29 @@ _dispatch_priority_compute_update(pthread_priority_t pp) { dispatch_assert(pp != DISPATCH_NO_PRIORITY); if (!_dispatch_set_qos_class_enabled) return 0; - // the priority in _dispatch_get_priority() only tracks manager-ness - // and overcommit, which is inherited from the current value for each update - // however if the priority had the NEEDS_UNBIND flag set we need to clear it - // the first chance we get + // the priority in _dispatch_get_priority() only tracks manager-ness and + // thread request type, which is inherited from the current value for each + // update; however, if the priority had the NEEDS_UNBIND flag set we need + // to clear it the first chance we get // // the manager bit is invalid input, but we keep it to get meaningful // assertions in _dispatch_set_priority_and_voucher_slow() pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; pthread_priority_t cur_priority = _dispatch_get_priority(); pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + pthread_priority_t thread_type = _PTHREAD_PRIORITY_THREAD_TYPE_MASK; + + // The thread request type only matters if we have NEEDS_UNBIND. For the + // rest, we don't consider the thread request type when deciding if we need + // to change the current thread's priority. + if (unlikely(cur_priority & unbind)) { - // else we always need an update if the NEEDS_UNBIND flag is set - // the slow path in _dispatch_set_priority_and_voucher_slow() will + // if the NEEDS_UNBIND flag is set, we always need to update and take + // the slow path in _dispatch_set_priority_and_voucher_slow() which will // adjust the priority further with the proper overcommitness return pp ? 
pp : (cur_priority & ~unbind); } else { - cur_priority &= ~overcommit; + cur_priority &= ~thread_type; } if (unlikely(pp != cur_priority)) return pp; return 0; @@ -2263,6 +2396,19 @@ _dispatch_queue_need_override(dispatch_queue_class_t dq, dispatch_qos_t qos) #define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1 #define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2 +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_propagate(dispatch_qos_t qos) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + // Cap QOS for propagation at user-initiated + return MIN(qos, DISPATCH_QOS_USER_INITIATED); +#else + (void)qos; + return 0; +#endif +} + DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_priority_compute_propagated(pthread_priority_t pp, @@ -2493,13 +2639,37 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, dqu); } if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq); } +// used to forward the do_invoke of a continuation with a vtable to its real +// implementation. +// +// Unlike _dispatch_continuation_pop_forwarded, +// this doesn't free the continuation +#define _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, dq, ...) \ + ({ \ + dispatch_continuation_t _dc = (dc); \ + uintptr_t _dc_flags = (dc_flags); \ + _dispatch_continuation_voucher_adopt(_dc, _dc_flags); \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_pop(dq, dc); \ + } \ + __VA_ARGS__; \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_complete(_dc); \ + } \ + }) + // used to forward the do_invoke of a continuation with a vtable to its real // implementation. #define _dispatch_continuation_pop_forwarded(dc, dc_flags, dq, ...) \ diff --git a/src/internal.h b/src/internal.h index cf4ccfe84..d22a3ac09 100644 --- a/src/internal.h +++ b/src/internal.h @@ -35,6 +35,8 @@ #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_PRIVATE_INDIRECT__ #ifdef __APPLE__ #include @@ -61,6 +63,9 @@ #if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC #define DISPATCH_LAYOUT_SPI 1 #endif +#if !defined(DISPATCH_CHANNEL_SPI) +#define DISPATCH_CHANNEL_SPI 1 +#endif #if __has_include() #include @@ -94,6 +99,19 @@ #include #include +#if __has_feature(ptrauth_calls) +#include +#define DISPATCH_VTABLE_ENTRY(op) \ + (* __ptrauth(ptrauth_key_process_independent_code, true, \ + ptrauth_string_discriminator("dispatch." 
#op)) const op) +#define DISPATCH_FUNCTION_POINTER \ + __ptrauth(ptrauth_key_process_dependent_code, true, \ + ptrauth_string_discriminator("dispatch.handler")) +#else +#define DISPATCH_VTABLE_ENTRY(op) (* const op) +#define DISPATCH_FUNCTION_POINTER +#endif + #define __DISPATCH_HIDE_SYMBOL(sym, version) \ __asm__(".section __TEXT,__const\n\t" \ ".globl $ld$hide$os" #version "$_" #sym "\n\t" \ @@ -138,6 +156,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; #ifdef __OBJC__ id _objc_dq; // unsafe cast for the sake of object.m @@ -153,6 +172,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; dispatch_lane_class_t _dlu; #ifdef __OBJC__ @@ -168,6 +188,7 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; struct dispatch_semaphore_s *_dsema; @@ -175,6 +196,7 @@ typedef union { struct dispatch_io_s *_dchannel; struct dispatch_continuation_s *_dc; + struct dispatch_swift_continuation_s *_dsjc; struct dispatch_sync_context_s *_dsc; struct dispatch_operation_s *_doperation; struct dispatch_disk_s *_ddisk; @@ -197,6 +219,10 @@ upcast(dispatch_object_t dou) #endif // __OBJC__ #include +#include +#include +#include +#include #include #include #include @@ -207,17 +233,24 @@ upcast(dispatch_object_t dou) #include #include #include +#include /* private.h must be included last to avoid picking up installed headers. */ #if !defined(_WIN32) #include #endif #include "os/object_private.h" +#include "os/eventlink_private.h" +#include "os/workgroup_object_private.h" +#include "os/workgroup_interval_private.h" +#include "apply_private.h" #include "queue_private.h" +#include "channel_private.h" #include "workloop_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" +#include "time_private.h" #include "os/voucher_private.h" #include "os/voucher_activity_private.h" #include "io_private.h" @@ -253,7 +286,7 @@ upcast(dispatch_object_t dou) #include #endif #endif /* HAVE_MACH */ -#if __has_include() +#if __has_include() && __has_include() #define HAVE_OS_FAULT_WITH_PAYLOAD 1 #include #include @@ -318,6 +351,10 @@ upcast(dispatch_object_t dou) #endif #include +#if __has_include() +#include +#endif + /* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ __BEGIN_DECLS @@ -463,6 +500,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_MODE_STRICT (1U << 0) #define DISPATCH_MODE_NO_FAULTS (1U << 1) +#define DISPATCH_COOPERATIVE_POOL_STRICT (1U << 2) extern uint8_t _dispatch_mode; DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD @@ -492,7 +530,7 @@ DISPATCH_NOINLINE DISPATCH_NORETURN DISPATCH_COLD void _dispatch_abort(size_t line, long val); #if !defined(DISPATCH_USE_OS_DEBUG_LOG) && DISPATCH_DEBUG -#if __has_include() +#if __has_include() && !TARGET_OS_DRIVERKIT #define DISPATCH_USE_OS_DEBUG_LOG 1 #include #endif @@ -688,8 +726,7 @@ _dispatch_fork_becomes_unsafe(void) #ifndef HAVE_PTHREAD_WORKQUEUE_WORKLOOP #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(WORKQ_FEATURE_WORKLOOP) && \ - defined(KEVENT_FLAG_WORKLOOP) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) + defined(KEVENT_FLAG_WORKLOOP) #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 1 #else #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 0 @@ -697,13 +734,21 @@ _dispatch_fork_becomes_unsafe(void) #endif // !defined(HAVE_PTHREAD_WORKQUEUE_WORKLOOP) #ifndef DISPATCH_USE_WORKQUEUE_NARROWING -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_WORKQUEUES #define DISPATCH_USE_WORKQUEUE_NARROWING 1 #else #define DISPATCH_USE_WORKQUEUE_NARROWING 0 #endif #endif // !defined(DISPATCH_USE_WORKQUEUE_NARROWING) +#ifndef DISPATCH_USE_COOPERATIVE_WORKQUEUE +#if defined(WORKQ_FEATURE_COOPERATIVE_WORKQ) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 1 +#else +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 0 +#endif +#endif + #ifndef DISPATCH_USE_PTHREAD_ROOT_QUEUES #if defined(__BLOCKS__) && defined(__APPLE__) #define DISPATCH_USE_PTHREAD_ROOT_QUEUES 1 // @@ -750,6 +795,22 @@ _dispatch_fork_becomes_unsafe(void) #endif #endif // !defined(DISPATCH_USE_KEVENT_WORKLOOP) +#ifndef DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 1 +#else +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 0 +#endif +#endif // !defined DISPATCH_USE_WL_SYNC_IPC_HANDOFF + +#ifndef DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_KEVENT_SETUP 1 +#else +#define DISPATCH_USE_KEVENT_SETUP 0 +#endif +#endif // !defined(DISPATCH_USE_KEVENT_SETUP) + #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYSTATUS 1 @@ -774,10 +835,10 @@ extern int malloc_engaged_nano(void); extern bool _dispatch_memory_warn; #endif -#if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) && \ - !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE) -#define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1 +#if defined(MACH_MSG_QOS_LAST) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 1 +#else +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 0 #endif #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) @@ -795,7 +856,6 @@ extern bool _dispatch_memory_warn; #endif #endif // MACH_SEND_NOIMPORTANCE - #if HAVE_LIBPROC_INTERNAL_H #include #include @@ -966,7 +1026,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #ifndef VOUCHER_USE_PERSONA #if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) && \ - !TARGET_OS_SIMULATOR + !TARGET_OS_SIMULATOR && !TARGET_CPU_ARM #define VOUCHER_USE_PERSONA 1 #else #define VOUCHER_USE_PERSONA 0 @@ -982,6 
+1042,23 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_PERSONA 0 #endif // VOUCHER_USE_MACH_VOUCHER +#ifndef VOUCHER_USE_PERSONA_ADOPT_ANY +#if VOUCHER_USE_PERSONA && defined(BANK_PERSONA_ADOPT_ANY) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define VOUCHER_USE_PERSONA_ADOPT_ANY 1 +#else +#define VOUCHER_USE_PERSONA_ADOPT_ANY 0 +#endif +#endif + +#ifndef OS_EVENTLINK_USE_MACH_EVENTLINK +#if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) && __has_include() +#define OS_EVENTLINK_USE_MACH_EVENTLINK 1 +#else +#define OS_EVENTLINK_USE_MACH_EVENTLINK 0 +#endif +#endif // OS_EVENTLINK_USE_MACH_EVENTLINK + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // @@ -1063,7 +1140,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -DISPATCH_ENUM(dispatch_thread_set_self, unsigned long, +DISPATCH_OPTIONS(dispatch_thread_set_self, unsigned long, DISPATCH_PRIORITY_ENFORCE = 0x1, DISPATCH_VOUCHER_REPLACE = 0x2, DISPATCH_VOUCHER_CONSUME = 0x4, @@ -1075,6 +1152,7 @@ static inline voucher_t _dispatch_adopt_priority_and_set_voucher( dispatch_thread_set_self_t flags); #if HAVE_MACH mach_port_t _dispatch_get_mach_host_port(void); +bool _dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr); #endif #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -1102,6 +1180,8 @@ extern bool _dispatch_kevent_workqueue_enabled; /* #includes dependent on internal.h */ #include "object_internal.h" +#include "workgroup_internal.h" +#include "eventlink_internal.h" #include "semaphore_internal.h" #include "introspection_internal.h" #include "queue_internal.h" diff --git a/src/introspection.c b/src/introspection.c index f38f9e372..bee263917 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -249,12 +249,16 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); ctxt = dsc->dsc_ctxt; func = dsc->dsc_func; + } else if (_dispatch_object_is_channel_item(dc)) { + dispatch_channel_callbacks_t callbacks = upcast(dq)._dch->dch_callbacks; + ctxt = dc->dc_ctxt; + func = (dispatch_function_t)callbacks->dcc_invoke; } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; - if (da->da_todo) { + if (os_atomic_load2o(da, da_todo, relaxed)) { dc = da->da_dc; - dq = dc->dc_data; + dq = dc->dc_other; ctxt = dc->dc_ctxt; func = dc->dc_func; apply = true; @@ -389,7 +393,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, } if (metatype == _DISPATCH_CONTINUATION_TYPE) { _dispatch_introspection_continuation_get_info(dq, dc, &diqi); - } else if (metatype == _DISPATCH_LANE_TYPE) { + } else if (metatype == _DISPATCH_LANE_TYPE || + type == DISPATCH_CHANNEL_TYPE) { diqi.type = dispatch_introspection_queue_item_type_queue; diqi.queue = _dispatch_introspection_lane_get_info(dou._dl); } else if (metatype == _DISPATCH_WORKLOOP_TYPE) { @@ -422,7 +427,7 @@ dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_queue_introspection_context_t next; if (start) { - next = start->do_finalizer; + next = start->do_introspection_ctxt; } else { next = LIST_FIRST(&_dispatch_introspection.queues); } @@ -611,7 +616,7 @@ _dispatch_object_finalizer(dispatch_object_t dou) switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; 
return dqic->dqic_finalizer; default: return dou._do->do_finalizer; @@ -626,7 +631,7 @@ _dispatch_object_set_finalizer(dispatch_object_t dou, switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; dqic->dqic_finalizer = finalizer; break; default: @@ -651,7 +656,7 @@ _dispatch_introspection_queue_create(dispatch_queue_t dq) LIST_INIT(&dqic->dqic_order_top_head); LIST_INIT(&dqic->dqic_order_bottom_head); } - dq->do_finalizer = dqic; + dq->do_introspection_ctxt = dqic; _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list); @@ -684,7 +689,7 @@ _dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) void _dispatch_introspection_queue_dispose(dispatch_queue_t dq) { - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq->do_introspection_ctxt; DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { @@ -978,7 +983,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { otherq = e->dqoe_bottom_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&o_dqic->dqic_order_bottom_head_lock); LIST_REMOVE(e, dqoe_order_bottom_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_bottom_head_lock); @@ -993,7 +998,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { otherq = e->dqoe_top_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&o_dqic->dqic_order_top_head_lock); LIST_REMOVE(e, dqoe_order_top_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_top_head_lock); @@ -1065,7 +1070,8 @@ _dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) { struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t btqic = + bottom_tq->do_introspection_ctxt; // has anyone above bottom_tq ever sync()ed onto top_tq ? 
_dispatch_unfair_lock_lock(&btqic->dqic_order_top_head_lock); @@ -1094,8 +1100,9 @@ _dispatch_introspection_order_record(dispatch_queue_t top_q) dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q); dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); - dispatch_queue_introspection_context_t ttqic = top_tq->do_finalizer; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t ttqic, btqic; + ttqic = top_tq->do_introspection_ctxt; + btqic = bottom_tq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock); LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) { @@ -1182,7 +1189,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) [2] = "a recipient", [3] = "both an initiator and a recipient" }; - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq->do_introspection_ctxt; bool as_top = !LIST_EMPTY(&dqic->dqic_order_top_head); bool as_bottom = !LIST_EMPTY(&dqic->dqic_order_top_head); @@ -1195,7 +1202,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) "a dispatch_sync", dq, dq->dq_label ?: "", reasons[(int)as_top + 2 * (int)as_bottom]); _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - _dispatch_introspection_queue_order_dispose(dq->do_finalizer); + _dispatch_introspection_queue_order_dispose(dq->do_introspection_ctxt); _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } } diff --git a/src/io.c b/src/io.c index cbea654cb..fa721bdd0 100644 --- a/src/io.c +++ b/src/io.c @@ -151,11 +151,16 @@ enum { #define _dispatch_io_log(x, ...) #endif // DISPATCH_IO_DEBUG +#if !defined(_WIN32) +#define _dispatch_fd_debug(msg, fd, ...) \ + _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) +#else // !defined(_WIN32) #define _dispatch_fd_debug(msg, fd, ...) \ + _dispatch_io_log("fd[0x%" PRIx64 "]: " msg, fd, ##__VA_ARGS__) +#endif // !defined(_WIN32) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) -#define _dispatch_channel_debug(msg, channel, ...) \ +#define _dispatch_io_channel_debug(msg, channel, ...) \ _dispatch_io_log("channel[%p]: " msg, channel, ##__VA_ARGS__) #define _dispatch_fd_entry_debug(msg, fd_entry, ...) \ _dispatch_io_log("fd_entry[%p]: " msg, fd_entry, ##__VA_ARGS__) @@ -261,7 +266,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? 
fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_channel_debug("cleanup handler invoke: err %d", + _dispatch_io_channel_debug("cleanup handler invoke: err %d", channel, err); cleanup_handler(err); }); @@ -355,7 +360,7 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; - _dispatch_channel_debug("create", channel); + _dispatch_io_channel_debug("create", channel); channel->fd_actual = fd; dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -436,7 +441,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; - _dispatch_channel_debug("create with path %s", channel, path); + _dispatch_io_channel_debug("create with path %s", channel, path); channel->fd_actual = -1; path_data->channel = channel; path_data->oflag = oflag; @@ -536,7 +541,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, return DISPATCH_BAD_INPUT; } dispatch_io_t channel = _dispatch_io_create(type); - _dispatch_channel_debug("create with channel %p", channel, in_channel); + _dispatch_io_channel_debug("create with channel %p", channel, in_channel); dispatch_suspend(channel->queue); _dispatch_retain(queue); _dispatch_retain(channel); @@ -654,7 +659,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set high water: %zu", channel, high_water); + _dispatch_io_channel_debug("set high water: %zu", channel, high_water); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -668,7 +673,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set low water: %zu", channel, low_water); + _dispatch_io_channel_debug("set low water: %zu", channel, low_water); if (channel->params.high < low_water) { channel->params.high = low_water ? low_water : 1; } @@ -683,7 +688,7 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set interval: %llu", channel, + _dispatch_io_channel_debug("set interval: %llu", channel, (unsigned long long)interval); channel->params.interval = interval < INT64_MAX ? 
interval : INT64_MAX; channel->params.interval_flags = flags; @@ -728,7 +733,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_channel_debug("stop", channel); + _dispatch_io_channel_debug("stop", channel); (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ @@ -736,7 +741,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_channel_debug("stop cleanup", channel); + _dispatch_io_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { if (fd_entry->path_data) { @@ -750,7 +755,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("stop cleanup after close", + _dispatch_io_channel_debug("stop cleanup after close", channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); @@ -786,7 +791,7 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("close", channel); + _dispatch_io_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { (void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, relaxed); @@ -1072,7 +1077,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_channel_debug("IO handler invoke: err %d", channel, + _dispatch_io_channel_debug("IO handler invoke: err %d", channel, err); handler(true, d, err); _dispatch_release(channel); @@ -1084,7 +1089,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } dispatch_operation_t op = _dispatch_object_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); - _dispatch_channel_debug("operation create: %p", channel, op); + _dispatch_io_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create_with_target("com.apple.libdispatch-io.opq", @@ -1312,15 +1317,15 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, (void)mode; DWORD dwDesiredAccess = 0; switch (oflag & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { - case _O_RDONLY: - dwDesiredAccess = GENERIC_READ; - break; - case _O_WRONLY: - dwDesiredAccess = GENERIC_WRITE; - break; - case _O_RDWR: - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - break; + case _O_RDONLY: + dwDesiredAccess = GENERIC_READ; + break; + case _O_WRONLY: + dwDesiredAccess = GENERIC_WRITE; + break; + case _O_RDWR: + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; } DWORD dwCreationDisposition = OPEN_EXISTING; if (oflag & _O_CREAT) { @@ -1422,7 +1427,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // On fds lock queue dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); - _dispatch_fd_entry_debug("create: fd %" PRId64, fd_entry, fd); +#if !defined(_WIN32) + _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); +#else // !defined(_WIN32) + _dispatch_fd_entry_debug("create: fd %"PRId64, fd_entry, fd); 
+#endif // !defined(_WIN32) fd_entry->fd = fd; LIST_INSERT_HEAD(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( diff --git a/src/mach.c b/src/mach.c index 726368b01..3a39d8d9c 100644 --- a/src/mach.c +++ b/src/mach.c @@ -31,7 +31,7 @@ #define DM_CHECKIN_CANCELED ((dispatch_mach_msg_t)~0ul) -DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_mach_send_invoke_flags, uint32_t, DM_SEND_INVOKE_NONE = 0x0, DM_SEND_INVOKE_MAKE_DIRTY = 0x1, DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, @@ -64,12 +64,16 @@ static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, dispatch_queue_t drq); static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( + dispatch_mach_t dm, void *ctxt); static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( dispatch_mach_msg_t dmsg, dispatch_mach_t dm); static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, mach_port_t send); +static inline mach_msg_option_t +_dispatch_mach_send_msg_prepare(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options); // For tests only. DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); @@ -101,10 +105,17 @@ _dispatch_mach_hooks_install_default(void) #pragma mark - #pragma mark dispatch_mach_t +DISPATCH_OPTIONS(dispatch_mach_create_flags, unsigned, + DMCF_NONE = 0x00000000, + DMCF_HANDLER_IS_BLOCK = 0x00000001, + DMCF_IS_XPC = 0x00000002, + DMCF_USE_STRICT_REPLY = 0x00000004, +); + static dispatch_mach_t _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler, bool handler_is_block, - bool is_xpc) + dispatch_mach_handler_function_t handler, + dispatch_mach_create_flags_t dmcf) { dispatch_mach_recv_refs_t dmrr; dispatch_mach_send_refs_t dmsr; @@ -113,14 +124,18 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm = _dispatch_queue_alloc(mach, DQF_MUTABLE, 1, DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dm; dm->dq_label = label; - dm->dm_is_xpc = is_xpc; + dm->dm_is_xpc = (bool)(dmcf & DMCF_IS_XPC); + dm->dm_strict_reply = (bool)(dmcf & DMCF_USE_STRICT_REPLY); dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; dispatch_assert(dmrr->du_is_direct); dmrr->du_owner_wref = _dispatch_ptr2wref(dm); dmrr->dmrr_handler_func = handler; dmrr->dmrr_handler_ctxt = context; - dmrr->dmrr_handler_is_block = handler_is_block; + dmrr->dmrr_handler_is_block = (bool)(dmcf & DMCF_HANDLER_IS_BLOCK); + if (dm->dm_strict_reply) { + dmrr->du_fflags |= MACH_MSG_STRICT_REPLY; + } dm->dm_recv_refs = dmrr; dmsr = dux_create(&_dispatch_mach_type_send, 0, @@ -131,6 +146,9 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, if (unlikely(!q)) { q = _dispatch_get_default_queue(true); } else { + if (_dispatch_queue_is_cooperative(q)) { + DISPATCH_CLIENT_CRASH(q, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_retain(q); } dm->do_targetq = q; @@ -144,22 +162,22 @@ dispatch_mach_create(const char *label, dispatch_queue_t q, { dispatch_block_t bb = _dispatch_Block_copy((void*)handler); return _dispatch_mach_create(label, q, bb, - (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true, - false); + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), + 
DMCF_HANDLER_IS_BLOCK); } dispatch_mach_t dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { return _dispatch_mach_create(label, q, context, handler, DMCF_NONE); } dispatch_mach_t dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { return _dispatch_mach_create(label, q, context, handler, DMCF_IS_XPC | DMCF_USE_STRICT_REPLY); } void @@ -177,6 +195,72 @@ _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free) _dispatch_lane_class_dispose(dm, allow_free); } +void +dispatch_mach_request_no_senders(dispatch_mach_t dm) +{ + dm->dm_arm_no_senders = true; + _dispatch_queue_setter_assert_inactive(dm); +} + +void +dispatch_mach_notify_no_senders(dispatch_mach_t dm, bool made_sendrights) +{ + dm->dm_arm_no_senders = true; + dm->dm_made_sendrights = made_sendrights; + _dispatch_queue_setter_assert_inactive(dm); +} + +void +dispatch_mach_set_flags(dispatch_mach_t dm, dispatch_mach_flags_t flags) +{ + dm->dm_strict_reply = !!(flags & DMF_USE_STRICT_REPLY); + _dispatch_queue_setter_assert_inactive(dm); +} + +static void +_dispatch_mach_arm_no_senders(dispatch_mach_t dm, bool allow_previous) +{ + mach_port_t recvp = (mach_port_t)dm->dm_recv_refs->du_ident; + mach_port_t previous = MACH_PORT_NULL; + kern_return_t kr; + + if (MACH_PORT_VALID(recvp)) { + // + // + // Establishing a peer-connection can be done in two ways: + // 1) the client makes a receive right with an inserted send right, + // and ships the receive right across in a checkin message, + // + // 2) the server makes a receive right and "make-send" a send right + // in the checkin reply. + // + // In case (1), the typical XPC case, the send right for the peer + // connection already exists by the time dispatch_mach_connect() is + // called; in case (2) it will only be made later. + // + // We use dm->dm_made_sendrights to determine which case we're in. If + // (1), sync = 0 since the send right could have gone away and we want + // no-senders to fire immediately. If (2), sync = 1 since we want + // no-senders to fire only after at least one send right has been made. + + mach_port_mscount_t sync = dm->dm_made_sendrights ? 
0 : 1; + + kr = mach_port_request_notification(mach_task_self(), recvp, + MACH_NOTIFY_NO_SENDERS, sync, recvp, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + if (unlikely(previous)) { + if (!allow_previous) { + DISPATCH_CLIENT_CRASH(previous, "Mach port notification collision"); + } + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +} + void dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, mach_port_t send, dispatch_mach_msg_t checkin) @@ -197,6 +281,10 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, dmsr->dmsr_checkin = checkin; } + if (dm->dm_arm_no_senders && !dmsr->dmsr_checkin) { + _dispatch_mach_arm_no_senders(dm, false); + } + uint32_t disconnect_cnt = os_atomic_and_orig2o(dmsr, dmsr_disconnect_cnt, ~DISPATCH_MACH_NEVER_CONNECTED, relaxed); if (unlikely(!(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED))) { @@ -290,7 +378,7 @@ _dispatch_mach_reply_unregister(dispatch_mach_t dm, dispatch_queue_t drq = NULL; if (disconnected) { if (dm->dm_is_xpc && dmr->dmr_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmr->dmr_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, drq ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED @@ -367,9 +455,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, dispatch_queue_t drq = NULL; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); + } + if (dm->dm_strict_reply) { + dmr->du_fflags |= MACH_MSG_STRICT_REPLY; } - if (unlikely(!drq && _dispatch_unote_wlh(dm->dm_recv_refs))) { + if (unlikely((!drq || drq == dm->_as_dq) && + _dispatch_unote_wlh(dm->dm_recv_refs))) { wlh = _dispatch_unote_wlh(dm->dm_recv_refs); pri = dm->dq_priority; } else if (dx_hastypeflag(drq, QUEUE_ROOT)) { @@ -412,30 +504,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, #pragma mark - #pragma mark dispatch_mach_msg -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline bool -_dispatch_use_mach_special_reply_port(void) -{ -#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE - return true; -#else -#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;}) - return false; -#endif -} - static void _dispatch_destruct_reply_port(mach_port_t reply_port, enum thread_destruct_special_reply_port_rights rights) { kern_return_t kr = KERN_SUCCESS; - if (_dispatch_use_mach_special_reply_port()) { - kr = thread_destruct_special_reply_port(reply_port, rights); - } else if (rights == THREAD_SPECIAL_REPLY_PORT_ALL || - rights == THREAD_SPECIAL_REPLY_PORT_RECEIVE_ONLY) { - kr = mach_port_destruct(mach_task_self(), reply_port, 0, 0); - } + kr = thread_destruct_special_reply_port(reply_port, rights); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); } @@ -444,25 +519,16 @@ static mach_port_t _dispatch_get_thread_reply_port(void) { mach_port_t reply_port, mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { reply_port = mrp; _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", reply_port); } else { - if (_dispatch_use_mach_special_reply_port()) { - reply_port = 
thread_get_special_reply_port(); - _dispatch_set_thread_special_reply_port(reply_port); - } else { - reply_port = mach_reply_port(); - _dispatch_set_thread_mig_reply_port(reply_port); - } + reply_port = thread_get_special_reply_port(); + _dispatch_set_thread_special_reply_port(reply_port); if (unlikely(!MACH_PORT_VALID(reply_port))) { - DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(), + DISPATCH_CLIENT_CRASH(0, "Unable to allocate reply port, possible port leak"); } _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", @@ -475,12 +541,7 @@ _dispatch_get_thread_reply_port(void) static void _dispatch_clear_thread_reply_port(mach_port_t reply_port) { - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (reply_port != mrp) { if (mrp) { _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " @@ -488,11 +549,7 @@ _dispatch_clear_thread_reply_port(mach_port_t reply_port) } return; } - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); - } else { - _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); - } + _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); _dispatch_debug_machport(reply_port); _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", reply_port); @@ -502,23 +559,14 @@ static void _dispatch_set_thread_reply_port(mach_port_t reply_port) { _dispatch_debug_machport(reply_port); - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { _dispatch_destruct_reply_port(reply_port, THREAD_SPECIAL_REPLY_PORT_ALL); _dispatch_debug("machport[0x%08x]: deallocated sync reply port " "(found 0x%08x)", reply_port, mrp); } else { - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(reply_port); - } else { - _dispatch_set_thread_mig_reply_port(reply_port); - } + _dispatch_set_thread_special_reply_port(reply_port); _dispatch_debug("machport[0x%08x]: restored thread sync reply port", reply_port); } @@ -599,6 +647,18 @@ _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, return dmsg; } +DISPATCH_NOINLINE +static void +_dispatch_mach_no_senders_invoke(dispatch_mach_t dm) +{ + if (!(_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) { + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_NO_SENDERS, NULL, 0, dmrr->dmrr_handler_func); + } + _dispatch_perfmon_workitem_inc(); +} + void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, mach_msg_header_t *hdr, mach_msg_size_t siz, @@ -622,6 +682,19 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { free(hdr); } + } else if (hdr->msgh_id == MACH_NOTIFY_NO_SENDERS && dm->dm_arm_no_senders){ + if (dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) { + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + (void)_dispatch_continuation_init_f(dc, dm, dm, + (dispatch_function_t)_dispatch_mach_no_senders_invoke, + DISPATCH_BLOCK_HAS_PRIORITY | DISPATCH_BLOCK_NO_VOUCHER, + DC_FLAG_CONSUME); + _dispatch_continuation_async(dm, dc, 0, dc->dc_flags); + } + 
mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } } else { // Once the mach channel disarming is visible, cancellation will switch // to immediately destroy messages. If we're preempted here, then the @@ -636,8 +709,13 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, _dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp); } - if (unlikely(_dispatch_unote_needs_delete(du))) { + // Note: it is ok to do a relaxed load of the dq_state_bits as we only care + // about bits that are in the top bits of the 64-bit dq_state. + // This avoids an expensive CAS on 32-bit architectures. + if (unlikely(_dispatch_unote_needs_delete(du) || + _dq_state_is_activating((uint64_t)dm->dq_state_bits << 32))) { return dx_wakeup(dm, 0, DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } return _dispatch_release_2_tailcall(dm); @@ -664,7 +742,7 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, if (dmsg) { dispatch_queue_t drq = NULL; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); @@ -691,11 +769,7 @@ DISPATCH_ALWAYS_INLINE static void _dispatch_mach_stack_probe(void *addr, size_t size) { -#if TARGET_OS_MAC && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) && \ - (defined(__x86_64__) || defined(__arm64__)) - // there should be a __has_feature() macro test - // for this, for now we approximate it, for when the compiler - // is generating calls to ____chkstk_darwin on our behalf +#if __has_feature(stack_check) (void)addr; (void)size; #else for (mach_vm_address_t p = mach_vm_trunc_page(addr + vm_page_size); @@ -730,6 +804,9 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, notify = send; options |= MACH_RCV_SYNC_WAIT; } + if (dm->dm_strict_reply) { + options |= MACH_MSG_STRICT_REPLY; + } retry: _dispatch_debug_machport(reply_port); @@ -944,7 +1021,7 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? 
&dwr->dwr_refs : NULL, @@ -961,6 +1038,39 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, } } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_priority_in_voucher(void) +{ + return !DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_msg_priority_t +_dispatch_mach_send_priority(dispatch_mach_msg_t dmsg, + dispatch_qos_t qos_ovr, mach_msg_option_t *opts) +{ + qos_ovr = _dispatch_qos_propagate(qos_ovr); + if (qos_ovr) { +#if DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED + if (!_dispatch_mach_send_priority_in_voucher()) { + mach_msg_qos_t qos; + int relpri; + + qos = (mach_msg_qos_t)_dispatch_qos_from_pp(dmsg->dmsg_priority); + relpri = _pthread_priority_relpri(dmsg->dmsg_priority); + *opts |= MACH_SEND_OVERRIDE; + return mach_msg_priority_encode((mach_msg_qos_t)qos_ovr, qos, relpri); + } +#else + (void)dmsg; +#endif + *opts |= MACH_SEND_OVERRIDE; + return (mach_msg_priority_t)_dispatch_qos_to_pp(qos_ovr); + } + return MACH_MSG_PRIORITY_UNSPECIFIED; +} + DISPATCH_NOINLINE static uint32_t _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, @@ -988,10 +1098,24 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dm->dm_needs_mgr = true; goto out; } + // Tag the checkin message with a voucher, priority, and any necessary + // options + (void) _dispatch_mach_send_msg_prepare(dm, dsrr->dmsr_checkin, 0); if (unlikely(!_dispatch_mach_msg_send(dm, dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { + + // We failed to send the checkin message; clear the voucher on + // it and let the retry tag it with the voucher later. + voucher_t v = dsrr->dmsr_checkin->dmsg_voucher; + if (v) { + _voucher_release(v); + dsrr->dmsr_checkin->dmsg_voucher = NULL; + } goto out; } + if (dm->dm_arm_no_senders) { + _dispatch_mach_arm_no_senders(dm, true); + } dsrr->dmsr_checkin = NULL; } } @@ -1010,38 +1134,35 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, msg->msgh_remote_port); dispatch_assert(_dispatch_unote_registered(dsrr)); } - if (dsrr->dmsr_notification_armed) { + if (os_atomic_load(&dsrr->dmsr_notification_armed, relaxed)) { goto out; } opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; - if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + if (_dispatch_mach_send_priority_in_voucher() && + dmsg->dmsg_priority != _voucher_get_priority(voucher)) { ipc_kvoucher = _voucher_create_mach_voucher_with_priority( voucher, dmsg->dmsg_priority); } _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (ipc_kvoucher) { + if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { kvoucher_move_send = true; clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, ipc_kvoucher, kvoucher_move_send); } else { clear_voucher = _voucher_mach_msg_set(msg, voucher); } - if (qos) { - opts |= MACH_SEND_OVERRIDE; - msg_priority = (mach_msg_priority_t) - _dispatch_priority_compute_propagated( - _dispatch_qos_to_pp(qos), 0); + msg_priority = _dispatch_mach_send_priority(dmsg, qos, &opts); + if (reply_port && dm->dm_strict_reply) { + opts |= MACH_MSG_STRICT_REPLY; } } _dispatch_debug_machport(msg->msgh_remote_port); if (reply_port) _dispatch_debug_machport(reply_port); if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { if (dwr->dwr_refs.dmr_reply_port_owned) { - if (_dispatch_use_mach_special_reply_port()) { - opts |= MACH_SEND_SYNC_OVERRIDE; - } + opts |= MACH_SEND_SYNC_OVERRIDE; _dispatch_clear_thread_reply_port(reply_port); } _dispatch_mach_reply_waiter_register(dm, dwr, reply_port, 
dmsg); @@ -1068,13 +1189,31 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, } } if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (msg->msgh_remote_port == MACH_PORT_DEAD) { + // It's possible that the remote port may have died after the + // attempt to enqueue the message timed out. In this case, the + // pseudo-receive will copy-out MOVE_SEND over the disposition and + // MACH_PORT_DEAD for the remote port name, without giving us a + // deadname ref for the send right name. + // + // When we next attempt to resend this message, we'll overwrite the + // remote port back to the channel send right. It is therefore + // crucial that we reset the disposition to COPY_SEND, since the ref + // the MOVE_SEND was referring to never actually arrived. + // + // rdar://77994175 + + msg->msgh_bits &= ~((mach_msg_bits_t)MACH_MSGH_BITS_REMOTE_MASK); + msg->msgh_bits |= MACH_MSG_TYPE_COPY_SEND; + } + if (opts & MACH_SEND_NOTIFY) { _dispatch_mach_notification_set_armed(dsrr); } else { // send kevent must be installed on the manager queue dm->dm_needs_mgr = true; } - if (ipc_kvoucher) { + if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); voucher_t ipc_voucher; ipc_voucher = _voucher_create_with_priority_and_mach_voucher( @@ -1101,7 +1240,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (unlikely(kr)) { // Send failed, so reply was never registered if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? &dwr->dwr_refs : NULL, @@ -1296,7 +1435,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, qos = _dmsr_state_max_qos(new_state); if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { os_atomic_thread_fence(dependency); - dmsr = os_atomic_force_dependency_on(dmsr, new_state); + dmsr = os_atomic_inject_dependency(dmsr, new_state); goto again; } @@ -1421,9 +1560,13 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, uint64_t old_state, new_state, state_flags = 0; struct dispatch_object_s *prev; dispatch_wakeup_flags_t wflags = 0; - bool is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)); + bool is_send_barrier = false; dispatch_tid owner; + if (_dispatch_object_has_vtable(dou._dc)) { + is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)); + } + // the send queue needs to retain // the mach channel if not empty, for the whole duration of this call // @@ -1664,13 +1807,11 @@ _dispatch_mach_checkin_options(void) return options; } - - static inline mach_msg_option_t _dispatch_mach_send_options(void) { - mach_msg_option_t options = 0; - return options; + //rdar://problem/13740985&47300191&47605096 + return (_dispatch_is_background_thread() ? 
MACH_SEND_NOIMPORTANCE : 0); } DISPATCH_ALWAYS_INLINE @@ -1682,7 +1823,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm, if (dm->dm_is_xpc && (options & DISPATCH_MACH_WAIT_FOR_REPLY) == 0 && _dispatch_mach_msg_get_reply_port(dmsg)) { dispatch_assert( - _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt)); + _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt)); } #else (void)dm; @@ -1691,8 +1832,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm, dmsg->dmsg_priority = 0; } else { unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT; - if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) && - _dispatch_use_mach_special_reply_port()) { + if (options & DISPATCH_MACH_WAIT_FOR_REPLY) { // TODO: remove QoS contribution of sync IPC messages to send queue // rdar://31848737 flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC; @@ -1829,11 +1969,9 @@ _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); if (dwr->dwr_refs.dmr_reply_port_owned) { _dispatch_clear_thread_reply_port(reply_port); - if (_dispatch_use_mach_special_reply_port()) { - // link special reply port to send right for remote receive right - // TODO: extend to pre-connect phase - send = dm->dm_send_refs->dmsr_send; - } + // link special reply port to send right for remote receive right + // TODO: extend to pre-connect phase + send = dm->dm_send_refs->dmsr_send; } dmsg = _dispatch_mach_msg_reply_recv(dm, dwr, reply_port, send); #if DISPATCH_DEBUG @@ -2136,6 +2274,15 @@ _dispatch_mach_handoff_context(mach_port_t port) return dihc; } +bool +dispatch_mach_can_handoff_4libxpc(void) +{ + dispatch_thread_context_t dtc; + + dtc = _dispatch_thread_context_find(_dispatch_mach_msg_context_key); + return dtc && dtc->dtc_dmsg && dtc->dtc_dih->dih_dc.dc_other == NULL; +} + static void _dispatch_ipc_handoff_release(dispatch_ipc_handoff_t dih) { @@ -2149,13 +2296,15 @@ _dispatch_mach_handoff_set_wlh(dispatch_ipc_handoff_t dih, dispatch_queue_t dq) { while (likely(dq->do_targetq)) { if (unlikely(_dispatch_queue_is_mutable(dq))) { - DISPATCH_CLIENT_CRASH(0, - "Trying to handoff IPC onto mutable hierarchy"); + _dispatch_queue_sidelock_lock(upcast(dq)._dl); + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + _dispatch_queue_sidelock_unlock(upcast(dq)._dl); } if (_dq_state_is_base_wlh(dq->dq_state)) { os_atomic_store(&dih->dih_wlh, (uint64_t)dq, relaxed); return; } + dq = dq->do_targetq; } /* unsupported hierarchy */ @@ -2169,12 +2318,13 @@ dispatch_mach_handoff_reply_f(dispatch_queue_t dq, _dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port); dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih; dispatch_continuation_t dc = &dih->dih_dc; + uintptr_t dc_flags = DC_FLAG_CONSUME; _dispatch_mach_handoff_set_wlh(dih, dq); _dispatch_retain(dq); dihc->dihc_dq = dq; - dihc->dihc_qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, 0); - dc->dc_data = (void *)dc->dc_flags; + dihc->dihc_qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, + 0, dc_flags); dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF); } @@ -2185,10 +2335,12 @@ dispatch_mach_handoff_reply(dispatch_queue_t dq, _dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port); dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih; dispatch_continuation_t dc = &dih->dih_dc; + uintptr_t dc_flags = DC_FLAG_CONSUME; + _dispatch_mach_handoff_set_wlh(dih, dq); _dispatch_retain(dq); dihc->dihc_dq = dq; - dihc->dihc_qos = _dispatch_continuation_init(dc, dq, block, 0, 
0); + dihc->dihc_qos = _dispatch_continuation_init(dc, dq, block, 0, dc_flags); dc->dc_data = (void *)dc->dc_flags; dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF); } @@ -2253,10 +2405,13 @@ _dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc, _dispatch_thread_context_push(&dihc.dihc_dtc); - _dispatch_continuation_pop_forwarded(dc, dc_flags, cq, { + // DC_FLAG_CONSUME has been set, as we want the block and vouchers + // to be consumed; however, the continuation is not from the continuation + // cache and its lifetime is managed explicitly by the handoff mechanism. + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DC_FLAG_CONSUME); + _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, cq, { dispatch_invoke_with_autoreleasepool(flags, { _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_trace_item_complete(dc); }); }); @@ -2399,7 +2554,7 @@ _dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc, { dc->dc_data = (void *)dc->dc_flags; dc->dc_other = dm; - dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases + dc->do_vtable = vtable; // Must be after dc_flags load, do_vtable aliases } DISPATCH_NOINLINE @@ -2505,9 +2660,18 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dispatch_assert(!dm->ds_is_installed); dm->ds_is_installed = true; - if (!cancelled && dmrr->du_ident) { - (void)_dispatch_unote_register(dmrr, wlh, pri); - dispatch_assert(dmrr->du_is_direct); + uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, + dmsr_disconnect_cnt, relaxed); + if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + } + + if (!dm->dq_priority) { + // _dispatch_mach_reply_kevent_register assumes this has been done; + // unlike regular sources or queues, the FALLBACK flag + // is used so that the priority of the channel doesn't act as + // a QoS floor for incoming messages (26761457) + dm->dq_priority = pri; } if (!cancelled && dm->dm_is_xpc && @@ -2519,32 +2683,28 @@ dm->dm_xpc_term_refs = _dxtr; _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); } - if (!dm->dq_priority) { - // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular sources or queues, the FALLBACK flag - // is used so that the priority of the channel doesn't act as - // a QoS floor for incoming messages (26761457) - dm->dq_priority = pri; - } - uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, - dmsr_disconnect_cnt, relaxed); - if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { - DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + if (!cancelled && dmrr->du_ident) { + dispatch_assert(dmrr->du_is_direct); + // rdar://45419440 this absolutely needs to be done last + // as this can cause an event to be delivered + // and to finish the activation concurrently + (void)_dispatch_unote_register(dmrr, wlh, pri); } } void -_dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume) +_dispatch_mach_activate(dispatch_mach_t dm) { dispatch_priority_t pri; dispatch_wlh_t wlh; // call "super" - _dispatch_lane_activate(dm, allow_resume); + _dispatch_lane_activate(dm); if (!dm->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(dm, &wlh); + // rdar://45419440 this needs to be last if (pri) _dispatch_mach_install(dm, wlh, pri); } } @@ -2639,7 +2799,8 @@ _dispatch_mach_invoke2(dispatch_mach_t dm, } if (dmsr->dmsr_tail) { - if 
(!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; // The channel has pending messages to send. @@ -2726,7 +2887,8 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, goto done; } - if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; if (unlikely(requires_mgr)) { @@ -2898,9 +3060,15 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) DISPATCH_ALWAYS_INLINE static dispatch_queue_t -_dispatch_mach_msg_context_async_reply_queue(void *msg_context) +_dispatch_mach_msg_context_async_reply_queue(dispatch_mach_t dm, + void *msg_context) { - return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + dispatch_queue_t dq; + dq = _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + if (dq == DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF) { + dq = dm->_as_dq; + } + return dq; } static dispatch_continuation_t @@ -2936,19 +3104,80 @@ _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, _dispatch_continuation_free(dc); } +void +dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_id) +{ + mach_msg_trailer_t *tlr = NULL; + mach_msg_mac_trailer_t *mac_tlr; + + if (!filter_id) { + DISPATCH_CLIENT_CRASH((uintptr_t)filter_id, "Filter id should be non-NULL"); + } + + mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(msg, NULL); + if (!hdr) { + DISPATCH_CLIENT_CRASH((uintptr_t)msg, "Message should be non-NULL"); + } + tlr = (mach_msg_trailer_t *)((unsigned char *)hdr + + round_msg(hdr->msgh_size)); + + // The trailer should always be of format zero. 
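+ // (The kernel lays the receive trailer down immediately after the message + // body, at hdr + round_msg(msgh_size); when the trailer requested at + // receive time is at least a mach_msg_mac_trailer_t, its msgh_ad field + // carries the filter policy id that is validated and copied out below.)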
+ if (tlr->msgh_trailer_type != MACH_MSG_TRAILER_FORMAT_0) { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_type, "Trailer format is invalid"); + } + + if (tlr->msgh_trailer_size >= sizeof(mach_msg_mac_trailer_t)) { + mac_tlr = (mach_msg_mac_trailer_t *)tlr; + *filter_id = mac_tlr->msgh_ad; + } else { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_size, "Trailer doesn't contain filter policy id"); + } +} + #pragma mark - #pragma mark dispatch_mig_server +static inline kern_return_t +_dispatch_mig_return_code(mig_reply_error_t *msg) +{ + if (msg->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + return KERN_SUCCESS; + } + return msg->RetCode; +} + +static inline void +_dispatch_mig_consume_unsent_message(mach_msg_header_t *hdr) +{ + mach_port_t port = hdr->msgh_local_port; + if (MACH_PORT_VALID(port)) { + kern_return_t kr = KERN_SUCCESS; + switch (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits)) { + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + kr = mach_port_deallocate(mach_task_self(), port); + break; + case MACH_MSG_TYPE_MOVE_RECEIVE: + kr = mach_port_mod_refs(mach_task_self(), port, + MACH_PORT_RIGHT_RECEIVE, -1); + break; + } + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + mach_msg_destroy(hdr); +} + mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) { mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT - | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; - mach_msg_return_t kr = 0; + mach_msg_return_t kr = 0, skr; uint64_t assertion_token = 0; uint32_t cnt = 1000; // do not stall out serial queues boolean_t demux_success; @@ -2970,7 +3199,9 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (dqf & DSF_CANCELED) || + (--cnt == 0)) { options &= ~MACH_RCV_MSG; tmp_options &= ~MACH_RCV_MSG; @@ -2987,15 +3218,13 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, switch (kr) { case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; case MACH_RCV_TIMED_OUT: // Don't return an error if a message was sent this time or // a message was successfully received previously // rdar://problems/7363620&7791738 - if(bufReply->Head.msgh_remote_port || received) { + if (bufReply->Head.msgh_remote_port || received) { kr = MACH_MSG_SUCCESS; } break; @@ -3005,7 +3234,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, case MACH_RCV_TOO_LARGE: // receive messages that are too large and log their id and size // rdar://problem/8422992 - tmp_options &= ~MACH_RCV_LARGE; + tmp_options &= ~(MACH_RCV_LARGE | MACH_SEND_MSG); size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; void *large_buf = malloc(large_size); if (large_buf) { @@ -3020,9 +3249,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "requested size %zd: id = 0x%x, size = %d", maxmsgsz, bufReply->Head.msgh_id, bufReply->Head.msgh_size); - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - 
mach_msg_destroy(&bufReply->Head); - } + mach_msg_destroy(&bufReply->Head); } if (large_buf) { free(large_buf); @@ -3069,21 +3296,21 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { + skr = MIG_BAD_ID; + } else { + skr = _dispatch_mig_return_code(bufReply); + } + switch (skr) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). bufRequest->Head.msgh_remote_port = 0; mach_msg_destroy(&bufRequest->Head); - } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode - // is present - if (unlikely(bufReply->RetCode)) { - if (bufReply->RetCode == MIG_NO_REPLY) { - continue; - } - - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } } if (bufReply->Head.msgh_remote_port) { @@ -3160,15 +3387,18 @@ dispatch_mach_mig_demux(void *context, desc->stub_routine(hdr, &bufReply->Head); - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present - if (unlikely(!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) && - bufReply->RetCode)) { + switch (_dispatch_mig_return_code(bufReply)) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). hdr->msgh_remote_port = 0; - if (bufReply->RetCode != MIG_NO_REPLY && - (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - mach_msg_destroy(hdr); - } + mach_msg_destroy(hdr); + break; } if (bufReply->Head.msgh_remote_port) { @@ -3184,9 +3414,7 @@ dispatch_mach_mig_demux(void *context, break; case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; default: DISPATCH_VERIFY_MIG(kr); @@ -3228,7 +3456,7 @@ _dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) target && target->dq_label ? target->dq_label : "", target, (mach_port_t)dmrr->du_ident, dmsr->dmsr_send, (mach_port_t)dmsr->du_ident, - dmsr->dmsr_notification_armed ? " (armed)" : "", + os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) ? " (armed)" : "", dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? 
" (pending)" : "", dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt, (bool)(dm->dq_atomic_flags & DSF_CANCELED)); diff --git a/src/mach_internal.h b/src/mach_internal.h index 90a59845a..9f1840eac 100644 --- a/src/mach_internal.h +++ b/src/mach_internal.h @@ -99,7 +99,7 @@ void _dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc, void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free); -void _dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume); +void _dispatch_mach_activate(dispatch_mach_t dm); void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, diff --git a/src/object.c b/src/object.c index 91841c007..8fad3ebad 100644 --- a/src/object.c +++ b/src/object.c @@ -86,18 +86,37 @@ _os_object_retain_with_resurrect(_os_object_t obj) return obj; } -DISPATCH_NOINLINE -void -_os_object_release(_os_object_t obj) +DISPATCH_ALWAYS_INLINE +static inline bool +_os_object_release_inline(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_dec(obj); if (likely(xref_cnt >= 0)) { - return; + return false; } if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } - return _os_object_xref_dispose(obj); + return true; +} + + +DISPATCH_NOINLINE +void +_os_object_release(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_xref_dispose(obj); + } +} + +DISPATCH_NOINLINE +void +_os_object_release_without_xref_dispose(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_release_internal(obj); + } } bool @@ -183,34 +202,39 @@ void dispatch_release(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou); - _os_object_release(dou._os_obj); + if (_os_object_release_inline(dou._os_obj)) { + // bypass -_xref_dispose to avoid the dynamic dispatch + _os_object_xrefcnt_dispose_barrier(dou._os_obj); + _dispatch_xref_dispose(dou); + } } -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) { if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { _dispatch_queue_xref_dispose(dou._dq); - } - switch (dx_type(dou._do)) { - case DISPATCH_SOURCE_KEVENT_TYPE: - _dispatch_source_xref_dispose(dou._ds); - break; + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: + _dispatch_source_xref_dispose(dou._ds); + break; + case DISPATCH_CHANNEL_TYPE: + _dispatch_channel_xref_dispose(dou._dch); + break; #if HAVE_MACH - case DISPATCH_MACH_CHANNEL_TYPE: - _dispatch_mach_xref_dispose(dou._dm); - break; + case DISPATCH_MACH_CHANNEL_TYPE: + _dispatch_mach_xref_dispose(dou._dm); + break; #endif #if DISPATCH_COCOA_COMPAT - case DISPATCH_QUEUE_RUNLOOP_TYPE: - _dispatch_runloop_queue_xref_dispose(dou._dl); - break; + case DISPATCH_QUEUE_RUNLOOP_TYPE: + _dispatch_runloop_queue_xref_dispose(dou._dl); + break; #endif + } } return _dispatch_release_tailcall(dou._os_obj); } -#endif void _dispatch_dispose(dispatch_object_t dou) @@ -227,7 +251,7 @@ _dispatch_dispose(dispatch_object_t dou) if (unlikely(tq && tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { // the workloop fallback global queue is never serviced, so redirect // the finalizer onto a global queue - tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)->_as_dq; + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, 0)->_as_dq; } dx_dispose(dou._do, &allow_free); @@ -291,6 +315,10 
@@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { tq = _dispatch_get_default_queue(false); } + + if (_dispatch_queue_is_cooperative(tq)) { + DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_object_set_target_queue_inline(dou._do, tq); } @@ -305,7 +333,7 @@ dispatch_activate(dispatch_object_t dou) return _dispatch_workloop_activate(dou._dwl); } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - return _dispatch_lane_resume(dou._dl, true); + return _dispatch_lane_resume(dou._dl, DISPATCH_ACTIVATE); } } @@ -331,7 +359,7 @@ dispatch_resume(dispatch_object_t dou) return; } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - _dispatch_lane_resume(dou._dl, false); + _dispatch_lane_resume(dou._dl, DISPATCH_RESUME); } } diff --git a/src/object.m b/src/object.m index 925fccc43..273c5fa3f 100644 --- a/src/object.m +++ b/src/object.m @@ -124,7 +124,7 @@ { struct _os_object_s *o = (struct _os_object_s *)obj; _os_object_refcnt_dispose_barrier(o); - [obj _dispose]; + _os_object_dealloc(obj); } #undef os_retain @@ -170,7 +170,7 @@ -(id)retain { } -(oneway void)release { - return _os_object_release(self); + return _os_object_release_without_xref_dispose(self); } -(NSUInteger)retainCount { @@ -194,10 +194,6 @@ - (void)_xref_dispose { return _os_object_release_internal(self); } -- (void)_dispose { - return _os_object_dealloc(self); -} - @end #pragma mark - @@ -281,16 +277,9 @@ - (void)_dispose { #pragma mark - #pragma mark _dispatch_object -// Force non-lazy class realization rdar://10640168 -#define DISPATCH_OBJC_LOAD() + (void)load {} - @implementation DISPATCH_CLASS(object) DISPATCH_UNAVAILABLE_INIT() -- (void)_dispose { - return _dispatch_dispose(self); // calls _os_object_dealloc() -} - - (NSString *)debugDescription { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; @@ -306,16 +295,20 @@ - (NSString *)debugDescription { return [nsstring stringWithFormat:format, object_getClassName(self), buf]; } -- (void)dealloc DISPATCH_NORETURN { - DISPATCH_INTERNAL_CRASH(0, "Calling dealloc on a dispatch object"); - [super dealloc]; // make clang happy +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wobjc-missing-super-calls" +- (void)dealloc { + return _dispatch_dispose(self); } +#pragma clang diagnostic pop @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); @@ -333,9 +326,25 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS +@implementation DISPATCH_CLASS(channel) +OS_OBJECT_NONLAZY_CLASS_LOAD +DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + _dispatch_channel_xref_dispose(self); + [super _xref_dispose]; +} + +@end + +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(source) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -345,9 +354,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(mach) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - 
(void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -357,9 +368,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue_runloop) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -369,12 +382,16 @@ - (void)_xref_dispose { @end -#define DISPATCH_CLASS_IMPL(name) \ - @implementation DISPATCH_CLASS(name) \ - DISPATCH_OBJC_LOAD() \ +#define EMPTY_OS_OBJECT_CLASS_IMPL(name) \ + OS_OBJECT_NONLAZY_CLASS \ + @implementation name \ + OS_OBJECT_NONLAZY_CLASS_LOAD \ DISPATCH_UNAVAILABLE_INIT() \ @end +#define DISPATCH_CLASS_IMPL(name) \ + EMPTY_OS_OBJECT_CLASS_IMPL(DISPATCH_CLASS(name)) + #if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_CLASS_IMPL(data) #endif @@ -388,6 +405,7 @@ - (void)_xref_dispose { #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_CLASS_IMPL(queue_pthread_root) #endif +DISPATCH_CLASS_IMPL(queue_cooperative) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_attr) DISPATCH_CLASS_IMPL(mach_msg) @@ -395,9 +413,71 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +#pragma mark os_workgroups + +@implementation OS_OBJECT_CLASS(os_workgroup) +DISPATCH_UNAVAILABLE_INIT() +OS_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _os_workgroup_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_dispose(self); + [super dealloc]; +} + +- (NSString *) debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + + os_workgroup_t wg = (os_workgroup_t) self; + _os_workgroup_debug(wg, buf, sizeof(buf)); + + return [nsstring stringWithUTF8String:buf]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_interval) +DISPATCH_UNAVAILABLE_INIT() + +- (void) _xref_dispose { + _os_workgroup_interval_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_interval_dispose(self); + [super dealloc]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_parallel) +DISPATCH_UNAVAILABLE_INIT() +@end + +#pragma mark eventlink + +@implementation OS_OBJECT_CLASS(os_eventlink) +DISPATCH_UNAVAILABLE_INIT() + +- (void) dealloc { + _os_eventlink_dispose(self); + [super dealloc]; +} + +@end + + +#pragma mark vouchers + +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() -(id)retain { return (id)_voucher_retain_inline((struct voucher_s *)self); @@ -407,12 +487,9 @@ -(oneway void)release { return _voucher_release_inline((struct voucher_s *)self); } -- (void)_xref_dispose { - return _voucher_xref_dispose(self); // calls _os_object_release_internal() -} - -- (void)_dispose { - return _voucher_dispose(self); // calls _os_object_dealloc() +- (void)dealloc { + _voucher_dispose(self); + [super dealloc]; } - (NSString *)debugDescription { @@ -428,13 +505,10 @@ - (NSString *)debugDescription { @end #if VOUCHER_ENABLE_RECIPE_OBJECTS +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher_recipe) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() - -- (void)_dispose { - -} - (NSString *)debugDescription { return nil; // TODO: voucher_recipe debugDescription @@ -443,7 +517,6 @@ - (NSString *)debugDescription { @end #endif - #pragma mark - #pragma mark dispatch_last_resort_autorelease_pool @@ -532,6 +605,18 
@@ - (NSString *)debugDescription { } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + @try { + return f(ctxt, i, w); + } + @catch (...) { + objc_terminate(); + } +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index 6985decc7..f11b9c66c 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -178,10 +178,12 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #else #define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, ctype, ...) \ OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##ctype, \ @@ -191,19 +193,21 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #endif #define DISPATCH_QUEUE_VTABLE_HEADER(x); \ DISPATCH_OBJECT_VTABLE_HEADER(x); \ - void (*const dq_activate)(dispatch_queue_class_t, bool *allow_resume); \ - void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ - dispatch_wakeup_flags_t); \ - void (*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ - dispatch_qos_t) + void DISPATCH_VTABLE_ENTRY(dq_activate)(dispatch_queue_class_t); \ + void DISPATCH_VTABLE_ENTRY(dq_wakeup)(dispatch_queue_class_t, \ + dispatch_qos_t, dispatch_wakeup_flags_t); \ + void DISPATCH_VTABLE_ENTRY(dq_push)(dispatch_queue_class_t, \ + dispatch_object_t, dispatch_qos_t) #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) #define dx_type(x) dx_vtable(x)->do_type @@ -240,7 +244,7 @@ #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif -DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_wakeup_flags, uint32_t, // The caller of dx_wakeup owns two internal refcounts on the object being // woken up. Two are needed for WLH wakeups where two threads need // the object to remain valid in a non-coordinated way @@ -262,6 +266,9 @@ DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, // This wakeup may cause the source to leave its DSF_NEEDS_EVENT state DISPATCH_WAKEUP_EVENT = 0x00000010, + + // This wakeup is allowed to clear the ACTIVATING state of the object + DISPATCH_WAKEUP_CLEAR_ACTIVATING = 0x00000020, ); typedef struct dispatch_invoke_context_s { @@ -278,17 +285,25 @@ typedef struct dispatch_invoke_context_s { #if DISPATCH_USE_WORKQUEUE_NARROWING #define DISPATCH_THREAD_IS_NARROWING 1 -#define dispatch_with_disabled_narrowing(dic, ...) 
({ \ +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ + flags |= DISPATCH_INVOKE_DISABLED_NARROWING; \ + __VA_ARGS__; \ + flags &= ~DISPATCH_INVOKE_DISABLED_NARROWING; \ +}) +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \ dic->dic_next_narrow_check = 0; \ __VA_ARGS__; \ dic->dic_next_narrow_check = suspend_narrow_check; \ }) +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ #else -#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ +#define dispatch_with_disabled_narrowing(dic, flags, ...) __VA_ARGS__ #endif -DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_NONE = 0x00000000, // Invoke modes @@ -335,15 +350,19 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, // @const DISPATCH_INVOKE_THREAD_BOUND // We're draining from the context of a thread-bound queue (main thread) // - // @const DISPATCH_INVOKE_WORKER_DRAIN + // @const DISPATCH_INVOKE_WORKLOOP_DRAIN // The queue at the bottom of this drain is a workloop that supports // reordering. // + // @const DISPATCH_INVOKE_COOPERATIVE_DRAIN + // The queue at the bottom of this drain is a cooperative global queue + // DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, DISPATCH_INVOKE_THREAD_BOUND = 0x00080000, DISPATCH_INVOKE_WORKLOOP_DRAIN = 0x00100000, + DISPATCH_INVOKE_COOPERATIVE_DRAIN = 0x00200000, #define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x00ff0000u // Autoreleasing modes @@ -357,9 +376,13 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x01000000, DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x02000000, #define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u + + // @const DISPATCH_INVOKE_DISABLED_NARROWING + // Don't check for narrowing during this invoke + DISPATCH_INVOKE_DISABLED_NARROWING = 0x4000000, ); -DISPATCH_ENUM(dispatch_object_flags, unsigned long, +DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_META_TYPE_MASK = 0x000000ff, // mask for object meta-types _DISPATCH_TYPE_CLUSTER_MASK = 0x000000f0, // mask for the cluster type _DISPATCH_SUB_TYPE_MASK = 0x0000ff00, // mask for object sub-types @@ -367,11 +390,12 @@ DISPATCH_ENUM(dispatch_object_flags, unsigned long, _DISPATCH_OBJECT_CLUSTER = 0x00000000, // dispatch object cluster _DISPATCH_CONTINUATION_TYPE = 0x00000000, // meta-type for continuations - _DISPATCH_SEMAPHORE_TYPE = 0x00000001, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x00000002, // meta-type for data node - _DISPATCH_IO_TYPE = 0x00000003, // meta-type for io channels - _DISPATCH_OPERATION_TYPE = 0x00000004, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x00000005, // meta-type for io disks + _DISPATCH_SWIFT_JOB_TYPE = 0x00000001, // meta-type for swift jobs + _DISPATCH_SEMAPHORE_TYPE = 0x00000002, // meta-type for semaphores + _DISPATCH_NODE_TYPE = 0x00000003, // meta-type for data node + _DISPATCH_IO_TYPE = 0x00000004, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x00000005, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x00000006, // meta-type for io disks _DISPATCH_QUEUE_CLUSTER = 0x00000010, // dispatch queue cluster _DISPATCH_LANE_TYPE = 0x00000011, // meta-type for lanes @@ -400,6 +424,8 @@ DISPATCH_ENUM(dispatch_object_flags, unsigned long, DISPATCH_OPERATION_TYPE = 
DISPATCH_OBJECT_SUBTYPE(0, OPERATION), DISPATCH_DISK_TYPE = DISPATCH_OBJECT_SUBTYPE(0, DISK), + DISPATCH_SWIFT_JOB_TYPE = DISPATCH_OBJECT_SUBTYPE(0, SWIFT_JOB), + DISPATCH_QUEUE_SERIAL_TYPE = DISPATCH_OBJECT_SUBTYPE(1, LANE), DISPATCH_QUEUE_CONCURRENT_TYPE = DISPATCH_OBJECT_SUBTYPE(2, LANE), DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = DISPATCH_OBJECT_SUBTYPE(3, LANE) | @@ -414,12 +440,15 @@ DISPATCH_ENUM(dispatch_object_flags, unsigned long, _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_QUEUE_NETWORK_EVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(8, LANE) | _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE= DISPATCH_OBJECT_SUBTYPE(9, LANE) | + _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_WORKLOOP_TYPE = DISPATCH_OBJECT_SUBTYPE(0, WORKLOOP) | _DISPATCH_QUEUE_BASE_TYPEFLAG, DISPATCH_SOURCE_KEVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(1, SOURCE), - DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), + DISPATCH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), + DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(3, SOURCE), ); typedef struct _os_object_vtable_s { @@ -428,7 +457,7 @@ typedef struct _os_object_vtable_s { typedef struct _os_object_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); } _os_object_s; @@ -443,18 +472,25 @@ typedef struct _os_object_s { #else #define OS_OBJECT_STRUCT_HEADER(x) \ _OS_OBJECT_HEADER(\ - const struct x##_vtable_s *do_vtable, \ + const struct x##_vtable_s *__ptrauth_objc_isa_pointer do_vtable, \ do_ref_cnt, \ do_xref_cnt) #endif -#define _DISPATCH_OBJECT_HEADER(x) \ +#define _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct _os_object_s _as_os_obj[0]; \ OS_OBJECT_STRUCT_HEADER(dispatch_##x); \ - struct dispatch_##x##_s *volatile do_next; \ + struct dispatch_##x##_s *volatile do_next; + + +#define _DISPATCH_OBJECT_HEADER(x) \ + _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ - void *do_finalizer + union { \ + dispatch_function_t DISPATCH_FUNCTION_POINTER do_finalizer; \ + void *do_introspection_ctxt; \ + } #define DISPATCH_OBJECT_HEADER(x) \ struct dispatch_object_s _as_do[0]; \ @@ -467,6 +503,9 @@ typedef struct _os_object_s { return [super init]; \ } +#define DISPATCH_OBJECT_USES_XREF_DISPOSE() \ + OS_OBJECT_USES_XREF_DISPOSE() + _OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); DISPATCH_CLASS_DECL_BARE(object, OBJECT); @@ -480,9 +519,7 @@ size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, void *_dispatch_object_alloc(const void *vtable, size_t size); void _dispatch_object_finalize(dispatch_object_t dou); void _dispatch_object_dealloc(dispatch_object_t dou); -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); -#endif void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT #if USE_OBJC @@ -528,7 +565,7 @@ OS_OBJECT_OBJC_CLASS_DECL(object); // This is required by the dispatch_data_t/NSData bridging, which is not // supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) 
\ - if (unlikely(((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + if (unlikely(((*(uintptr_t *)&((o)._os_obj->os_obj_isa)) & 1) || \ (Class)((o)._os_obj->os_obj_isa) < \ (Class)OS_OBJECT_VTABLE(dispatch_object) || \ (Class)((o)._os_obj->os_obj_isa) >= \ @@ -584,7 +621,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); */ #define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ __typeof__(o) _o = (o); \ - int _ref_cnt = _o->f; \ + int _ref_cnt = os_atomic_load(&_o->f, relaxed); \ if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ } \ diff --git a/src/protocol.defs b/src/protocol.defs index 7a9cf1898..6129f3f1a 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -20,6 +20,7 @@ #include #include +import ; // '64' is used to align with Mach notifications and so that we don't fight // with the notify symbols in Libsystem @@ -28,6 +29,8 @@ subsystem libdispatch_internal_protocol 64; serverprefix _dispatch_; userprefix _dispatch_send_; +ConsumeOnSendError Timeout; + skip; /* was MACH_NOTIFY_FIRST: 64 */ /* MACH_NOTIFY_PORT_DELETED: 65 */ diff --git a/src/queue.c b/src/queue.c index 493de3f21..44cdb4aa5 100644 --- a/src/queue.c +++ b/src/queue.c @@ -36,6 +36,9 @@ static inline void _dispatch_queue_wakeup_with_override( static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, struct dispatch_object_s *dc, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t owned); +static inline bool +_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, + uint64_t dq_state); #pragma mark - #pragma mark dispatch_assert_queue @@ -123,7 +126,7 @@ void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, mach_voucher_t kv) { - _pthread_set_flags_t pflags = 0; + _pthread_set_flags_t pflags = (_pthread_set_flags_t)0; if (pp && _dispatch_set_qos_class_enabled) { pthread_priority_t old_pri = _dispatch_get_priority(); if (pp != old_pri) { @@ -133,9 +136,14 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, // it from the defaultpri, see _dispatch_priority_compute_update pp |= (_dispatch_get_basepri() & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + + // TODO (rokhinip): Right now there is no binding and unbinding + // to a kqueue for a cooperative thread. We'll need to do this + // right once we get that support } else { - // else we need to keep the one that is set in the current pri - pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // else we need to keep the overcommit/cooperative one that is set on the current + // thread + pp |= (old_pri & _PTHREAD_PRIORITY_THREAD_TYPE_MASK); } if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { pflags |= _PTHREAD_SET_SELF_QOS_FLAG; @@ -299,6 +307,22 @@ _dispatch_block_flags_valid(dispatch_block_flags_t flags) return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_block_remember_async_queue(dispatch_block_private_data_t dbpd, + dispatch_queue_t dq) +{ + // balanced in d_block_sync_invoke or d_block_wait + // + // Note: we need to retain _before_ we publish it, + // because dispatch_block_wait() will eagerly + // consume the refcounts. 
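+ // If dbpd_queue was already published, the cmpxchg below fails and the + // speculative retain is dropped again immediately.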
+ _dispatch_retain_2(dq); + if (!os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_release_2(dq); + } +} + DISPATCH_ALWAYS_INLINE static inline dispatch_block_flags_t _dispatch_block_normalize_flags(dispatch_block_flags_t flags) @@ -663,10 +687,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, uintptr_t dc_flags = dc->dc_flags; pthread_priority_t pp = 0; - // balanced in d_block_async_invoke_and_release or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); if (dc_flags & DC_FLAG_CONSUME) { dc->dc_func = _dispatch_block_async_invoke_and_release; @@ -939,7 +960,7 @@ _dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } return _dispatch_lane_barrier_complete(dq, 0, flags); } @@ -1063,9 +1084,10 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, // these bits should be set if the lock was never contended/discovered. const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | - DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; uint64_t old_state, new_state; + dispatch_wakeup_flags_t flags = 0; // similar to _dispatch_queue_drain_try_unlock os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { @@ -1074,7 +1096,7 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; if (unlikely(old_state & fail_unlock_mask)) { os_atomic_rmw_loop_give_up({ - return _dispatch_lane_barrier_complete(dq, 0, 0); + return _dispatch_lane_barrier_complete(dq, 0, flags); }); } }); @@ -1104,7 +1126,6 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, { dispatch_wlh_t waiter_wlh = dsc->dc_data; -#if DISPATCH_USE_KEVENT_WORKLOOP // // We need to interact with a workloop if any of the following 3 cases: // 1. the current owner of the lock has a SYNC_WAIT knote to destroy @@ -1117,10 +1138,9 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next // owner is really woken up when the thread event is signaled. 
// -#endif - if (_dq_state_in_sync_transfer(old_state) || - _dq_state_in_sync_transfer(new_state) || - (waiter_wlh != DISPATCH_WLH_ANON)) { + if ((_dq_state_is_base_wlh(old_state) && !dsc->dsc_from_async) || + _dq_state_is_base_wlh(new_state) || + waiter_wlh != DISPATCH_WLH_ANON) { _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); } if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { @@ -1185,8 +1205,8 @@ _dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, } if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // _dispatch_barrier_async_and_wait_f_slow() expects dc_other to be the - // bottom queue of the graph + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph dsc->dc_other = dq; } return _dispatch_waiter_wake_wlh_anon(dsc); @@ -1250,17 +1270,10 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, } // passing the QoS of `dq` helps pushing on low priority waiters with // legacy workloops. -#if DISPATCH_INTROSPECTION dsc->dsc_from_async = false; -#endif return dx_push(tq, dsc, _dq_state_max_qos(old_state)); } - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // _dispatch_async_and_wait_f_slow() expects dc_other to be the - // bottom queue of the graph - dsc->dc_other = dq; - } #if DISPATCH_INTROSPECTION if (dsc->dsc_from_async) { _dispatch_trace_runtime_event(async_sync_handoff, dq, 0); @@ -1268,6 +1281,12 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, _dispatch_trace_runtime_event(sync_sync_handoff, dq, 0); } #endif // DISPATCH_INTROSPECTION + + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // Falling into case (2) of _dispatch_async_and_wait_f_slow, dc_other is + // the bottom queue + dsc->dc_other = dq; + } return _dispatch_waiter_wake(dsc, wlh, old_state, new_state); } @@ -1286,13 +1305,18 @@ _dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, transfer_lock_again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } + new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (_dq_state_is_base_wlh(old_state)) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (next_dc) { // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -1334,7 +1358,13 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, enqueue = 0; } +again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto again); + } new_state = _dq_state_merge_qos(old_state - owned, qos); new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; if (unlikely(_dq_state_is_suspended(old_state))) { @@ -1381,8 +1411,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + 
!_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, old_state, new_state, flags); } @@ -1556,11 +1585,8 @@ _dispatch_wait_prepare(dispatch_queue_t dq) os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { if (_dq_state_is_suspended(old_state) || - !_dq_state_is_base_wlh(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - if (!_dq_state_drain_locked(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_is_base_wlh(old_state) || + !_dq_state_in_uncontended_sync(old_state)) { os_atomic_rmw_loop_give_up(return old_state); } new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; @@ -1637,13 +1663,19 @@ __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) (uint8_t)_dispatch_get_basepri_override_qos_floor(); _dispatch_thread_event_init(&dsc->dsc_event); } + + _dispatch_set_current_dsc((void *) dsc); dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); + _dispatch_trace_runtime_event(sync_wait, dq, 0); if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_wait(&dsc->dsc_event); // acquire - } else { + } else if (!dsc->dsc_wlh_self_wakeup) { _dispatch_event_loop_wait_for_ownership(dsc); } + + _dispatch_clear_current_dsc(); + if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_destroy(&dsc->dsc_event); // If _dispatch_sync_waiter_wake() gave this thread an override, @@ -1726,6 +1758,8 @@ _dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt, __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq); if (dsc.dsc_func == NULL) { + // dsc_func being cleared means that the block ran on another thread ie. + // case (2) as listed in _dispatch_async_and_wait_f_slow. dispatch_queue_t stop_dq = dsc.dc_other; return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags); } @@ -1886,10 +1920,8 @@ _dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, } ov = _dispatch_set_priority_and_voucher(p, v, 0); - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); + if (dc_flags & DC_FLAG_BARRIER) { _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags); @@ -1983,6 +2015,34 @@ static void _dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags, dispatch_sync_context_t dsc, dispatch_queue_t tq) { + /* dc_other is an in-out parameter. + * + * As an in-param, it specifies the top queue on which the blocking + * primitive is called. + * + * As an out-param, it refers to the queue up till which we have the drain + * lock. This is slightly different depending on how we come out of + * _WAIT_FOR_QUEUE. + * + * Case 1: + * If the continuation is to be invoked on another thread - for + * async_and_wait, or we ran on a thread bound main queue - then someone + * already called _dispatch_async_and_wait_invoke which invoked the block + * already. dc_other as an outparam here tells the enqueuer the queue up + * till which the enqueuer got the drain lock so that we know what to unlock + * on the way out. This is the case whereby the enqueuer owns part of the + * locks in the queue hierachy (but not all). + * + * Case 2: + * If the continuation is to be invoked on the enqueuing thread - because + * we were contending with another sync or async_and_wait - then enqueuer + * return from _WAIT_FOR_QUEUE without having invoked the block. 
The + * enqueuer has had the locks for the rest of the queue hierarchy handed off + * to it, so dc_other specifies the queue up till which it has the locks, + * which in this case is up till the bottom queue in the hierarchy. So it + * needs to unlock everything up till the bottom queue on the way out. */ + __DISPATCH_WAIT_FOR_QUEUE__(dsc, tq); if (unlikely(dsc->dsc_func == NULL)) { @@ -2010,11 +2070,19 @@ _dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, dispatch_tid tid, - uintptr_t dc_flags) +_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags) { uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { + // Remove the async_and_wait flag but drive down the slow path so that + // we do the synchronous wait. We are guaranteed that dq is the base + // queue. + // + // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so + // set dc_other to dq + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return false; } if (likely(dc_flags & DC_FLAG_BARRIER)) { @@ -2034,7 +2102,8 @@ _dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, _dispatch_trace_item_push(top_dq, dsc); for (;;) { - if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, tid, dc_flags))){ + if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, dsc, tid, + dc_flags))) { return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); } @@ -2130,10 +2199,7 @@ _dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq, v = _voucher_get(); } - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); dispatch_tid tid = _dispatch_tid_self(); struct dispatch_sync_context_s dsc = { @@ -2406,6 +2472,11 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) { uint64_t old_state, new_state, role; + /* TODO (rokhinip): We're going to have to change this in the future when we + * allow targeting queues to a cooperative pool and need to figure out what + * kind of a role that gives the queue */ + dispatch_assert(!_dispatch_queue_is_cooperative(tq)); + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { role = DISPATCH_QUEUE_ROLE_INNER; } else if (_dispatch_base_lane_is_wlh(dq, tq)) { @@ -2513,7 +2584,7 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, rqp &= DISPATCH_PRIORITY_REQUESTED_MASK; if (p < rqp) p = rqp; - p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + p |= (tq->dq_priority & DISPATCH_PRIORITY_THREAD_TYPE_MASK); if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) || !(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) { p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR); @@ -2530,19 +2601,6 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } -DISPATCH_ALWAYS_INLINE -static void -_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) -{ - uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; -#if DISPATCH_SIZEOF_PTR == 4 - dq_state >>= 32; -#endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch queue/source property setter called after activation"); -} - DISPATCH_ALWAYS_INLINE static void
_dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) @@ -2630,8 +2688,12 @@ _dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq, if (_dispatch_is_in_root_queues_array(tq)) { dispatch_qos_t qos = _dispatch_priority_qos(pri); if (!qos) qos = DISPATCH_QOS_DEFAULT; - tq = _dispatch_get_root_queue(qos, - pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq; + + // TODO (rokhinip): In future, might want to consider whether dq + // itself might be tagged cooperative and therefore we need to + // adjust tq accordingly + uintptr_t flags = (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? DISPATCH_QUEUE_OVERCOMMIT : 0; + tq = _dispatch_get_root_queue(qos, flags)->_as_dq; } return tq; } @@ -2697,6 +2759,8 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa, qos = _dispatch_priority_qos(tq->dq_priority); } tq = NULL; + } else if (tq && _dispatch_queue_is_cooperative(tq)) { + DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); } else if (tq && !tq->do_targetq) { // target is a pthread or runloop root queue, setting QoS or overcommit // is disallowed @@ -2713,9 +2777,10 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } if (!tq) { + uintptr_t flags = (overcommit == _dispatch_queue_attr_overcommit_enabled) ? DISPATCH_QUEUE_OVERCOMMIT : 0; tq = _dispatch_get_root_queue( qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos, - overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; + flags)->_as_dq; if (unlikely(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } @@ -2837,9 +2902,17 @@ void _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) { dispatch_lane_t dq = dqu._dl; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + if (unlikely(dq->dq_items_tail)) { + DISPATCH_CLIENT_CRASH(dq->dq_items_tail, + "Release of a queue while items are enqueued"); + } + dq->dq_items_head = (void *)0x200; + dq->dq_items_tail = (void *)0x200; + + uint64_t orig_dq_state, dq_state; + dq_state = orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); if (dx_hastypeflag(dq, QUEUE_ROOT)) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } @@ -2848,23 +2921,15 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; if (unlikely(dq_state != initial_state)) { if (_dq_state_drain_locked(dq_state)) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a locked queue"); } #if DISPATCH_SIZEOF_PTR == 4 - dq_state >>= 32; + orig_dq_state >>= 32; #endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a queue with corrupt state"); } - - if (unlikely(dq->dq_items_tail)) { - DISPATCH_CLIENT_CRASH(dq->dq_items_tail, - "Release of a queue while items are enqueued"); - } - dq->dq_items_head = (void *)0x200; - dq->dq_items_tail = (void *)0x200; - _dispatch_queue_dispose(dqu, allow_free); } @@ -2883,7 +2948,7 @@ _dispatch_queue_xref_dispose(dispatch_queue_t dq) if (unlikely(_dq_state_is_suspended(dq_state))) { long state = (long)dq_state; if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); - if (unlikely(_dq_state_is_inactive(dq_state))) { + if (unlikely(dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { // 
Arguments for and against this assert are within 6705399 DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); } @@ -2984,26 +3049,23 @@ _dispatch_lane_resume_slow(dispatch_lane_t dq) retry: _dispatch_queue_sidelock_unlock(dq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } DISPATCH_NOINLINE static void _dispatch_lane_resume_activate(dispatch_lane_t dq) { - bool allow_resume = true; - // Step 2: run the activation finalizer if (dx_vtable(dq)->dq_activate) { - dx_vtable(dq)->dq_activate(dq, &allow_resume); - } - // Step 3: consume the suspend count - if (allow_resume) { - return _dispatch_lane_resume(dq, false); + dx_vtable(dq)->dq_activate(dq); } + + _dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE); } +DISPATCH_NOINLINE void -_dispatch_lane_resume(dispatch_lane_t dq, bool activate) +_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) { // covers all suspend and inactive bits, including side suspend bit const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; @@ -3011,64 +3073,93 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_UNCONTENDED_SYNC; // backward compatibility: only dispatch sources can abuse // dispatch_resume() to really mean dispatch_activate() bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); uint64_t old_state, new_state; + // // Activation is a bit tricky as it needs to finalize before the wakeup. // - // If after doing its updates to the suspend count and/or inactive bit, - // the last suspension related bit that would remain is the - // NEEDS_ACTIVATION one, then this function: + // The inactive bits have 4 states: + // - 11: INACTIVE + // - 10: ACTIVATED, but not activating yet + // - 01: ACTIVATING right now + // - 00: fully active // - // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into - // a suspend count) - // 2. runs the activation finalizer - // 3. consumes the suspend count set in (1), and finishes the resume flow + // ACTIVATED is only used when the queue is otherwise also suspended. + // In that case the last resume will take over the activation. // - // Concurrently, some property setters such as setting dispatch source - // handlers or _dispatch_lane_set_target_queue try to do in-place changes - // before activation. These protect their action by taking a suspend count. - // Step (1) above cannot happen if such a setter has locked the object. + // The ACTIVATING state is tricky because it may be cleared by sources + // firing, to avoid priority inversion problems such as rdar://45419440 + // where as soon as the kevent is installed, the source may fire + // before its activating state was cleared.
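+	//
+	// For instance (an illustrative sketch, public API only): a queue that
+	// is created inactive and then also suspended takes the ACTIVATED state
+	// on dispatch_activate(), and the last dispatch_resume() takes over the
+	// actual activation:
+	//
+	//   dispatch_queue_t q = dispatch_queue_create("q",
+	//       dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL));
+	//   dispatch_suspend(q);  // { sc:1, i = INACTIVE }
+	//   dispatch_activate(q); // { sc:1, i = ACTIVATED }, finalizer deferred
+	//   dispatch_resume(q);   // { sc:0, i = ACTIVATING }, dq_activate() runs,
+	//                         // then the queue becomes fully active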
+ // + if (op == DISPATCH_ACTIVATE) { // relaxed atomic because this doesn't publish anything, this is only // about picking the thread that gets to finalize the activation os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if ((old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; - } else if (_dq_state_is_inactive(old_state)) { - // { sc:>0 i:1 na:1 } -> { i:0 na:1 } - // simple activation because sc is not 0 - // resume will deal with na:1 later - new_state = old_state - DISPATCH_QUEUE_INACTIVE; - } else { - // object already active, this is a no-op, just exit + if (!_dq_state_is_inactive(old_state)) { + // object already active or activated os_atomic_rmw_loop_give_up(return); } + if (unlikely(_dq_state_suspend_cnt(old_state))) { + // { sc != 0, i = INACTIVE } -> i = ACTIVATED + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATED; + } else { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATING; + } + }); + } else if (op == DISPATCH_ACTIVATION_DONE) { + // release barrier needed to publish the effect of dq_activate() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) { + os_atomic_rmw_loop_give_up({ + // object activation was already concurrently done + // due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING + // wakeup call. + // + // We still need to consume the internal refcounts because + // the wakeup doesn't take care of these. + return _dispatch_release_2_tailcall(dq); + }); + } + + new_state = old_state - DISPATCH_QUEUE_ACTIVATING; + if (!_dq_state_is_runnable(new_state)) { + // Out of width or still suspended. 
+ // For the former, force _dispatch_lane_non_barrier_complete + // to reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(new_state)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else { + // clear overrides and force a wakeup + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } }); + if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { + DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state"); + } } else { // release barrier needed to publish the effect of // - dispatch_set_target_queue() // - dispatch_set_*_handler() - // - dq_activate() os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - if ((old_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL - + DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; - } else if (is_source && (old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; + new_state = old_state; + if (is_source && (old_state & suspend_bits) == + DISPATCH_QUEUE_INACTIVE) { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state -= DISPATCH_QUEUE_INACTIVE; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (unlikely(os_sub_overflow(old_state, DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) { // underflow means over-resume or a suspend count transfer @@ -3082,6 +3173,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) // // below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL // + } else if (_dq_state_is_activated(new_state)) { + // { sc = 1, i = ACTIVATED } -> i = ACTIVATING + new_state -= DISPATCH_QUEUE_ACTIVATED; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (!_dq_state_is_runnable(new_state)) { // Out of width or still suspended. 
// For the former, force _dispatch_lane_non_barrier_complete @@ -3109,20 +3204,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) }); } - if ((old_state ^ new_state) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + if (_dq_state_is_activating(new_state)) { return _dispatch_lane_resume_activate(dq); } - if (activate) { - // if we're still in an activate codepath here we should have - // { sc:>0 na:1 }, if not we've got a corrupt state - if (unlikely(!_dq_state_is_suspended(new_state))) { - DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); - } - return; - } - if (_dq_state_is_suspended(new_state)) { return; } @@ -3132,7 +3217,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } // Balancing the retain_2 done in suspend() for rdar://8181908 dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; @@ -3144,8 +3229,6 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) } return _dispatch_release_2(dq); } - dispatch_assert(!_dq_state_received_sync_wait(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); over_resume: @@ -3259,6 +3342,11 @@ _dispatch_lane_legacy_set_target_queue(void *ctxt) // see _dispatch_queue_wakeup() _dispatch_queue_sidelock_lock(dq); #endif + if (unlikely(!_dispatch_queue_is_mutable(dq))) { + /* serialize with _dispatch_mach_handoff_set_wlh */ + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); + } dq->do_targetq = tq; #if HAVE_PTHREAD_WORKQUEUE_QOS // see _dispatch_queue_wakeup() @@ -3280,7 +3368,7 @@ _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq) if (_dispatch_lane_try_inactive_suspend(dq)) { _dispatch_object_set_target_queue_inline(dq, tq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } #if !DISPATCH_ALLOW_NON_LEAF_RETARGET @@ -3347,8 +3435,10 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) } if (_dq_state_is_inactive(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); - } else if (_dq_state_needs_activation(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); + } else if (_dq_state_is_activated(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activated"); + } else if (_dq_state_is_activating(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activating"); } if (_dq_state_is_enqueued(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); @@ -3401,7 +3491,7 @@ static struct { uint64_t volatile time_total; uint64_t volatile count_total; uint64_t volatile thread_total; -} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS] DISPATCH_ATOMIC64_ALIGN; DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; void @@ -3454,11 +3544,13 @@ _dispatch_poll_for_events_4launchd(void) } #if DISPATCH_USE_WORKQUEUE_NARROWING + +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); #if !DISPATCH_TIME_UNIT_USES_NANOSECONDS DISPATCH_STATIC_GLOBAL(uint64_t 
_dispatch_narrow_check_interval_cache); -#endif +#endif /* !DISPATCH_TIME_UNIT_USES_NANOSECONDS */ DISPATCH_ALWAYS_INLINE static inline uint64_t @@ -3497,7 +3589,7 @@ _dispatch_queue_drain_should_narrow_slow(uint64_t now, if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); } - size_t idx = DISPATCH_QOS_BUCKET(qos); + int idx = DISPATCH_QOS_BUCKET(qos); os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); @@ -3529,9 +3621,50 @@ _dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) } return false; } + +bool +dispatch_swift_job_should_yield(void) +{ + return false; +} + +#else /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t __unused dic) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + return (quantum_expiry_action & PTHREAD_WQ_QUANTUM_EXPIRY_NARROW) != 0; +} +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) + +bool +dispatch_swift_job_should_yield(void) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + /* We want to return true here regardless of what the quantum expiry action + * is. There will be specific logic in root queue drain to handle the + * various specific reasons. + * + * TODO (rokhinip): There is room for some potential optimization to return + * false here if there is nothing else enqueued on the root queue we're + * draining + */ + return quantum_expiry_action != 0; +} + +#endif /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + #else #define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) #define _dispatch_queue_drain_should_narrow(dic) false + +bool +dispatch_swift_job_should_yield(void) +{ + return false; +} #endif /* @@ -3601,7 +3734,8 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, if (unlikely(serial_drain != (dq->dq_width == 1))) { break; } - if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + if (unlikely(!(flags & DISPATCH_INVOKE_DISABLED_NARROWING) && + _dispatch_queue_drain_should_narrow(dic))) { break; } if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { @@ -3726,13 +3860,11 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, struct dispatch_object_s *dc = dic->dic_barrier_waiter; dispatch_qos_t qos = dic->dic_barrier_waiter_bucket; if (dc) { + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + dsc->dsc_from_async = true; dic->dic_barrier_waiter = NULL; dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED; owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; -#if DISPATCH_INTROSPECTION - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; - dsc->dsc_from_async = true; -#endif if (qos) { return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl, dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned); @@ -3769,8 +3901,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, } void -_dispatch_lane_activate(dispatch_lane_class_t dq, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_lane_activate(dispatch_lane_class_t dq) { dispatch_queue_t tq = dq._dl->do_targetq; dispatch_priority_t pri = dq._dl->dq_priority; @@ -3925,6 +4056,9 @@ static void _dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) { if (dwl->dwl_attr) { + if (dwl->dwl_attr->workgroup) { + _os_object_release(dwl->dwl_attr->workgroup->_as_os_obj); + }
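+		// (the release above balances the external retain taken in
+		// dispatch_workloop_set_os_workgroup())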
free(dwl->dwl_attr); } } @@ -3993,6 +4127,22 @@ dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, #endif // TARGET_OS_MAC } +void +dispatch_workloop_set_os_workgroup(dispatch_workloop_t dwl, os_workgroup_t wg) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + os_workgroup_t old_wg = dwl->dwl_attr->workgroup; + if (old_wg) { + _os_object_release(old_wg->_as_os_obj); + } + + /* Take an external ref count on the workgroup */ + _os_object_retain(wg->_as_os_obj); + dwl->dwl_attr->workgroup = wg; +} + void dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, qos_class_t cls, uint64_t flags) @@ -4018,6 +4168,19 @@ dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; } +#if DISPATCH_IOHID_SPI +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dwl->dwl_attr->dwla_observers = *observer_hooks; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; +} +#endif + #if TARGET_OS_MAC static void _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, @@ -4026,8 +4189,11 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, uint64_t old_state, new_state; dispatch_queue_global_t dprq; +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated" dprq = dispatch_pthread_root_queue_create( "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); +#pragma clang diagnostic pop dwl->do_targetq = dprq->_as_dq; _dispatch_retain(dprq); @@ -4038,18 +4204,6 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); } - -static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { - DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), - .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - .do_ctxt = NULL, - .dq_label = "com.apple.root.workloop-custom", - .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | - DISPATCH_PRIORITY_SATURATED_OVERRIDE, - .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, - .dgq_thread_pool_size = 1, -}; #endif // TARGET_OS_MAC static void @@ -4073,6 +4227,17 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) dwl->do_targetq = (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; } + + if (dwla->workgroup != NULL) { + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. 
We want this path to always + // be taken on workloops that have an associated workgroup with them + // because there is no easy way to join and leave a workgroup for just a + // single block + dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); } @@ -4157,10 +4322,13 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) void _dispatch_workloop_activate(dispatch_workloop_t dwl) { - uint64_t dq_state = os_atomic_and_orig2o(dwl, dq_state, - ~DISPATCH_QUEUE_INACTIVE, relaxed); + // This transitions either: + // - from INACTIVE to ACTIVATING + // - or from ACTIVE to ACTIVE + uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state, + ~DISPATCH_QUEUE_ACTIVATED, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) { + if (likely(_dq_state_is_inactive(old_state))) { if (dwl->dwl_attr) { // Activation of a workloop with attributes forces us to create // the workloop up front and register the attributes with the @@ -4172,10 +4340,8 @@ _dispatch_workloop_activate(dispatch_workloop_t dwl) _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); } dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_NEEDS_ACTIVATION, - relaxed); - _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); - return; + os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); + return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); } } @@ -4219,9 +4385,15 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, uint64_t *owned) { + dispatch_workloop_attr_t dwl_attr = dwl->dwl_attr; dispatch_thread_frame_s dtf; struct dispatch_object_s *dc = NULL, *next_dc; + if (dwl_attr && + (dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS)) { + _dispatch_set_pthread_root_queue_observer_hooks( + &dwl_attr->dwla_observers); + } _dispatch_thread_frame_push(&dtf, dwl); for (;;) { @@ -4258,10 +4430,12 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, *owned = (*owned & DISPATCH_QUEUE_ENQUEUED) + DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return NULL; out_with_barrier_waiter: _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return dwl->do_targetq; } @@ -4271,7 +4445,7 @@ _dispatch_workloop_invoke(dispatch_workloop_t dwl, { flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN; - _dispatch_queue_class_invoke(dwl, dic, flags, 0,_dispatch_workloop_invoke2); + _dispatch_queue_class_invoke(dwl, dic, flags, 0, _dispatch_workloop_invoke2); } DISPATCH_ALWAYS_INLINE @@ -4304,13 +4478,17 @@ _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, } os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (likely(_dq_state_is_base_wlh(old_state))) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (has_more_work) { // we know there's a next 
item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -4361,7 +4539,13 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, uint64_t old_state, new_state; +transfer_lock_again: os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = _dq_state_merge_qos(old_state, qos); new_state -= DISPATCH_QUEUE_IN_BARRIER; new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -4405,8 +4589,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl, old_state, new_state, flags); } @@ -4497,6 +4680,9 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (_dq_state_max_qos(new_state)) { + // We need to make sure we have the enqueued bit when we are making + // the syscall to update QoS and we know that we will do it since + // we're at the base anyways new_state |= DISPATCH_QUEUE_ENQUEUED; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { @@ -4545,11 +4731,12 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | - DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + DISPATCH_QUEUE_UNCONTENDED_SYNC; uint64_t old_state, new_state; os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { - new_state = _dq_state_merge_qos(old_state, qos); + new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; if (unlikely(_dq_state_drain_locked(old_state))) { // not runnable, so we should just handle overrides @@ -4562,14 +4749,36 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, } }); - dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && + _dispatch_async_and_wait_should_always_async(dwl, new_state)) { + dsc->dc_other = dwl; + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + } + + if (_dq_state_is_base_wlh(new_state) && dsc->dc_data != DISPATCH_WLH_ANON) { + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + } if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_workloop_barrier_complete(dwl, qos, 0); + dispatch_wakeup_flags_t flags = 0; + // We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element + // we pushed is still at the head, we can cheat, dequeue everything, + // and keep pretending we weren't contended. 
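+		//
+		// (Illustrative walkthrough: a dispatch_sync() onto an otherwise
+		// idle workloop pushes its waiter, wins IN_BARRIER in this very
+		// transition, then pops its own dsc right back off and runs inline;
+		// __DISPATCH_WAIT_FOR_QUEUE__ sees dsc_wlh_self_wakeup and skips
+		// the event loop wait entirely, so no kernel round trip happens.)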
+ if (dsc->dsc_wlh_was_first && _dispatch_workloop_get_head(dwl, qos) == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dwl; + } + _dispatch_workloop_pop_head(dwl, qos, dc); + return; + } + return _dispatch_workloop_barrier_complete(dwl, qos, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dwl, new_state, 0); } } @@ -4625,7 +4834,12 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, } _dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, assumed_rq); } @@ -4653,8 +4867,13 @@ static void _dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq, dispatch_object_t dou, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = dou._dc; if (_dispatch_object_is_redirection(dc)) { @@ -4678,8 +4897,13 @@ static void _dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq, dispatch_queue_t dq, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); @@ -4827,6 +5051,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { dispatch_queue_t dq = dqu._dq; + uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT); if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) { @@ -4852,18 +5077,30 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, } if (target) { - uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; if (target == DISPATCH_QUEUE_WAKEUP_MGR) { enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); - if (likely(!_dq_state_is_suspended(old_state) && + if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) { + // When an event is being delivered to a source because its 
+ // unote was being registered before the ACTIVATING state + // had a chance to be cleared, we don't want to fail the wakeup + // which could lead to a priority inversion. + // + // Instead, these wakeups are allowed to finish the pending + // activation. + if (_dq_state_is_activating(old_state)) { + new_state &= ~DISPATCH_QUEUE_ACTIVATING; + } + } + if (likely(!_dq_state_is_suspended(new_state) && !_dq_state_is_enqueued(old_state) && (!_dq_state_drain_locked(old_state) || - (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR && - _dq_state_is_base_wlh(old_state))))) { + enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR))) { + // Always set the enqueued bit for async enqueues on all queues + // in the hierarchy new_state |= enqueue; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { @@ -4872,52 +5109,85 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, os_atomic_rmw_loop_give_up(goto done); } }); - - if (likely((old_state ^ new_state) & enqueue)) { - dispatch_queue_t tq; - if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { - // the rmw_loop above has no acquire barrier, as the last block - // of a queue asyncing to that queue is not an uncommon pattern - // and in that case the acquire would be completely useless - // - // so instead use depdendency ordering to read - // the targetq pointer. - os_atomic_thread_fence(dependency); - tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, - (long)new_state); - } else { - tq = target; - } - dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); - } #if HAVE_PTHREAD_WORKQUEUE_QOS - if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { - return _dispatch_queue_wakeup_with_override(dq, new_state, - flags); - } - } } else if (qos) { // // Someone is trying to override the last work item of the queue. // - uint64_t old_state, new_state; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if (!_dq_state_drain_locked(old_state) || - !_dq_state_is_enqueued(old_state)) { + // Avoid spurious override if the item was drained before we could + // apply an override + if (!_dq_state_drain_locked(old_state) && + !_dq_state_is_enqueued(old_state)) { os_atomic_rmw_loop_give_up(goto done); } new_state = _dq_state_merge_qos(old_state, qos); + if (_dq_state_is_base_wlh(old_state) && + !_dq_state_is_suspended(old_state) && + /* */ + !_dq_state_is_enqueued_on_manager(old_state)) { + + // Always set the enqueued bit for async enqueues on all queues + // in the hierarchy (rdar://62447289) + // + // Scenario: + // - mach channel DM + // - targeting TQ + // + // Thread 1: + // - has the lock on (TQ), uncontended sync + // - causes a wakeup at a low QoS on DM, causing it to have: + // max_qos = UT, enqueued = 1 + // - the enqueue of DM onto TQ hasn't happened yet. + // + // Thread 2: + // - an incoming IN IPC is being merged on the servicer + // - DM having qos=UT, enqueued=1, no further enqueue happens, + // but we need an extra override and go through this code for + // TQ. + // - this causes TQ to be "stashed", which requires the enqueued + // bit set, else try_lock_wlh() will complain and the + // wakeup refcounting will be off.
+ new_state |= enqueue; + } + if (new_state == old_state) { os_atomic_rmw_loop_give_up(goto done); } }); + + target = DISPATCH_QUEUE_WAKEUP_TARGET; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } else { + goto done; + } + + if (likely((old_state ^ new_state) & enqueue)) { + dispatch_queue_t tq; + if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { + // the rmw_loop above has no acquire barrier, as the last block + // of a queue asyncing to that queue is not an uncommon pattern + // and in that case the acquire would be completely useless + // + // so instead use dependency ordering to read + // the targetq pointer. + os_atomic_thread_fence(dependency); + tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, + (long)new_state); + } else { + tq = target; + } + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { if (_dq_state_should_override(new_state)) { - return _dispatch_queue_wakeup_with_override(dq, new_state, flags); + return _dispatch_queue_wakeup_with_override(dq, new_state, + flags); } -#endif // HAVE_PTHREAD_WORKQUEUE_QOS } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS done: if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) { return _dispatch_release_2_tailcall(dq); @@ -4969,6 +5239,10 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, if (unlikely(_dispatch_queue_push_item(dq, dsc))) { if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { + // If this returns true, we know that we are pushing onto the base + // queue + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -4976,7 +5250,8 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | - DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + DISPATCH_QUEUE_UNCONTENDED_SYNC; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; @@ -4995,16 +5270,30 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, } }); - if (_dq_state_is_base_wlh(old_state)) { + if (_dq_state_is_base_wlh(old_state) && dsc->dc_data != DISPATCH_WLH_ANON) { dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); } if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + struct dispatch_object_s *dc = (struct dispatch_object_s *)dsc; + // We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element + // we pushed is still at the head, we can cheat, dequeue everything, + // and keep pretending we weren't contended.
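+		//
+		// (This mirrors the self-wakeup fast path in
+		// _dispatch_workloop_push_waiter() above.)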
+ if (dsc->dsc_wlh_was_first && dq->dq_items_head == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dq; + } + _dispatch_queue_pop_head(dq, dc); + return; + } return _dispatch_lane_barrier_complete(dq, qos, 0); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } } @@ -5015,7 +5304,7 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, os_atomic_rmw_loop_give_up(return); } }); - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5073,6 +5362,15 @@ void _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, dispatch_qos_t qos) { + if (unlikely(_dispatch_queue_is_cooperative(dq))) { + /* If we're here, means that we're in the simulator fallback case. We + * still restrict what can target the cooperative thread pool */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + // reserving non barrier width // doesn't fail if only the ENQUEUED bit is set (unlike its barrier // width equivalent), so we have to check that this thread hasn't @@ -5087,15 +5385,403 @@ _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, _dispatch_lane_push(dq, dou, qos); } +void +dispatch_async_swift_job(dispatch_queue_t dq, void *object, qos_class_t qos) +{ + dispatch_swift_continuation_t swift_dc; + swift_dc = (dispatch_swift_continuation_t) object; + + dispatch_object_flags_t object_flags = dx_type(swift_dc); + if (object_flags != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(object_flags, + "Used Swift only SPI to enqueue non-Swift runtime objects into dispatch"); + } + + dx_push(dq, swift_dc->_as_do, _dispatch_qos_from_qos_class(qos)); +} + #pragma mark - -#pragma mark dispatch_mgr_queue +#pragma mark dispatch_channel_t -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE -struct _dispatch_mgr_sched_s { - volatile int prio; - volatile qos_class_t qos; - int default_prio; - int policy; +void +_dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free) +{ + dch->dch_callbacks = NULL; + _dispatch_lane_class_dispose(dch, allow_free); +} + +void +_dispatch_channel_xref_dispose(dispatch_channel_t dch) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch->_as_dq); + if (callbacks->dcc_acknowledge_cancel && !(dqf & DSF_CANCELED)) { + DISPATCH_CLIENT_CRASH(dch, "Release of a channel that has not been " + "cancelled, but has a cancel acknowledgement callback"); + } + dx_wakeup(dch, 0, DISPATCH_WAKEUP_MAKE_DIRTY); +} + +typedef struct dispatch_channel_invoke_ctxt_s { + dispatch_channel_t dcic_dch; + dispatch_thread_frame_s dcic_dtf; + dispatch_invoke_context_t dcic_dic; + dispatch_invoke_flags_t dcic_flags; + dispatch_queue_wakeup_target_t dcic_tq; + struct dispatch_object_s *dcic_next_dc; + bool dcic_called_drain; +} dispatch_channel_invoke_ctxt_s; + +static bool 
+_dispatch_channel_invoke_cancel_check(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t ctxt, + dispatch_channel_callbacks_t callbacks) +{ + bool rc = true; + if (!dch->dm_cancel_handler_called) { + if (_dispatch_queue_atomic_flags(dch) & DSF_CANCELED) { + dispatch_invoke_with_autoreleasepool(ctxt->dcic_flags, { + rc = callbacks->dcc_acknowledge_cancel(dch, dch->do_ctxt); + }); + if (rc) { + dch->dm_cancel_handler_called = true; + _dispatch_release_no_dispose(dch); + } else { + ctxt->dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + } + return rc; +} + +static bool +_dispatch_channel_invoke_checks(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_callbacks_t callbacks) +{ + if (!_dispatch_channel_invoke_cancel_check(dch, dcic, callbacks)) { + return false; + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (likely(dcic->dcic_flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + } + if (unlikely(_dispatch_queue_drain_should_narrow(dcic->dcic_dic))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + uint64_t dq_state = os_atomic_load(&dch->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_channel_invoke2(dispatch_channel_t dch, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned DISPATCH_UNUSED) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_channel_invoke_ctxt_s dcic = { + .dcic_dch = dch, + .dcic_dic = dic, + .dcic_flags = flags & + ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN, + .dcic_tq = DISPATCH_QUEUE_WAKEUP_NONE, + }; + + _dispatch_thread_frame_push(&dcic.dcic_dtf, dch); + + if (!_dispatch_channel_invoke_cancel_check(dch, &dcic, callbacks)) { + goto out; + } + + do { + struct dispatch_object_s *dc = dcic.dcic_next_dc; + + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + + if (unlikely(_dispatch_object_is_sync_waiter(dc))) { + DISPATCH_CLIENT_CRASH(0, "sync waiter found on channel"); + } + + if (_dispatch_object_is_channel_item(dc)) { + dcic.dcic_next_dc = dc; + dcic.dcic_called_drain = false; + dispatch_invoke_with_autoreleasepool(dcic.dcic_flags, { + if (callbacks->dcc_invoke(dch, &dcic, dch->do_ctxt)) { + if (unlikely(!dcic.dcic_called_drain)) { + DISPATCH_CLIENT_CRASH(0, "Channel didn't call " + "dispatch_channel_drain"); + } + } else { + dcic.dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + }); + } else { + dcic.dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + _dispatch_continuation_pop_inline(dc, dic, flags, dch); + if (!_dispatch_channel_invoke_checks(dch, &dcic, callbacks)) { + break; + } + } + } while (dcic.dcic_tq == DISPATCH_QUEUE_WAKEUP_NONE); + +out: + _dispatch_thread_frame_pop(&dcic.dcic_dtf); + return dcic.dcic_tq; +} + +void +_dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dch, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_channel_invoke2); +} + +void +dispatch_channel_foreach_work_item_peek_f( + dispatch_channel_invoke_ctxt_t dcic, + void *ctxt, 
dispatch_channel_enumerator_handler_t f) +{ + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called peek after drain"); + } + + dispatch_channel_t dch = dcic->dcic_dch; + struct dispatch_object_s *dc = dcic->dcic_next_dc; + + for (;;) { + dispatch_continuation_t dci = (dispatch_continuation_t)dc; + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + if (!f(ctxt, dci->dc_ctxt)) { + break; + } + if (dc == dch->dq_items_tail) { + break; + } + dc = os_mpsc_get_next(dc, do_next, &dch->dq_items_tail); + } +} + +void +dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t dcic, + void *_Nullable ctxt, dispatch_channel_drain_handler_t f) +{ + dispatch_channel_t dch = dcic->dcic_dch; + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + struct dispatch_object_s *dc; + uintptr_t dcf = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + void *unpop_item = NULL; + bool stop_invoke = false; + + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called drain twice in the same invoke"); + } + dcic->dcic_called_drain = true; + + do { + dc = dcic->dcic_next_dc; + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + + dcic->dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + + _dispatch_continuation_pop_forwarded(upcast(dc)._dc, dcf, dch, { + dispatch_invoke_with_autoreleasepool(dcic->dcic_flags, { + stop_invoke = !f(ctxt, upcast(dc)._dc->dc_ctxt, &unpop_item); + }); + }); + if (unlikely(stop_invoke)) { + break; + } + } while (_dispatch_channel_invoke_checks(dch, dcic, callbacks)); + + if (unlikely(unpop_item)) { + dispatch_continuation_t dci = _dispatch_continuation_alloc(); + _dispatch_continuation_init_f(dci, dch, unpop_item, NULL, 0, dcf); + os_mpsc_undo_pop_head(os_mpsc(dch, dq_items), upcast(dci)._do, + dcic->dcic_next_dc, do_next); + dcic->dcic_next_dc = upcast(dci)._do; + } +} + +#ifdef __BLOCKS__ +void +dispatch_channel_foreach_work_item_peek( + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_enumerator_block_t block) +{ + dispatch_channel_enumerator_handler_t f; + f = (dispatch_channel_enumerator_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_foreach_work_item_peek_f(dcic, block, f); +} + +void +dispatch_channel_drain(dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_drain_block_t block) +{ + dispatch_channel_drain_handler_t f; + f = (dispatch_channel_drain_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_drain_f(dcic, block, f); +} +#endif // __BLOCKS__ + +DISPATCH_NOINLINE +void +_dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_t dq = dch->_as_dq; + + if (unlikely(!callbacks->dcc_probe(dch, dch->do_ctxt))) { + target = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } else if (_dispatch_queue_class_probe(dch)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (_dispatch_queue_atomic_flags(dq) & DSF_CANCELED) { + if (!dch->dm_cancel_handler_called) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + + return _dispatch_queue_wakeup(dch, qos, flags, target); +} + +size_t +_dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz) +{ + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch); + size_t offset = 0; + + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + 
_dispatch_object_class_name(dch), dch); + offset += _dispatch_object_debug_attr(dch, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dch->_as_dq, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s%s%s", + (dqf & DSF_CANCELED) ? "cancelled, " : "", + (dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "", + (dqf & DSF_DELETED) ? "deleted, " : ""); + + return offset; +} + +dispatch_channel_t +dispatch_channel_create(const char *label, dispatch_queue_t tq, + void *ctxt, dispatch_channel_callbacks_t callbacks) +{ + dispatch_channel_t dch; + dispatch_queue_flags_t dqf = DSF_STRICT; + + if (callbacks->dcc_version < 1) { + DISPATCH_CLIENT_CRASH(callbacks->dcc_version, + "Unsupported callbacks version"); + } + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + if (unlikely(!tq)) { + tq = _dispatch_get_default_queue(true); + } else { + _dispatch_retain((dispatch_queue_t _Nonnull)tq); + } + + dch = _dispatch_queue_alloc(channel, dqf, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dch; + dch->dq_label = label; + dch->do_targetq = tq; + dch->dch_callbacks = callbacks; + dch->do_ctxt = ctxt; + if (!callbacks->dcc_acknowledge_cancel) { + dch->dm_cancel_handler_called = true; + dch->do_ref_cnt--; + } + return dch; +} + +DISPATCH_NOINLINE +static void +_dispatch_channel_enqueue_slow(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_channel_enqueue(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_channel_enqueue_slow(dch, ctxt); + } + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +#ifndef __APPLE__ +#if __BLOCKS__ +void typeof(dispatch_channel_async) dispatch_channel_async + __attribute__((__alias__("dispatch_async"))); +#endif + +void typeof(dispatch_channel_async_f) dispatch_channel_async_f + __attribute__((__alias__("dispatch_async_f"))); +#endif + +void +dispatch_channel_wakeup(dispatch_channel_t dch, qos_class_t qos_class) +{ + dispatch_qos_t oqos = _dispatch_qos_from_qos_class(qos_class); + dx_wakeup(dch, oqos, DISPATCH_WAKEUP_MAKE_DIRTY); +} + +#pragma mark - +#pragma mark dispatch_mgr_queue + +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +struct _dispatch_mgr_sched_s { + volatile int prio; + volatile qos_class_t qos; + int default_prio; + int policy; #if defined(_WIN32) HANDLE hThread; #else @@ -5112,15 +5798,15 @@ DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred); static int _dispatch_mgr_sched_qos2prio(qos_class_t qos) { + if (qos == QOS_CLASS_MAINTENANCE) return 4; switch (qos) { - case QOS_CLASS_MAINTENANCE: return 4; case QOS_CLASS_BACKGROUND: return 4; case QOS_CLASS_UTILITY: return 20; case QOS_CLASS_DEFAULT: return 31; case QOS_CLASS_USER_INITIATED: return 37; case QOS_CLASS_USER_INTERACTIVE: return 47; + default: return 0; } - return 0; } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5363,12 +6049,15 @@
_dispatch_mgr_queue_drain(void) _dispatch_perfmon_end(perfmon_thread_manager); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" #if DISPATCH_USE_KEVENT_WORKQUEUE if (!_dispatch_kevent_workqueue_enabled) #endif { _dispatch_force_cache_cleanup(); } +#pragma clang diagnostic pop } void @@ -5495,7 +6184,8 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) // // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + + pp &= _PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK; if (ddi->ddi_wlh == DISPATCH_WLH_ANON) { pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; } else { @@ -5555,12 +6245,36 @@ _dispatch_wlh_worker_thread_reset(void) } } +static inline os_workgroup_t +_dispatch_wlh_get_workgroup(dispatch_wlh_t wlh) +{ + os_workgroup_t wg = NULL; + dispatch_queue_t dq = (dispatch_queue_t) wlh; + if (wlh != DISPATCH_WLH_ANON && (dx_type(dq) == DISPATCH_WORKLOOP_TYPE)) { + dispatch_workloop_t dwl = (dispatch_workloop_t) dq; + if (dwl->dwl_attr) { + wg = dwl->dwl_attr->workgroup; + } + } + + return wg; +} + DISPATCH_ALWAYS_INLINE static void _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, int *nevents) { _dispatch_introspection_thread_add(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field will make sure that we have no bad state lingering. + * + * If the thread is part of the cooperative workq quantum world, we know + * that the thread has just had its workq quantum armed before coming out to + * userspace, so we clobber this to make sure that we start fresh */ + _dispatch_ack_quantum_expiry_action(); +#endif DISPATCH_PERF_MON_VAR_INIT @@ -5570,6 +6284,13 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, }; bool is_manager; + os_workgroup_t wg = _dispatch_wlh_get_workgroup(wlh); + os_workgroup_join_token_s join_token = {0}; + if (wg) { + int rv = os_workgroup_join(wg, &join_token); + dispatch_assert(rv == 0); + } + is_manager = _dispatch_wlh_worker_thread_init(&ddi); if (!is_manager) { _dispatch_trace_runtime_event(worker_event_delivery, @@ -5604,12 +6325,25 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, } } + if (wg) { + os_workgroup_leave(wg, &join_token); + } + _dispatch_deferred_items_set(NULL); if (!is_manager && !ddi.ddi_stashed_dou._do) { _dispatch_perfmon_end(perfmon_thread_event_no_steal); } _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); _dispatch_clear_return_to_kernel(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field should be a noop. 
+ *
+ * If the thread is part of the cooperative workq quantum world, the thread
+ * is not going to take any action on the workq quantum action regardless
+ * since it is going to park so we clear it anyways */
+	_dispatch_ack_quantum_expiry_action();
+#endif
 	*nevents = ddi.ddi_nevents;
 	_dispatch_trace_runtime_event(worker_park, NULL, 0);
@@ -5619,11 +6353,14 @@ DISPATCH_NOINLINE
 static void
 _dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents)
 {
-	if (!events || !nevents) {
+	if (!dispatch_assume(events && nevents)) {
+		return;
+	}
+	if (*nevents == 0 || *events == NULL) {
 		// events for worker thread request have already been delivered earlier
+		// or were cancelled concurrently before the point of no return
 		return;
 	}
-	if (!dispatch_assume(*nevents && *events)) return;
 	_dispatch_adopt_wlh_anon();
 	_dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents);
 	_dispatch_reset_wlh();
@@ -5635,14 +6372,17 @@ static void
 _dispatch_workloop_worker_thread(uint64_t *workloop_id,
 		dispatch_kevent_t *events, int *nevents)
 {
-	if (!workloop_id || !dispatch_assume(*workloop_id != 0)) {
+	if (!dispatch_assume(workloop_id && events && nevents)) {
+		return;
+	}
+	if (!dispatch_assume(*workloop_id != 0)) {
 		return _dispatch_kevent_worker_thread(events, nevents);
 	}
-	if (!events || !nevents) {
+	if (*nevents == 0 || *events == NULL) {
 		// events for worker thread request have already been delivered earlier
+		// or were cancelled concurrently before the point of no return
 		return;
 	}
-	if (!dispatch_assume(*nevents && *events)) return;
 	dispatch_wlh_t wlh = (dispatch_wlh_t)*workloop_id;
 	_dispatch_adopt_wlh(wlh);
 	_dispatch_wlh_worker_thread(wlh, *events, nevents);
@@ -5700,6 +6440,15 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
 				_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
 		(void)dispatch_assume_zero(r);
 		return;
+#if DISPATCH_USE_COOPERATIVE_WORKQUEUE
+	} else if (dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE) {
+		_dispatch_root_queue_debug("requesting new worker thread for cooperative global "
+				"queue: %p", dq);
+		r = _pthread_workqueue_add_cooperativethreads(remaining,
+				_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
+		(void)dispatch_assume_zero(r);
+		return;
+#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */
 	}
 #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
 #if DISPATCH_USE_PTHREAD_POOL
@@ -5741,7 +6490,7 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
 					"%p", dq);
 			return;
 		}
-	} while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count,
+	} while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count,
 			t_count - remaining, &t_count, acquire));
 #if !defined(_WIN32)
@@ -5798,7 +6547,8 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
 	}
 #if !DISPATCH_USE_INTERNAL_WORKQUEUE
 #if DISPATCH_USE_PTHREAD_POOL
-	if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE))
+	if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE ||
+			dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE))
 #endif
 	{
 		if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) {
@@ -5935,7 +6685,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq)
 			goto out;
 		}
 		// There must be a next item now.
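
The os_mpsc change that follows (threading &dq->dq_items_tail into os_mpsc_get_next) exists because of the publish window in the push path; a simplified C11 sketch of that window:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *_Atomic next; };

    // Producer side of an MPSC queue (simplified): the tail is swapped
    // first, the predecessor is linked second. Between the two stores the
    // queue is non-empty, yet prev->next still reads NULL -- the window
    // "There must be a next item now" refers to, and the reason
    // os_mpsc_get_next() now also receives the tail pointer, so a waiter
    // can identify the enqueuer stuck in that window.
    static void
    mpsc_push(struct node *_Atomic *tailp, struct node *node)
    {
        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        struct node *prev = atomic_exchange_explicit(tailp, node,
                memory_order_acq_rel);
        atomic_store_explicit(&prev->next, node, memory_order_release);
    }
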
- next = os_mpsc_get_next(head, do_next); + next = os_mpsc_get_next(head, do_next, &dq->dq_items_tail); } os_atomic_store2o(dq, dq_items_head, next, relaxed); @@ -5967,15 +6717,12 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { dx_invoke(dq, &dic, flags); -#if DISPATCH_USE_KEVENT_WORKLOOP // // dx_invoke() will always return `dq` unlocked or locked by another // thread, and either have consumed the +2 or transferred it to the // other thread. // -#endif if (!ddi->ddi_wlh_needs_delete) { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The fate of the workloop thread request has already been dealt // with, which can happen for 4 reasons, for which we just want @@ -5985,10 +6732,8 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // - the workloop has been re-enqueued on the manager queue // - the workloop ownership has been handed off to a sync owner // -#endif goto park; } -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop has been drained to completion or suspended. // dx_invoke() has cleared the enqueued bit before it returned. @@ -6009,7 +6754,6 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // Take over that +1, and add our own to make the +2 this loop expects, // and drain again. // -#endif // DISPATCH_USE_KEVENT_WORKLOOP dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 goto park; @@ -6020,18 +6764,17 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi goto retry; } } else { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop enters this function with a +2 refcount, however we // couldn't acquire the lock due to suspension or discovering that // the workloop was locked by a sync owner. // // We need to give up, and _dispatch_event_loop_leave_deferred() - // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to + // will do a DISPATCH_WORKLOOP_SYNC_DISCOVER and + // a DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE transition to // tell the kernel to stop driving this thread request. We leave // a +1 with the thread request, and consume the extra +1 we have. // -#endif if (_dq_state_is_suspended(dq_state)) { dispatch_assert(!_dq_state_is_enqueued(dq_state)); _dispatch_release_2_no_dispose(dq); @@ -6116,6 +6859,16 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { break; } + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* There is no need to check to see if we need to shuffle since by + * virtue of the fact that we're here, we're timesharing between the + * work items anyways - just eat the quantum expiry action. + * + * In the future, we'd expand this to include more checks for various + * other quantum expiry actions */ + _dispatch_ack_quantum_expiry_action(); +#endif } // overcommit or not. 
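
The join/leave bracketing added to _dispatch_wlh_worker_thread a few hunks above follows the public os_workgroup protocol; a standalone sketch (the drain callback is illustrative):

    #include <os/workgroup.h>

    // Bracket a drain with join/leave so the kernel attributes the thread's
    // work to the workloop's workgroup, mirroring the worker-thread hunk.
    static void
    drain_in_workgroup(os_workgroup_t wg, void (*drain)(void *), void *ctxt)
    {
        os_workgroup_join_token_s token = {0};
        if (os_workgroup_join(wg, &token) != 0) {
            // join can fail (e.g. the workgroup was cancelled);
            // fall back to draining without membership
            drain(ctxt);
            return;
        }
        drain(ctxt);
        os_workgroup_leave(wg, &token);
    }
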
worker thread @@ -6137,22 +6890,42 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, static void _dispatch_worker_thread2(pthread_priority_t pp) { +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_global_t dq; + bool cooperative = pp & _PTHREAD_PRIORITY_COOPERATIVE_FLAG; - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp &= (_PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK); _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); + + dispatch_queue_global_t dq; + dispatch_invoke_flags_t invoke_flags = 0; + + uintptr_t rq_flags = 0; + if (cooperative) { + rq_flags |= DISPATCH_QUEUE_COOPERATIVE; + invoke_flags |= DISPATCH_INVOKE_COOPERATIVE_DRAIN; + } else { + rq_flags |= (overcommit ? DISPATCH_QUEUE_OVERCOMMIT : 0); + } + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), rq_flags); _dispatch_introspection_thread_add(); _dispatch_trace_runtime_event(worker_unpark, dq, 0); int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq, dq->dq_priority, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + + invoke_flags |= DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_root_queue_drain(dq, dq->dq_priority, invoke_flags); _dispatch_voucher_debug("root queue clear", NULL); _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif _dispatch_trace_runtime_event(worker_park, NULL, 0); } #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE @@ -6207,6 +6980,13 @@ _dispatch_worker_thread(void *context) _dispatch_set_pthread_root_queue_observer_hooks( &pqc->dpq_observer_hooks); } + + /* Set it up before the configure block so that it can get overridden by + * client if they want to name their threads differently */ + if (dq->_as_dq->dq_label) { + pthread_setname_np(dq->_as_dq->dq_label); + } + if (pqc->dpq_thread_configure) { pqc->dpq_thread_configure(); } @@ -6296,6 +7076,15 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, dispatch_priority_t rq_overcommit; rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + // TODO (rokhinip): When we add kevent support for the cooperative pool, + // we need to fix this logic to make sure that we have the following + // ranking: + // + // non_overcommit < cooperative < overcommit + + // After parsing kevents, we could have stashed a non-overcommit work + // item to do but if an overcommit/cooperative request comes in, prefer + // that. 
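
The decode at the top of _dispatch_worker_thread2 above keys off two pthread-priority flag bits that the code treats as mutually exclusive; in isolation (flag values taken from the shims/priority.h hunk later in this patch):

    #include <stdbool.h>

    typedef unsigned long pthread_priority_t;

    // Flag values as defined in the shims/priority.h hunk below.
    #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG  0x80000000
    #define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000

    // A worker thread request is exactly one of three kinds; the code above
    // relies on the two bits never being set together (the same invariant
    // dispatch_get_global_queue() enforces on its flags).
    static const char *
    thread_request_kind(pthread_priority_t pp)
    {
        bool overcommit  = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
        bool cooperative = pp & _PTHREAD_PRIORITY_COOPERATIVE_FLAG;
        if (cooperative) return "cooperative (width-limited per QoS bucket)";
        if (overcommit)  return "overcommit (may oversubscribe cores)";
        return "regular";
    }
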
if (likely(!old_dou._do || rq_overcommit)) { dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; dispatch_qos_t old_qos = ddi->ddi_stashed_qos; @@ -6317,6 +7106,16 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, } } #endif + + if (_dispatch_queue_is_cooperative(rq)) { + /* We only allow enqueueing of continuations or swift job objects on the + * cooperative pool, no other objects */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + #if HAVE_PTHREAD_WORKQUEUE_QOS if (_dispatch_root_queue_push_needs_override(rq, qos)) { return _dispatch_root_queue_push_override(rq, dou, qos); @@ -6654,6 +7453,7 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, _dispatch_runloop_queue_handle_init); } + qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { new_state = _dq_state_merge_qos(old_state, qos); if (old_state == new_state) { @@ -6998,7 +7798,7 @@ static void _dispatch_sigsuspend(void) { static const sigset_t mask; - + pthread_sigmask(SIG_SETMASK, &mask, NULL); for (;;) { sigsuspend(&mask); } @@ -7083,9 +7883,18 @@ _dispatch_queue_cleanup2(void) // See dispatch_main for call to _dispatch_sig_thread on linux. #ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { - _dispatch_barrier_async_detached_f(_dispatch_get_default_queue(true), - NULL, _dispatch_sig_thread); - sleep(1); // workaround 6778970 + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_t tid; + int r = pthread_create(&tid, &attr, (void*)_dispatch_sig_thread, NULL); + if (unlikely(r)) { + DISPATCH_CLIENT_CRASH(r, "Unable to create signal thread"); + } + pthread_attr_destroy(&attr); + // this used to be here as a workaround for 6778970 + // but removing it had bincompat fallouts :'( + sleep(1); } #endif @@ -7142,9 +7951,42 @@ _dispatch_context_cleanup(void *ctxt) DISPATCH_INTERNAL_CRASH(ctxt, "Premature thread exit while a dispatch context is set"); } + #pragma mark - #pragma mark dispatch_init +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE +static void +_dispatch_cooperative_root_queue_init_fallback(dispatch_queue_global_t dq) +{ + uint16_t max_cpus = (uint16_t) dispatch_hw_config(logical_cpus); + uint16_t width_per_cooperative_queue; + + if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) { + /* We want width 1 for a strict runtime - implement it as a width 1 + * concurrent queue */ + width_per_cooperative_queue = 1; + } else { + /* Concurrent queue with limited width */ + width_per_cooperative_queue = MAX(max_cpus/DISPATCH_QOS_NBUCKETS, 1); + } + + dispatch_priority_t pri = dq->dq_priority; + dispatch_qos_t qos = (pri & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
+ _dispatch_priority_fallback_qos(pri) : _dispatch_priority_qos(pri); + + /* _dispatch_queue_init will clobber the serial num so just save it and + * restore it back */ + unsigned long dq_serialnum = dq->dq_serialnum; + _dispatch_queue_init(dq, 0, width_per_cooperative_queue, DISPATCH_QUEUE_ROLE_BASE_ANON); + dq->dq_serialnum = dq_serialnum; + + dispatch_queue_t tq = _dispatch_get_root_queue(qos, 0)->_as_dq; + _dispatch_retain(tq); + dq->do_targetq = tq; +} +#endif + static void _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { @@ -7164,33 +8006,89 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) "QoS Maintenance support required"); } +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE + for (int i = DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE; + i < _DISPATCH_ROOT_QUEUE_IDX_COUNT; i += DISPATCH_ROOT_QUEUE_FLAVORS) + { + _dispatch_cooperative_root_queue_init_fallback(&_dispatch_root_queues[i]); + } +#endif + +#if DISPATCH_USE_KEVENT_SETUP + struct pthread_workqueue_config cfg = { + .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, + .flags = 0, + .workq_cb = 0, + .kevent_cb = 0, + .workloop_cb = 0, + .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, +#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2 + .queue_label_offs = dispatch_queue_offsets.dqo_label, +#endif + }; +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" if (unlikely(!_dispatch_kevent_workqueue_enabled)) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init(_dispatch_worker_thread2, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #if DISPATCH_USE_KEVENT_WORKLOOP } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #endif // DISPATCH_USE_KEVENT_WORKLOOP #if DISPATCH_USE_KEVENT_WORKQUEUE } else if (wq_supported & WORKQ_FEATURE_KEVENT) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #endif } else { DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); } +#pragma clang diagnostic pop if (r != 0) { DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, "Root queue initialization failed"); } + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) { + int pool_size_limit = -1; /* strict per QoS bucket */ + r = sysctlbyname("kern.wq_limit_cooperative_threads", NULL, NULL, &pool_size_limit, + sizeof(int)); + + if (r != 0) { + DISPATCH_INTERNAL_CRASH(errno, "Unable to 
limit cooperative pool size"); + } + } +#endif + #endif // DISPATCH_USE_INTERNAL_WORKQUEUE } @@ -7203,6 +8101,38 @@ _dispatch_root_queues_init(void) _dispatch_root_queues_init_once); } +dispatch_queue_global_t +dispatch_get_global_queue(intptr_t priority, uintptr_t flags) +{ + if (flags & ~(unsigned long)(DISPATCH_QUEUE_OVERCOMMIT | DISPATCH_QUEUE_COOPERATIVE)) { + return DISPATCH_BAD_INPUT; + } + + if ((flags & DISPATCH_QUEUE_OVERCOMMIT) && (flags & DISPATCH_QUEUE_COOPERATIVE)) { + return DISPATCH_BAD_INPUT; + } + + dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == QOS_CLASS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } else if (qos == QOS_CLASS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; + } +#endif + if (qos == DISPATCH_QOS_UNSPECIFIED) { + return DISPATCH_BAD_INPUT; + } + +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* The fallback implementation of the cooperative root queues need to be + * fully initialized before work can be enqueued on these queues */ + _dispatch_root_queues_init(); +#endif + + return _dispatch_get_root_queue(qos, flags); +} + DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) @@ -7213,14 +8143,10 @@ libdispatch_init(void) if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) { _dispatch_mode |= DISPATCH_MODE_STRICT; } -#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; - } else if (getpid() == 1 || - !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; + + if (_dispatch_getenv_bool("LIBDISPATCH_COOPERATIVE_POOL_STRICT", false)) { + _dispatch_mode |= DISPATCH_COOPERATIVE_POOL_STRICT; } -#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #if DISPATCH_DEBUG || DISPATCH_PROFILE @@ -7262,8 +8188,11 @@ libdispatch_init(void) _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _dispatch_thread_key_create(&dispatch_quantum_key, NULL); + _dispatch_thread_key_create(&dispatch_dsc_key, NULL); + _dispatch_thread_key_create(&os_workgroup_key, _os_workgroup_tsd_cleanup); + _dispatch_thread_key_create(&dispatch_enqueue_key, NULL); #endif - #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); #endif @@ -7280,6 +8209,9 @@ libdispatch_init(void) _dispatch_vtable_init(); _os_object_init(); _voucher_init(); +#if TARGET_OS_MAC + _workgroup_init(); +#endif _dispatch_introspection_init(); } @@ -7405,6 +8337,9 @@ _libdispatch_tsd_cleanup(void *ctx) _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _tsd_call_cleanup(dispatch_quantum_key, NULL); + _tsd_call_cleanup(dispatch_enqueue_key, NULL); + _tsd_call_cleanup(dispatch_dsc_key, NULL); #ifdef __ANDROID__ if (_dispatch_thread_detach_callback) { _dispatch_thread_detach_callback(); @@ -7456,7 +8391,7 @@ DISPATCH_NOINLINE void _dispatch_fork_becomes_unsafe_slow(void) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); @@ -7468,7 
+8403,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit) { if (prohibit) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); diff --git a/src/queue_internal.h b/src/queue_internal.h index ce235f482..68a5fec23 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -35,7 +35,7 @@ #pragma mark - #pragma mark dispatch_queue_flags, dq_state -DISPATCH_ENUM(dispatch_queue_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, DQF_NONE = 0x00000000, DQF_AUTORELEASE_ALWAYS = 0x00010000, DQF_AUTORELEASE_NEVER = 0x00020000, @@ -121,20 +121,16 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, */ #define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull /* - * i: inactive bit (bit 56) + * i: inactive state (bit 56-55) * This bit means that the object is inactive (see dispatch_activate) */ -#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull +#define DISPATCH_QUEUE_INACTIVE 0x0180000000000000ull +#define DISPATCH_QUEUE_ACTIVATED 0x0100000000000000ull +#define DISPATCH_QUEUE_ACTIVATING 0x0080000000000000ull /* - * na: needs activation (bit 55) - * This bit is set if the object is created inactive. It tells - * dispatch_queue_wakeup to perform various tasks at first wakeup. - * - * This bit is cleared as part of the first wakeup. Having that bit prevents - * the object from being woken up (because _dq_state_should_wakeup will say - * no), except in the dispatch_activate/dispatch_resume codepath. + * This mask covers the inactive bits state */ -#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0080000000000000ull +#define DISPATCH_QUEUE_INACTIVE_BITS_MASK 0x0180000000000000ull /* * This mask covers the suspend count (sc), side suspend count bit (ssc), * inactive (i) and needs activation (na) bits @@ -322,7 +318,7 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * * sw: has received sync wait (bit 35, if role DISPATCH_QUEUE_ROLE_BASE_WLH) * Set when a queue owner has been exposed to the kernel because of - * dispatch_sync() contention. + * contention with dispatch_sync(). */ #define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull #define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull @@ -338,14 +334,14 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * drain stealers (like the QoS Override codepath). It holds the identity * (thread port) of the current drainer. 
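
Caller-side view of the reworked dispatch_get_global_queue() from an earlier hunk, together with the index arithmetic implied by the three-flavor root-queue enum in a later hunk (DISPATCH_QUEUE_COOPERATIVE is SPI added by this patch):

    #include <dispatch/dispatch.h>

    // Index layout implied by the root-queue enum: buckets in QoS order
    // (maintenance == 1 ... user-interactive == 6), three consecutive
    // flavors per bucket: plain, overcommit, cooperative.
    enum { FLAVOR_PLAIN, FLAVOR_OVERCOMMIT, FLAVOR_COOPERATIVE };

    static int
    root_queue_index(int qos, int flavor)
    {
        // e.g. default QoS (4), cooperative: (4 - 1) * 3 + 2 == 11
        //      == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_COOPERATIVE
        return (qos - 1) * 3 /* DISPATCH_ROOT_QUEUE_FLAVORS */ + flavor;
    }

    static void
    use_cooperative_pool(void *ctxt, dispatch_function_t work)
    {
        // Combining DISPATCH_QUEUE_COOPERATIVE with DISPATCH_QUEUE_OVERCOMMIT
        // returns DISPATCH_BAD_INPUT (NULL), as does an unknown flag bit.
        dispatch_queue_t dq = dispatch_get_global_queue(QOS_CLASS_DEFAULT,
                DISPATCH_QUEUE_COOPERATIVE);
        if (!dq) return;
        // Only plain work items (or Swift jobs) may target this pool; other
        // object types crash in _dispatch_root_queue_push per the hunk above.
        dispatch_async_f(dq, ctxt, work);
    }
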
* - * st: sync transfer (bit 1 or 30) - * Set when a dispatch_sync() is transferred to + * us: uncontended sync (bit 1 or 30) + * Set when a dispatch_sync() isn't contending * * e: enqueued bit (bit 0 or 31) * Set when a queue is enqueued on its target queue */ #define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK) -#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) +#define DISPATCH_QUEUE_UNCONTENDED_SYNC ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) #define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT) #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ @@ -354,7 +350,7 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, #define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \ (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \ - DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER) + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_UNCONTENDED_SYNC) /* ******************************************************************************* @@ -461,11 +457,12 @@ typedef struct dispatch_queue_specific_head_s { TAILQ_HEAD(, dispatch_queue_specific_s) dqsh_entries; } *dispatch_queue_specific_head_t; -#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x1u -#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x2u -#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x4u -#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x8u -#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x10u +#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x0001u +#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x0002u +#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x0004u +#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x0008u +#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x0010u +#define DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS 0x0020u typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; typedef struct dispatch_workloop_attr_s { uint32_t dwla_flags; @@ -478,6 +475,8 @@ typedef struct dispatch_workloop_attr_s { uint8_t percent; uint32_t refillms; } dwla_cpupercent; + os_workgroup_t workgroup; + dispatch_pthread_root_queue_observer_hooks_s dwla_observers; } dispatch_workloop_attr_s; /* @@ -503,6 +502,7 @@ typedef struct dispatch_workloop_attr_s { * '--> dispatch_lane_class_t * +--> struct dispatch_lane_s * | +--> struct dispatch_source_s + * | +--> struct dispatch_channel_s * | '--> struct dispatch_mach_s * +--> struct dispatch_queue_static_s * '--> struct dispatch_queue_global_s @@ -600,6 +600,7 @@ typedef struct dispatch_workloop_attr_s { struct dispatch_source_refs_s *ds_refs; \ struct dispatch_timer_source_refs_s *ds_timer_refs; \ struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + struct dispatch_channel_callbacks_s const *dch_callbacks; \ }; \ int volatile dq_sref_cnt @@ -673,6 +674,10 @@ bool _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( dispatch_queue_t queue); +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t workloop, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks); #endif // __APPLE__ #if DISPATCH_USE_PTHREAD_POOL @@ -735,6 +740,7 @@ DISPATCH_SUBCLASS_DECL(queue_global, queue, lane); #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_INTERNAL_SUBCLASS_DECL(queue_pthread_root, queue, lane); #endif +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_cooperative, queue, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue_serial, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue_serial, lane); @@ -779,12 +785,18 @@ void _dispatch_queue_invoke_finish(dispatch_queue_t dq, dispatch_priority_t 
_dispatch_queue_compute_priority_and_wlh(
		dispatch_queue_class_t dq, dispatch_wlh_t *wlh_out);
+DISPATCH_ENUM(dispatch_resume_op, int,
+	DISPATCH_RESUME,
+	DISPATCH_ACTIVATE,
+	DISPATCH_ACTIVATION_DONE,
+);
+void _dispatch_lane_resume(dispatch_lane_class_t dq, dispatch_resume_op_t how);
+
 void _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq);
 void _dispatch_lane_class_dispose(dispatch_queue_class_t dq, bool *allow_free);
 void _dispatch_lane_dispose(dispatch_lane_class_t dq, bool *allow_free);
 void _dispatch_lane_suspend(dispatch_lane_class_t dq);
-void _dispatch_lane_resume(dispatch_lane_class_t dq, bool activate);
-void _dispatch_lane_activate(dispatch_lane_class_t dq, bool *allow_resume);
+void _dispatch_lane_activate(dispatch_lane_class_t dq);
 void _dispatch_lane_invoke(dispatch_lane_class_t dq, dispatch_invoke_context_t dic,
 		dispatch_invoke_flags_t flags);
 void _dispatch_lane_push(dispatch_lane_class_t dq, dispatch_object_t dou,
@@ -853,45 +865,56 @@ DISPATCH_COLD
 size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char *buf, size_t bufsiz);
-#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2)
+#define DISPATCH_ROOT_QUEUE_FLAVORS 3
+#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * DISPATCH_ROOT_QUEUE_FLAVORS)
 // must be in lowest to highest qos order (as encoded in dispatch_qos_t)
-// overcommit qos index values need bit 1 set
 enum {
 	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
 	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE,
 	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
 	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_COOPERATIVE,
 	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
 	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_COOPERATIVE,
 	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
 	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_COOPERATIVE,
 	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
 	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_COOPERATIVE,
 	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
 	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
+	DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_COOPERATIVE,
 	_DISPATCH_ROOT_QUEUE_IDX_COUNT,
 };
+#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT \
+	(DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS)
+#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE \
+	(DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS)
+
 // skip zero
 // 1 - main_q
 // 2 - mgr_q
 // 3 - mgr_root_q
-// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues
-// 17 - workloop_fallback_q
+// 4 - 21 - global queues
+// 22 - workloop_fallback_q
 // we use 'xadd' on Intel, so the initial value == next assigned
-#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17
+#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 23
 extern unsigned long volatile _dispatch_queue_serial_numbers;
 // mark the workloop fallback queue to avoid finalizing objects on the base
 // queue of custom outside-of-qos workloops
-#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 16
+#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 22
 extern struct dispatch_queue_static_s _dispatch_mgr_q; // serial 2
 #if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
 extern struct dispatch_queue_global_s _dispatch_mgr_root_queue; // serial 3
 #endif
-extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 15
+extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 21 #if DISPATCH_DEBUG #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ @@ -957,7 +980,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); #if DISPATCH_SIZEOF_PTR == 8 #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ union { \ @@ -981,7 +1004,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); }; \ struct voucher_s *dc_voucher; \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ struct dispatch_##x##_s *volatile do_next; \ @@ -991,7 +1014,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); #else #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ union { \ @@ -1042,6 +1065,8 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); // continuation is an internal implementation detail that should not be // introspected #define DC_FLAG_NO_INTROSPECTION 0x200ul +// The item is a channel item, not a continuation +#define DC_FLAG_CHANNEL_ITEM 0x400ul typedef struct dispatch_continuation_s { DISPATCH_CONTINUATION_HEADER(continuation); @@ -1050,6 +1075,36 @@ typedef struct dispatch_continuation_s { dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_next); dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_vtable); +/* Swift runtime objects to be enqueued into dispatch */ +struct dispatch_swift_continuation_s; + +struct dispatch_swift_continuation_extra_vtable_s { + unsigned long const do_type; + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct dispatch_swift_continuation_s *, + void *, dispatch_swift_job_invoke_flags_t flags); +}; + +typedef struct dispatch_swift_continuation_vtable_s { + _OS_OBJECT_CLASS_HEADER(); + struct dispatch_swift_continuation_extra_vtable_s _os_obj_vtable; +} const *dispatch_swift_continuation_vtable_t; + +/* This is the internal representation of a Swift object that will be enqueued + * onto dispatch. The actual object may be bigger but we only care about this + * piece of it. The vtable the continuation points to, will be interpreted as a + * dispatch_swift_continuation_vtable_t even if it is bigger. 
+ */ +typedef struct dispatch_swift_continuation_s { + struct dispatch_object_s _as_do[0]; + _DISPATCH_OBJECT_HEADER_INTERNAL(swift_continuation); + void *opaque1; + void *opaque2; + void *opaque3; +} *dispatch_swift_continuation_t; + +dispatch_static_assert(sizeof(struct dispatch_swift_continuation_s) == + sizeof(struct dispatch_object_s)); + typedef struct dispatch_sync_context_s { struct dispatch_continuation_s _as_dc[0]; DISPATCH_CONTINUATION_HEADER(continuation); @@ -1062,12 +1117,11 @@ typedef struct dispatch_sync_context_s { uint8_t dsc_override_qos; uint16_t dsc_autorelease : 2; uint16_t dsc_wlh_was_first : 1; + uint16_t dsc_wlh_self_wakeup : 1; uint16_t dsc_wlh_is_workloop : 1; uint16_t dsc_waiter_needs_cancel : 1; uint16_t dsc_release_storage : 1; -#if DISPATCH_INTROSPECTION uint16_t dsc_from_async : 1; -#endif } *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { @@ -1162,7 +1216,8 @@ struct dispatch_apply_s { #if !OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; #endif - size_t volatile da_index, da_todo; + size_t _Atomic da_index; + size_t _Atomic da_todo; size_t da_iterations; #if OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; @@ -1171,12 +1226,29 @@ struct dispatch_apply_s { dispatch_thread_event_s da_event; dispatch_invoke_flags_t da_flags; int32_t da_thr_cnt; + uint32_t _Atomic da_worker_index; + dispatch_apply_attr_t da_attr; }; dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) == offsetof(struct dispatch_apply_s, da_dc), "These fields must alias so that leaks instruments work"); typedef struct dispatch_apply_s *dispatch_apply_t; +#define DISPATCH_APPLY_ATTR_SIG 0xA11AB000 +struct dispatch_apply_attr_s { + uint32_t sig; + uint32_t flags; + size_t per_cluster_parallelism; + uintptr_t guard; /* To prevent copying */ +#if defined(__LP64__) + uint8_t unused[40]; +#else + uint8_t unused[48]; +#endif +}; +dispatch_static_assert(sizeof(struct dispatch_apply_attr_s) == __DISPATCH_APPLY_ATTR_SIZE__, + "Opaque dispatch apply attr and internal apply attr size should match"); + #pragma mark - #pragma mark dispatch_block_t diff --git a/src/semaphore.c b/src/semaphore.c index 925c002af..987333740 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -72,7 +72,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM - offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%x, ", dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, @@ -121,7 +121,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { return _DSEMA4_TIMEOUT(); } diff --git a/src/shims.h b/src/shims.h index e14697a9f..b611a5a73 100644 --- a/src/shims.h +++ b/src/shims.h @@ -55,8 +55,12 @@ #endif #ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#if defined(__APPLE__) +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 64 +#else #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 #endif +#endif /* DISPATCH_WORKQ_MAX_PTHREAD_COUNT */ #include "shims/hw_config.h" #include "shims/priority.h" @@ -65,10 +69,6 @@ #include #endif -#if __has_include() -#include -#endif - #if !HAVE_DECL_FD_COPY #define FD_COPY(f, t) (void)(*(t) 
= *(f)) #endif @@ -154,10 +154,7 @@ _pthread_workqueue_should_narrow(pthread_priority_t priority) } #endif -#if HAVE_PTHREAD_QOS_H && __has_include() && \ - defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ - DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_QOS_H && __has_include() #define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 #define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL #else @@ -199,6 +196,98 @@ _dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags) return p; } +#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#include + +#if defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 1 +#else // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ + +#else // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM +extern int __bsdthread_ctl(uintptr_t cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); +#include +// the sysctl wants thread_qos_t not dispatch_qos_t +DISPATCH_ALWAYS_INLINE +static inline uint8_t +_dispatch_qos2threadqos(dispatch_qos_t q) +{ + switch (q) { + case DISPATCH_QOS_USER_INTERACTIVE: return THREAD_QOS_USER_INTERACTIVE; + case DISPATCH_QOS_USER_INITIATED: return THREAD_QOS_USER_INITIATED; + case DISPATCH_QOS_DEFAULT: return THREAD_QOS_LEGACY; + case DISPATCH_QOS_UTILITY: return THREAD_QOS_UTILITY; + case DISPATCH_QOS_BACKGROUND: return THREAD_QOS_BACKGROUND; + case DISPATCH_QOS_MAINTENANCE: return THREAD_QOS_MAINTENANCE; + default: return THREAD_QOS_UNSPECIFIED; + } +} +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_cluster_max_parallelism(dispatch_qos_t qos) +{ + uint32_t cluster_count = 0; + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos), PTHREAD_MAX_PARALLELISM_CLUSTER); + if (likely(r > 0)) { + cluster_count = (uint32_t) r; + } +#else + (void)qos; +#endif + return cluster_count; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_set(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET, worker_index, cluster_concurrency); + if (rc != 0) { + if (errno != ENOTSUP) { + /* ENOTSUP = Trying to get on a cluster it is not recommended for. + * + * Other error means something very bad has happened! On things + * like the Simulator we shouldn't even be in here. 
+ * DISPATCH_INTERNAL_CRASH isn't available here + */ + __builtin_trap(); + } + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_clear(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR, worker_index, cluster_concurrency); + if (rc != 0) { + __builtin_trap(); + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -216,9 +305,7 @@ void __builtin_trap(void); #endif -#ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" -#endif #define DISPATCH_ATOMIC64_ALIGN __attribute__((aligned(8))) #include "shims/atomic_sfb.h" diff --git a/src/shims/atomic.h b/src/shims/atomic.h index c002e726a..44af102eb 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -35,19 +35,55 @@ #if defined(__cplusplus) #define _Bool bool #endif -#include - -#define memory_order_ordered memory_order_seq_cst -#define memory_order_dependency memory_order_acquire -#define os_atomic(type) type _Atomic +#ifndef os_atomic +#define os_atomic(type) type _Atomic volatile +#endif +#ifndef _os_atomic_c11_atomic #define _os_atomic_c11_atomic(p) \ ((__typeof__(*(p)) _Atomic *)(p)) +#endif // This removes the _Atomic and volatile qualifiers on the type of *p +#ifndef _os_atomic_basetypeof #define _os_atomic_basetypeof(p) \ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) +#endif + +#if __has_include() +#include + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + +#if defined(__arm__) || defined(__arm64__) +#define memory_order_ordered memory_order_relaxed +#define memory_order_ordered_smp memory_order_relaxed +#define _os_atomic_mo_ordered memory_order_relaxed +#define _os_atomic_mo_ordered_smp memory_order_relaxed +#else +#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered_smp memory_order_seq_cst +#define _os_atomic_mo_ordered memory_order_seq_cst +#define _os_atomic_mo_ordered_smp memory_order_seq_cst +#endif + +#define _os_rel_barrier_ordered memory_order_release +#define _os_acq_barrier_ordered memory_order_acquire + +#else // __has_include() +#include + +#define memory_order_ordered memory_order_seq_cst +#define memory_order_dependency memory_order_acquire #define os_atomic_load(p, m) \ atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) @@ -96,14 +132,41 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) -#define os_atomic_force_dependency_on(p, e) (p) +typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; + +#define OS_ATOMIC_DEPENDENCY_NONE ((os_atomic_dependency_t){ 0UL }) +#define os_atomic_make_dependency(v) ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) +#define os_atomic_inject_dependency(p, e) \ + ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) #define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) -#define os_atomic_load_with_dependency_on2o(p, f, e) \ - 
os_atomic_load_with_dependency_on(&(p)->f, e) + os_atomic_load(os_atomic_inject_dependency(p, e), dependency) #define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) +#define os_atomic_inc(p, m) \ + os_atomic_add((p), 1, m) +#define os_atomic_inc_orig(p, m) \ + os_atomic_add_orig((p), 1, m) +#define os_atomic_dec(p, m) \ + os_atomic_sub((p), 1, m) +#define os_atomic_dec_orig(p, m) \ + os_atomic_sub_orig((p), 1, m) + +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + bool _result = false; \ + __typeof__(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + +#endif // !__has_include() + #define os_atomic_load2o(p, f, m) \ os_atomic_load(&(p)->f, m) #define os_atomic_store2o(p, f, v, m) \ @@ -114,8 +177,6 @@ os_atomic_cmpxchg(&(p)->f, (e), (v), m) #define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) #define os_atomic_add2o(p, f, v, m) \ os_atomic_add(&(p)->f, (v), m) #define os_atomic_add_orig2o(p, f, v, m) \ @@ -137,38 +198,22 @@ #define os_atomic_xor_orig2o(p, f, v, m) \ os_atomic_xor_orig(&(p)->f, (v), m) -#define os_atomic_inc(p, m) \ - os_atomic_add((p), 1, m) -#define os_atomic_inc_orig(p, m) \ - os_atomic_add_orig((p), 1, m) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + #define os_atomic_inc2o(p, f, m) \ os_atomic_add2o(p, f, 1, m) #define os_atomic_inc_orig2o(p, f, m) \ os_atomic_add_orig2o(p, f, 1, m) -#define os_atomic_dec(p, m) \ - os_atomic_sub((p), 1, m) -#define os_atomic_dec_orig(p, m) \ - os_atomic_sub_orig((p), 1, m) #define os_atomic_dec2o(p, f, m) \ os_atomic_sub2o(p, f, 1, m) #define os_atomic_dec_orig2o(p, f, m) \ os_atomic_sub_orig2o(p, f, 1, m) -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - __typeof__(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (unlikely(!_result)); \ - _result; \ - }) #define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) 
\ os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) + #define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 185cbdf14..06b8921d9 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -166,6 +166,9 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) #endif case RelationCache: case RelationGroup: +#if defined(DISPATCH_HAVE_EXTENDED_SLPI_22000) + case RelationProcessorModule: +#endif case RelationAll: break; } diff --git a/src/shims/lock.c b/src/shims/lock.c index d4d63134a..4a750b3bd 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -59,7 +59,7 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, #pragma mark - semaphores #if USE_MACH_SEM -#if __has_include() +#if __has_include() && !TARGET_OS_SIMULATOR #include #define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 #else @@ -109,7 +109,7 @@ _dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy) } void -_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy) +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int __unused policy) { semaphore_t sema_port = *sema; *sema = MACH_PORT_DEAD; @@ -593,9 +593,7 @@ _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, } rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags); if (rc == ENOTEMPTY) { - next = value_self | DLOCK_WAITERS_BIT; - } else { - next = value_self; + next |= DLOCK_WAITERS_BIT; } } } diff --git a/src/shims/lock.h b/src/shims/lock.h index 6bf825aa7..9c602724c 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -29,7 +29,7 @@ #pragma mark - platform macros -DISPATCH_ENUM(dispatch_lock_options, uint32_t, +DISPATCH_OPTIONS(dispatch_lock_options, uint32_t, DLOCK_LOCK_NONE = 0x00000000, DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); @@ -252,7 +252,7 @@ int _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, void _dispatch_wake_by_address(uint32_t volatile *address); #pragma mark - thread event -/** +/*! 
* @typedef dispatch_thread_event_t * * @abstract @@ -301,7 +301,7 @@ static inline void _dispatch_thread_event_signal(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { + if (os_atomic_add_orig(&dte->dte_value, 1u, release) == 0) { // 0 -> 1 transition doesn't need a signal // force a wake even when the value is corrupt, // waiters do the validation @@ -319,7 +319,7 @@ static inline void _dispatch_thread_event_wait(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_dec(&dte->dte_value, acquire) == 0) { + if (os_atomic_sub(&dte->dte_value, 1u, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return // for any other value, take the slow path which checks it's not corrupt return; diff --git a/src/shims/priority.h b/src/shims/priority.h index 3a79c5efb..aa0008ce2 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -45,6 +45,13 @@ #ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #endif +#ifndef _PTHREAD_PRIORITY_COOPERATIVE_FLAG +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 +#endif +#ifndef _PTHREAD_PRIORITY_THREAD_TYPE_MASK +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK 0x88000000 +#endif + #else // HAVE_PTHREAD_QOS_H OS_ENUM(qos_class, unsigned int, QOS_CLASS_USER_INTERACTIVE = 0x21, @@ -64,9 +71,12 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK \ + (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | _PTHREAD_PRIORITY_COOPERATIVE_FLAG) #endif // HAVE_PTHREAD_QOS_H @@ -108,9 +118,12 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG #define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_FALLBACK_FLAG #define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define DISPATCH_PRIORITY_FLAG_COOPERATIVE ((dispatch_priority_t)0x08000000) // _PTHREAD_PRIORITY_COOPERATIVE_FLAG #define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_FALLBACK | \ - DISPATCH_PRIORITY_FLAG_MANAGER) + DISPATCH_PRIORITY_FLAG_MANAGER | DISPATCH_PRIORITY_FLAG_COOPERATIVE) +#define DISPATCH_PRIORITY_THREAD_TYPE_MASK \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_COOPERATIVE) // not passed to pthread #define DISPATCH_PRIORITY_FLAG_FLOOR ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG diff --git a/src/shims/target.h b/src/shims/target.h index 8e996aa73..425279b19 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -38,18 +38,18 @@ #if TARGET_OS_OSX # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "OS X hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "OS X hosts older than OS X 10.14 aren't supported anymore" +# endif 
// !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #elif TARGET_OS_SIMULATOR # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "Simulator hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "Simulator hosts older than OS X 10.14 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 -# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 # error "iOS hosts older than iOS 9.0 aren't supported anymore" # endif #endif diff --git a/src/shims/time.h b/src/shims/time.h index b57731c9a..851b819c4 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -254,17 +254,34 @@ _dispatch_time_now_cached(dispatch_clock_t clock, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_time_to_clock_and_value(dispatch_time_t time, +_dispatch_time_to_clock_and_value(dispatch_time_t time, bool allow_now, dispatch_clock_t *clock, uint64_t *value) { uint64_t actual_value; + + if (allow_now) { + switch (time) { + case DISPATCH_TIME_NOW: + *clock = DISPATCH_CLOCK_UPTIME; + *value = _dispatch_uptime(); + return; + case DISPATCH_MONOTONICTIME_NOW: + *clock = DISPATCH_CLOCK_MONOTONIC; + *value = _dispatch_monotonic_time(); + return; + case DISPATCH_WALLTIME_NOW: + *clock = DISPATCH_CLOCK_WALL; + *value = _dispatch_get_nanoseconds(); + return; + } + } + if ((int64_t)time < 0) { // Wall time or mach continuous time if (time & DISPATCH_WALLTIME_MASK) { // Wall time (value 11 in bits 63, 62) *clock = DISPATCH_CLOCK_WALL; - actual_value = time == DISPATCH_WALLTIME_NOW ? - _dispatch_get_nanoseconds() : (uint64_t)-time; + actual_value = (uint64_t)-time; } else { // Continuous time (value 10 in bits 63, 62). 
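
The tag scheme this hunk decodes is easier to read in isolation; a sketch (the mask placement is assumed to match DISPATCH_WALLTIME_MASK above, and the continuous-time payload extraction is illustrative):

    #include <stdint.h>

    // Negative dispatch_time_t values carry a tag in bits 63-62: 11 = wall
    // clock (stored as the negated nanosecond count), 10 = mach continuous
    // time; non-negative values are plain uptime ticks.
    #define WALLTIME_TAG (1ULL << 62) // assumed = DISPATCH_WALLTIME_MASK

    static void
    decode_time(uint64_t t, int *is_wall, uint64_t *value)
    {
        if ((int64_t)t >= 0) {            // uptime clock
            *is_wall = 0;
            *value = t;
        } else if (t & WALLTIME_TAG) {    // wall clock: undo the negation
            *is_wall = 1;
            *value = (uint64_t)-t;
        } else {                          // continuous: clear the tag bit
            *is_wall = 0;
            *value = t & ~(1ULL << 63);
        }
    }
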
*clock = DISPATCH_CLOCK_MONOTONIC; diff --git a/src/shims/tsd.h b/src/shims/tsd.h index f8b5ec9ba..ea9d18d57 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -40,6 +40,11 @@ #include #endif +#if __has_include() +#include +#endif +#include + #if !defined(OS_GS_RELATIVE) && (defined(__i386__) || defined(__x86_64__)) #define OS_GS_RELATIVE __attribute__((address_space(256))) #endif @@ -65,16 +70,8 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif #if DISPATCH_USE_DIRECT_TSD -#ifndef __TSD_THREAD_QOS_CLASS -#define __TSD_THREAD_QOS_CLASS 4 -#endif -#ifndef __TSD_RETURN_TO_KERNEL -#define __TSD_RETURN_TO_KERNEL 5 -#endif -#ifndef __TSD_MACH_SPECIAL_REPLY -#define __TSD_MACH_SPECIAL_REPLY 8 -#endif - +#undef errno +#define errno (*_pthread_errno_address_direct()) static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; @@ -94,8 +91,13 @@ static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif static const unsigned long dispatch_wlh_key = __PTK_LIBDISPATCH_KEY7; -static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; +static const unsigned long dispatch_voucher_key = OS_VOUCHER_TSD_KEY; static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; +static const unsigned long dispatch_quantum_key = __PTK_LIBDISPATCH_KEY10; +static const unsigned long dispatch_dsc_key = __PTK_LIBDISPATCH_KEY11; +static const unsigned long dispatch_enqueue_key = __PTK_LIBDISPATCH_KEY12; + +static const unsigned long os_workgroup_key = __PTK_LIBDISPATCH_WORKGROUP_KEY0; DISPATCH_TSD_INLINE static inline void @@ -153,6 +155,11 @@ struct dispatch_tsd { void *dispatch_wlh_key; void *dispatch_voucher_key; void *dispatch_deferred_items_key; + void *dispatch_quantum_key; + void *dispatch_dsc_key; + void *dispatch_enqueue_key; + + void *os_workgroup_key; }; extern _Thread_local struct dispatch_tsd __dispatch_tsd; @@ -209,6 +216,11 @@ extern pthread_key_t dispatch_bcounter_key; extern pthread_key_t dispatch_wlh_key; extern pthread_key_t dispatch_voucher_key; extern pthread_key_t dispatch_deferred_items_key; +extern pthread_key_t dispatch_quantum_key; +extern pthread_key_t dispatch_dsc_key; +extern pthread_key_t dispatch_enqueue_key; + +extern pthread_key_t os_workgroup_key; DISPATCH_TSD_INLINE static inline void @@ -353,7 +365,11 @@ DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { -#if __has_include() +#if TARGET_OS_SIMULATOR + size_t n; + pthread_cpu_number_np(&n); + return (unsigned int)n; +#elif __has_include() return _os_cpu_number(); #elif defined(__x86_64__) || defined(__i386__) struct { uintptr_t p1, p2; } p; diff --git a/src/shims/yield.c b/src/shims/yield.c index 43f0017ee..cd7e1acf8 100644 --- a/src/shims/yield.c +++ b/src/shims/yield.c @@ -22,18 +22,19 @@ DISPATCH_NOINLINE static void * -__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr) +__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr, void **tailp) { - int spins = 0; + unsigned int spins = 0; void *value; while ((value = os_atomic_load(ptr, relaxed)) == NULL) { - _dispatch_preemption_yield(++spins); + /* ptr == &prev->do_next */ + _dispatch_yield_to_enqueuer(tailp, ++spins); } return value; } void * -_dispatch_wait_for_enqueuer(void **ptr) +_dispatch_wait_for_enqueuer(void **ptr, void **tailp) { #if !DISPATCH_HW_CONFIG_UP #if defined(__arm__) || defined(__arm64__) @@ -57,5 +58,5 @@ 
_dispatch_wait_for_enqueuer(void **ptr) } #endif #endif // DISPATCH_HW_CONFIG_UP - return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr); + return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr, tailp); } diff --git a/src/shims/yield.h b/src/shims/yield.h index 53eb80065..aeb429d44 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -80,7 +80,7 @@ #endif DISPATCH_NOT_TAIL_CALLED DISPATCH_EXPORT -void *_dispatch_wait_for_enqueuer(void **ptr); +void *_dispatch_wait_for_enqueuer(void **ptr, void **tailp); #pragma mark - #pragma mark _dispatch_contention_wait_until @@ -140,12 +140,22 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #pragma mark - #pragma mark _dispatch_preemption_yield +/* Don't allow directed yield to enqueuer if !_pthread_has_direct_tsd() */ +#ifndef DISPATCH_HAVE_YIELD_TO_ENQUEUER +#if PTHREAD_HAVE_YIELD_TO_ENQUEUER && !TARGET_OS_SIMULATOR +#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 1 +#else +#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 0 +#endif +#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */ + #if HAVE_MACH #if defined(SWITCH_OPTION_OSLOCK_DEPRESS) #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS #else #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS #endif + #define _dispatch_preemption_yield(n) thread_switch(MACH_PORT_NULL, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ @@ -161,6 +171,20 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } #endif // HAVE_MACH +#if DISPATCH_HAVE_YIELD_TO_ENQUEUER +#define _dispatch_set_enqueuer_for(ptr) \ + _dispatch_thread_setspecific(dispatch_enqueue_key, (void *) (ptr)); +#define _dispatch_clear_enqueuer() \ + _dispatch_thread_setspecific(dispatch_enqueue_key, NULL); +#define _dispatch_yield_to_enqueuer(q, n) \ + (void) _pthread_yield_to_enqueuer_4dispatch(dispatch_enqueue_key, q, n) +#else +#define _dispatch_set_enqueuer_for(ptr) +#define _dispatch_clear_enqueuer(ptr) +#define _dispatch_yield_to_enqueuer(q, n) \ + ((void) (q), _dispatch_preemption_yield(n)) +#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */ + #pragma mark - #pragma mark _dispatch_contention_usleep diff --git a/src/source.c b/src/source.c index 1010da197..9af2a4a8b 100644 --- a/src/source.c +++ b/src/source.c @@ -60,6 +60,9 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, if (unlikely(!dq)) { dq = _dispatch_get_default_queue(true); } else { + if (_dispatch_queue_is_cooperative(dq)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; @@ -90,7 +93,7 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); if (unlikely((dqf & DSF_STRICT) && !(dqf & DSF_CANCELED) && _dispatch_source_get_cancel_handler(ds->ds_refs))) { - DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " + DISPATCH_CLIENT_CRASH(dqf, "Release of a source that has not been " "cancelled, but has a mandatory cancel handler"); } dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); @@ -327,7 +330,7 @@ _dispatch_source_set_handler(dispatch_source_t ds, void *func, if (_dispatch_lane_try_inactive_suspend(ds)) { _dispatch_source_handler_replace(ds, kind, dc); - return _dispatch_lane_resume(ds, false); + return _dispatch_lane_resume(ds, DISPATCH_RESUME); } dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); @@ -441,7 +444,10 @@ 
_dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, dc = _dispatch_source_handler_take(ds->ds_refs, DS_REGISTN_HANDLER); if (ds->dq_atomic_flags & (DSF_CANCELED | DQF_RELEASED)) { // no registration callout if source is canceled rdar://problem/8955246 - return _dispatch_source_handler_dispose(dc); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_source_handler_dispose(dc); + }); + return; } if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { dc->dc_ctxt = ds->do_ctxt; @@ -458,22 +464,33 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc; - dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); - dr->ds_pending_data = 0; - dr->ds_data = 0; - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); - _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); - if (!dc) { - return; - } - if (!(ds->dq_atomic_flags & DSF_CANCELED)) { - return _dispatch_source_handler_dispose(dc); - } - if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { - dc->dc_ctxt = ds->do_ctxt; - } - _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); - _dispatch_continuation_pop(dc, NULL, flags, cq); + dispatch_invoke_with_autoreleasepool(flags, { + dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); + dr->ds_pending_data = 0; + dr->ds_data = 0; + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + if (!dc) { + /* nothing to do here */ + } else if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + _dispatch_source_handler_dispose(dc); + } else { + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); + + // + // Make sure _dispatch_continuation_pop() will not + // add its own autoreleasepool since we have one, + // and there's magic in objc that makes _one_ + // autoreleasepool cheap. + // + flags &= ~DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + _dispatch_continuation_pop(dc, NULL, flags, cq); + } + + }); } DISPATCH_ALWAYS_INLINE @@ -513,7 +530,7 @@ _dispatch_source_timer_data(dispatch_timer_source_refs_t dr, uint64_t prev) // We hence need dependency ordering to pair with the release barrier // done by _dispatch_timers_run2() when setting the DISARMED_MARKER bit. 
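	// (A sketch of the consume-style pattern at work here: after the
	// relaxed load that observed `data`, the fence plus
	//
	//	dr = os_atomic_inject_dependency(dr, data);
	//
	// launders `dr` through `data`, so every later load via `dr` carries
	// a data dependency on that observation and is ordered after it
	// without paying for a full acquire barrier.)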
os_atomic_thread_fence(dependency); - dr = os_atomic_force_dependency_on(dr, data); + dr = os_atomic_inject_dependency(dr, data); if (dr->dt_timer.target < INT64_MAX) { uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); @@ -580,7 +597,9 @@ _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, } if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) { _dispatch_trace_item_complete(dc); // see _dispatch_after - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + }); dispatch_release(ds); // dispatch_after sources are one-shot } } @@ -639,7 +658,7 @@ _dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, } void -_dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) +_dispatch_source_activate(dispatch_source_t ds) { dispatch_continuation_t dc; dispatch_source_refs_t dr = ds->ds_refs; @@ -669,7 +688,7 @@ _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) } // call "super" - _dispatch_lane_activate(ds, allow_resume); + _dispatch_lane_activate(ds); if ((dr->du_is_direct || dr->du_is_timer) && !ds->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(ds, &wlh); @@ -688,6 +707,7 @@ _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) _dispatch_unote_state_set(dr, wlh, 0); } #endif + // rdar://45419440 this needs to be last _dispatch_source_install(ds, wlh, pri); } } @@ -729,7 +749,7 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // Intentionally always drain even when on the manager queue // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. - dispatch_with_disabled_narrowing(dic, { + dispatch_with_disabled_narrowing(dic, flags, { retq = _dispatch_lane_serial_drain(ds, dic, flags, owned); }); } @@ -810,10 +830,8 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, avoid_starvation = dq->do_targetq || !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } - if (avoid_starvation && - os_atomic_load2o(dr, ds_pending_data, relaxed)) { - retq = ds->do_targetq; - } + + ds->ds_latched = true; } else { // there is no point trying to be eager, the next thing to do is // to deliver the event @@ -865,21 +883,61 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // from the source handler return ds->do_targetq; } - if (avoid_starvation && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { - // keep the old behavior to force re-enqueue to our target queue - // for the rearm. + if (dr->du_is_direct && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { // - // if the handler didn't run, or this is a pending delete - // or our target queue is a global queue, then starvation is - // not a concern and we can rearm right away. - return ds->do_targetq; - } - _dispatch_unote_resume(dr); - if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { - // try to redrive the drain from under the lock for sources - // targeting an overcommit root queue to avoid parking - // when the next event has already fired - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + // for legacy, direct event delivery, + // _dispatch_source_install above could cause a worker thread to + // deliver an event, and disarm the knote before we're through. 
+ // + // This can lead to a double fire of the event handler for the same + // event with the following ordering: + // + //------------------------------------------------------------------ + // Thread1 Thread2 + // + // _dispatch_source_invoke() + // _dispatch_source_install() + // _dispatch_kevent_worker_thread() + // _dispatch_source_merge_evt() + // + // _dispatch_unote_resume() + // _dispatch_kevent_worker_thread() + // < re-enqueue due DIRTY > + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // _dispatch_unote_resume() + // _dispatch_source_merge_evt() + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // + //------------------------------------------------------------------ + // + // To avoid this situation, we should never resume a direct source + // for which we haven't fired an event. + // + // Note: this isn't a concern for kqworkloops as event delivery is + // serial with draining it by design. + // + if (ds->ds_latched) { + ds->ds_latched = false; + _dispatch_unote_resume(dr); + } + if (avoid_starvation) { + // To avoid starvation of a source firing immediately when we + // rearm it, force a round-trip through the end of the target + // queue no matter what. + return ds->do_targetq; + } + } else { + _dispatch_unote_resume(dr); + if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { + // try to redrive the drain from under the lock for sources + // targeting an overcommit root queue to avoid parking + // when the next event has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } } } @@ -1134,6 +1192,7 @@ _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, du._dr); _dispatch_object_debug(ds, "%s", __func__); dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -1192,15 +1251,7 @@ _dispatch_timer_config_create(dispatch_time_t start, // future, this will default to UPTIME if no clock was set. 
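	// (Reading the change below: the boolean argument added to
	// _dispatch_time_to_clock_and_value() appears to mean "resolve a
	// 'now' value to the current reading of its clock", replacing the
	// open-coded DISPATCH_TIME_NOW handling deleted here and in
	// dispatch_time(); callers such as _dispatch_timeout() and
	// _dispatch_after(), which handle 'now' themselves, pass false.)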
clock = _dispatch_timer_flags_to_clock(dt->du_timer_flags); } else { - _dispatch_time_to_clock_and_value(start, &clock, &target); - if (target == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - target = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - target = _dispatch_monotonic_time(); - } - } + _dispatch_time_to_clock_and_value(start, true, &clock, &target); } if (clock != DISPATCH_CLOCK_WALL) { @@ -1359,7 +1410,7 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t dq, dispatch_clock_t clock; uint64_t target; - _dispatch_time_to_clock_and_value(when, &clock, &target); + _dispatch_time_to_clock_and_value(when, false, &clock, &target); if (clock != DISPATCH_CLOCK_WALL) { leeway = _dispatch_time_nano2mach(leeway); } diff --git a/src/source_internal.h b/src/source_internal.h index f38c2e9d4..9297ac5cd 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -35,15 +35,21 @@ _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object) DISPATCH_CLASS_DECL_BARE(source, QUEUE); +DISPATCH_CLASS_DECL(channel, QUEUE); + #define DISPATCH_SOURCE_CLASS_HEADER(x) \ DISPATCH_LANE_CLASS_HEADER(x); \ uint16_t \ /* set under the drain lock */ \ ds_is_installed:1, \ + ds_latched:1, \ dm_connect_handler_called:1, \ dm_cancel_handler_called:1, \ dm_is_xpc:1, \ - __ds_flags_pad : 12; \ + dm_arm_no_senders:1, \ + dm_made_sendrights:1, \ + dm_strict_reply:1, \ + __ds_flags_pad : 8; \ uint16_t __dq_flags_separation[0]; \ uint16_t \ /* set under the send queue lock */ \ @@ -57,9 +63,15 @@ struct dispatch_source_s { dispatch_assert_valid_lane_type(dispatch_source_s); dispatch_static_assert(sizeof(struct dispatch_source_s) <= 128); +struct dispatch_channel_s { + DISPATCH_SOURCE_CLASS_HEADER(channel); +} DISPATCH_ATOMIC64_ALIGN; +dispatch_assert_valid_lane_type(dispatch_channel_s); +dispatch_static_assert(sizeof(struct dispatch_channel_s) <= 128); + void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); -void _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume); +void _dispatch_source_activate(dispatch_source_t ds); void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, @@ -67,6 +79,15 @@ void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); DISPATCH_COLD -size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +size_t _dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz); + +void _dispatch_channel_xref_dispose(dispatch_channel_t dch); +void _dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free); +void _dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +DISPATCH_COLD +size_t _dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/time.c b/src/time.c index b70f81343..30ed53b26 100644 --- a/src/time.c +++ b/src/time.c @@ -43,7 +43,7 @@ _dispatch_mach_host_time_mach2nano(uint64_t machtime) return INT64_MAX; } long double big_tmp = ((long double)machtime * data->frac) + .5L; - if (unlikely(big_tmp >= 
INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -61,7 +61,7 @@ _dispatch_mach_host_time_nano2mach(uint64_t nsec) return INT64_MAX; } long double big_tmp = ((long double)nsec / data->frac) + .5L; - if (unlikely(big_tmp >= INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -98,7 +98,7 @@ dispatch_time(dispatch_time_t inval, int64_t delta) dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(inval, &clock, &value); + _dispatch_time_to_clock_and_value(inval, true, &clock, &value); if (value == DISPATCH_TIME_FOREVER) { // Out-of-range for this clock. return value; @@ -122,14 +122,6 @@ dispatch_time(dispatch_time_t inval, int64_t delta) // up time or monotonic time. "value" has the clock type removed, // so the test against DISPATCH_TIME_NOW is correct for either clock. - if (value == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - value = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - value = _dispatch_monotonic_time(); - } - } if (delta >= 0) { offset = _dispatch_time_nano2mach((uint64_t)delta); if ((int64_t)(value += offset) <= 0) { @@ -145,6 +137,37 @@ dispatch_time(dispatch_time_t inval, int64_t delta) } } +bool +dispatch_time_to_nsecs(dispatch_time_t time, + dispatch_clockid_t *clock_out, uint64_t *nsecs_out) +{ + dispatch_clock_t clock; + uint64_t value; + + if (time != DISPATCH_TIME_FOREVER) { + _dispatch_time_to_clock_and_value(time, true, &clock, &value); + + switch (clock) { + case DISPATCH_CLOCK_WALL: + *clock_out = DISPATCH_CLOCKID_WALLTIME; + *nsecs_out = value; + return true; + case DISPATCH_CLOCK_UPTIME: + *clock_out = DISPATCH_CLOCKID_UPTIME; + *nsecs_out = _dispatch_time_mach2nano(value); + return true; + case DISPATCH_CLOCK_MONOTONIC: + *clock_out = DISPATCH_CLOCKID_MONOTONIC; + *nsecs_out = _dispatch_time_mach2nano(value); + return true; + } + } + + *clock_out = 0; + *nsecs_out = UINT64_MAX; + return false; +} + dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { @@ -166,16 +189,19 @@ uint64_t _dispatch_timeout(dispatch_time_t when) { dispatch_time_t now; - if (when == DISPATCH_TIME_FOREVER) { + + switch (when) { + case DISPATCH_TIME_FOREVER: return DISPATCH_TIME_FOREVER; - } - if (when == DISPATCH_TIME_NOW) { + case DISPATCH_TIME_NOW: + case DISPATCH_MONOTONICTIME_NOW: + case DISPATCH_WALLTIME_NOW: return 0; } dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(when, &clock, &value); + _dispatch_time_to_clock_and_value(when, false, &clock, &value); if (clock == DISPATCH_CLOCK_WALL) { now = _dispatch_get_nanoseconds(); return now >= value ? 
0 : value - now; diff --git a/src/trace.h b/src/trace.h index ed69e1b56..e4303dfd3 100644 --- a/src/trace.h +++ b/src/trace.h @@ -71,8 +71,19 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_introspection_callout_return(ctxt, func); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t, size_t)) +{ + dispatch_function_t func = (dispatch_function_t)f; + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout3_a(ctxt, i, w, f)); + _dispatch_introspection_callout_return(ctxt, func); +} + #define _dispatch_client_callout _dispatch_trace_client_callout #define _dispatch_client_callout2 _dispatch_trace_client_callout2 +#define _dispatch_client_callout3_a _dispatch_trace_client_callout3_a #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION #ifdef _COMM_PAGE_KDEBUG_ENABLE diff --git a/src/voucher.c b/src/voucher.c index f8ce0c841..61f1643df 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -24,8 +24,6 @@ #define PERSONA_ID_NONE ((uid_t)-1) #endif -#if !DISPATCH_VARIANT_DYLD_STUB - #if VOUCHER_USE_MACH_VOUCHER #if !HAVE_PTHREAD_WORKQUEUE_QOS #error Unsupported configuration, workqueue QoS support is required @@ -235,6 +233,7 @@ _voucher_insert(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; if (!kv) return; + _voucher_hash_lock_lock(); if (unlikely(_voucher_hash_is_enqueued(v))) { _dispatch_voucher_debug("corruption", v); @@ -796,7 +795,9 @@ _voucher_dispose(voucher_t voucher) voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; #endif +#if !USE_OBJC return _os_object_dealloc((_os_object_t)voucher); +#endif // !USE_OBJC } void @@ -827,6 +828,31 @@ _voucher_activity_debug_channel_init(void) } } +static bool +_voucher_hash_is_empty() { + _voucher_hash_lock_lock(); + + bool empty = true; + for (unsigned int i = 0; i < VL_HASH_SIZE; i++) { + voucher_hash_head_s *head = &_voucher_hash[i]; + if (_voucher_hash_get_next(head->vhh_first) != VOUCHER_NULL) { + empty = false; + break; + } + } + _voucher_hash_lock_unlock(); + + return empty; +} + +void +_voucher_atfork_parent(void) +{ + if (!_voucher_hash_is_empty()){ + _dispatch_fork_becomes_unsafe(); + } +} + void _voucher_atfork_child(void) { @@ -841,6 +867,39 @@ _voucher_atfork_child(void) _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE } +static void +_voucher_process_can_use_arbitrary_personas_init(void *__unused ctxt) +{ +#if VOUCHER_USE_PERSONA_ADOPT_ANY + mach_voucher_t kv = _voucher_get_task_mach_voucher(); + kern_return_t kr; + + mach_voucher_attr_content_t result_out; + mach_msg_type_number_t result_out_size; + + boolean_t local_result; + result_out = (mach_voucher_attr_content_t) &local_result; + result_out_size = sizeof(local_result); + + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_BANK, + BANK_PERSONA_ADOPT_ANY, NULL, 0, result_out, &result_out_size); + if (kr != KERN_SUCCESS) { + DISPATCH_INTERNAL_CRASH(kr, "mach_voucher_attr_command(BANK_PERSONA_ADOPT_ANY) failed"); + } + + _voucher_process_can_use_arbitrary_personas = !!local_result; +#endif /* VOUCHER_USE_PERSONA_ADOPT_ANY */ +} + +bool +voucher_process_can_use_arbitrary_personas(void) +{ + dispatch_once_f(&_voucher_process_can_use_arbitrary_personas_pred, NULL, + _voucher_process_can_use_arbitrary_personas_init); + + return _voucher_process_can_use_arbitrary_personas; +} + voucher_t voucher_copy_with_persona_mach_voucher(mach_voucher_t 
persona_mach_voucher) { @@ -913,13 +972,10 @@ mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher) mach_voucher_t bkv = MACH_VOUCHER_NULL; kern_return_t kr = KERN_NOT_SUPPORTED; #if VOUCHER_USE_PERSONA - mach_voucher_t kv = _voucher_get_task_mach_voucher(); - const mach_voucher_attr_recipe_data_t bank_send_recipe[] = { [0] = { .key = MACH_VOUCHER_ATTR_KEY_BANK, - .command = MACH_VOUCHER_ATTR_COPY, - .previous_voucher = kv, + .command = MACH_VOUCHER_ATTR_BANK_CREATE, }, [1] = { .key = MACH_VOUCHER_ATTR_KEY_BANK, @@ -1156,6 +1212,21 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, "voucher_activity_initialize_4libtrace called twice"); } + + // HACK: we can't call into os_variant until after the initialization of + // dispatch and XPC, but we want to do it before the end of libsystem + // initialization to avoid having to synchronize _dispatch_mode explicitly, + // so this happens to be just the right spot +#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { + return; + } else if (getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { + return; + } + + _dispatch_mode &= ~DISPATCH_MODE_NO_FAULTS; +#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR } void @@ -1275,6 +1346,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _voucher_activity_disabled(void) { + dispatch_once_f(&_firehose_task_buffer_pred, NULL, _firehose_task_buffer_init); @@ -1621,7 +1693,7 @@ _voucher_debug(voucher_t v, char *buf, size_t bufsiz) v->v_activity, v->v_activity_creator, v->v_parent_activity); } bufprintf(" }"); - + return offset; } @@ -1634,7 +1706,7 @@ format_hex_data(char *prefix, char *desc, uint8_t *data, size_t data_len, uint8_t *pc = data; if (desc) { - bufprintf("%s%s:\n", prefix, desc); + bufprintf("%s%s:\n", prefix, desc); } ssize_t offset_in_row = -1; @@ -1672,10 +1744,6 @@ format_recipe_detail(mach_voucher_attr_recipe_t recipe, char *buf, bufprintf("Content size: %u\n", recipe->content_size); switch (recipe->key) { - case MACH_VOUCHER_ATTR_KEY_ATM: - bufprintprefix(); - bufprintf("ATM ID: %llu", *(uint64_t *)(uintptr_t)recipe->content); - break; case MACH_VOUCHER_ATTR_KEY_IMPORTANCE: bufprintprefix(); bufprintf("IMPORTANCE INFO: %s", (char *)recipe->content); @@ -1740,7 +1808,7 @@ voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, } else { bufprintprefix(); bufprintf("Invalid voucher: 0x%x\n", voucher); - } + } done: return offset; @@ -1919,6 +1987,12 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf } #endif // __has_include() +bool +voucher_process_can_use_arbitrary_personas(void) +{ + return false; +} + void _voucher_activity_debug_channel_init(void) { @@ -1938,8 +2012,8 @@ _voucher_init(void) void* voucher_activity_get_metadata_buffer(size_t *length) { - *length = 0; - return NULL; + *length = 0; + return NULL; } voucher_t @@ -2026,17 +2100,3 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) } #endif // VOUCHER_USE_MACH_VOUCHER - -#else // DISPATCH_VARIANT_DYLD_STUB - -firehose_activity_id_t -voucher_get_activity_id_4dyld(void) -{ -#if VOUCHER_USE_MACH_VOUCHER - return _voucher_get_activity_id(_voucher_get(), NULL); -#else - return 0; -#endif -} - -#endif // DISPATCH_VARIANT_DYLD_STUB diff --git a/src/voucher_internal.h b/src/voucher_internal.h index ec8874346..c50c36ca4 100644 --- a/src/voucher_internal.h +++ 
b/src/voucher_internal.h @@ -89,6 +89,7 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); +void _voucher_atfork_parent(void); void _voucher_activity_debug_channel_init(void); #if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, @@ -155,7 +156,7 @@ OS_ENUM(voucher_fields, uint16_t, typedef struct voucher_s { _OS_OBJECT_HEADER( - struct voucher_vtable_s *os_obj_isa, + struct voucher_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); struct voucher_hash_entry_s { @@ -233,7 +234,7 @@ _voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v) #if VOUCHER_ENABLE_RECIPE_OBJECTS typedef struct voucher_recipe_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); size_t vr_allocation_size; @@ -309,7 +310,7 @@ _voucher_release_inline(struct voucher_s *voucher) if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } - return _os_object_xref_dispose((_os_object_t)voucher); + return _voucher_xref_dispose((voucher_t)voucher); } #if DISPATCH_PURE_C @@ -450,6 +451,10 @@ _voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; + +extern dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +extern bool _voucher_process_can_use_arbitrary_personas; + #if VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER #define _voucher_default_task_mach_voucher MACH_VOUCHER_NULL #else diff --git a/src/workgroup.c b/src/workgroup.c new file mode 100644 index 000000000..ae47870f5 --- /dev/null +++ b/src/workgroup.c @@ -0,0 +1,1580 @@ +/* + * Copyright (c) 2019-2021 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#define PTHREAD_WORKGROUP_SPI 1 + +#include "internal.h" + +#include +#include +#include + +/* Declares struct symbols */ + +OS_OBJECT_CLASS_DECL(os_workgroup); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup, + (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_dispose); +#endif // !USE_OBJC +#define WORKGROUP_CLASS OS_OBJECT_VTABLE(os_workgroup) + +OS_OBJECT_CLASS_DECL(os_workgroup_interval); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup_interval, + (void (*)(_os_object_t))_os_workgroup_interval_explicit_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_interval_explicit_dispose); +#endif // USE_OBJC +#define WORKGROUP_INTERVAL_CLASS OS_OBJECT_VTABLE(os_workgroup_interval) + +OS_OBJECT_CLASS_DECL(os_workgroup_parallel); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup_parallel, + (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_dispose); +#endif // USE_OBJC +#define WORKGROUP_PARALLEL_CLASS OS_OBJECT_VTABLE(os_workgroup_parallel) + +#pragma mark Internal functions + +/* These are default workgroup attributes to be used when no user attribute is + * passed in in creation APIs. + * + * For all classes, workgroup propagation is currently not supported. + * + * Class Default attribute Eventually supported + * + * os_workgroup_t propagating nonpropagating, propagating + * os_workgroup_interval_t nonpropagating nonpropagating, propagating + * os_workgroup_parallel_t nonpropagating nonpropagating + * + * Class Default attribute supported + * os_workgroup_t differentiated differentiated, undifferentiated + * os_workgroup_interval_t differentiated differentiated + * os_workgroup_parallel_t undifferentiated undifferentiated, differentiated + */ +static const struct os_workgroup_attr_s _os_workgroup_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = 0, +}; + +static const struct os_workgroup_attr_s _os_workgroup_with_workload_id_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING, +}; + +static const struct os_workgroup_attr_s _os_workgroup_interval_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING +}; + +static const struct os_workgroup_attr_s _os_workgroup_parallel_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_PARALLEL, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING | + OS_WORKGROUP_ATTR_UNDIFFERENTIATED, +}; + +void +_os_workgroup_xref_dispose(os_workgroup_t wg) +{ + os_workgroup_arena_t arena = wg->wg_arena; + + if (arena == NULL) { + return; + } + + arena->destructor(arena->client_arena); + free(arena); +} + +void +_os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi) +{ + uint64_t wg_state = wgi->wg_state; + if (wg_state & OS_WORKGROUP_INTERVAL_STARTED) { + os_crash("BUG IN CLIENT: Releasing last reference to workgroup interval " + "while an interval has been started"); + } +} + +#if !USE_OBJC +void +_os_workgroup_explicit_xref_dispose(os_workgroup_t wg) +{ + _os_workgroup_xref_dispose(wg); + _os_object_release_internal(wg->_as_os_obj); +} + +void +_os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_xref_dispose(wgi); + 
_os_workgroup_explicit_xref_dispose(wgi->_as_wg); +} +#endif + +static inline bool +_os_workgroup_is_configurable(uint64_t wg_state) +{ + return (wg_state & OS_WORKGROUP_OWNER) == OS_WORKGROUP_OWNER; +} + +void +_os_workgroup_dispose(os_workgroup_t wg) +{ + dispatch_assert(wg->joined_cnt == 0); + + kern_return_t kr; + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (_os_workgroup_is_configurable(wg_state)) { + kr = work_interval_destroy(wg->wi); + } else { + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, -1); + } + os_assumes(kr == KERN_SUCCESS); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)wg->name); + } +} + +void +_os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size) +{ + snprintf(buf, size, "wg[%p] = {xref = %d, ref = %d, name = %s}", + (void *) wg, wg->do_xref_cnt + 1, wg->do_ref_cnt + 1, wg->name); +} + +void +_os_workgroup_interval_dispose(os_workgroup_interval_t wgi) +{ + work_interval_instance_free(wgi->wii); +} + +#if !USE_OBJC +void +_os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_dispose(wgi); + _os_workgroup_dispose(wgi->_as_wg); +} +#endif + +#define os_workgroup_inc_refcount(wg) \ + _os_object_retain_internal(wg->_as_os_obj); + +#define os_workgroup_dec_refcount(wg) \ + _os_object_release_internal(wg->_as_os_obj); + +void +_os_workgroup_tsd_cleanup(void *ctxt) /* Destructor for the tsd key */ +{ + os_workgroup_t wg = (os_workgroup_t) ctxt; + if (wg != NULL) { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN CLIENT: Thread exiting without leaving workgroup '%s'", wg->name); + + os_crash(buf); + } +} + +static os_workgroup_t +_os_workgroup_get_current(void) +{ + return (os_workgroup_t) _dispatch_thread_getspecific(os_workgroup_key); +} + +static void +_os_workgroup_set_current(os_workgroup_t new_wg) +{ + if (new_wg != NULL) { + os_workgroup_inc_refcount(new_wg); + } + + os_workgroup_t old_wg = _os_workgroup_get_current(); + _dispatch_thread_setspecific(os_workgroup_key, new_wg); + + if (old_wg != NULL) { + os_workgroup_dec_refcount(old_wg); + } +} + +static inline bool +_os_workgroup_attr_is_resolved(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_RESOLVED_INIT); +} + +static inline bool +_os_workgroup_client_attr_initialized(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT) || + (attr->sig == _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT); +} + +static inline bool +_os_workgroup_attr_is_propagating(os_workgroup_attr_t attr) +{ + return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_NONPROPAGATING) == 0; +} + +static inline bool +_os_workgroup_attr_is_differentiated(os_workgroup_attr_t attr) +{ + return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_UNDIFFERENTIATED) == 0; +} + +static inline bool +_os_workgroup_type_is_interval_type(os_workgroup_type_t wg_type) +{ + return (wg_type >= OS_WORKGROUP_INTERVAL_TYPE_DEFAULT) && + (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_ARKIT); +} + +static bool +_os_workgroup_type_is_audio_type(os_workgroup_type_t wg_type) +{ + return (wg_type == OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO) || + (wg_type == OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT); +} + +static inline bool +_os_workgroup_type_is_parallel_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_PARALLEL; +} + +static inline bool +_os_workgroup_type_is_default_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_DEFAULT; +} + + +static inline bool 
+_os_workgroup_has_backing_workinterval(os_workgroup_t wg) +{ + return wg->wi != NULL; +} + +static inline uint32_t +_wi_flags_to_wi_type(uint32_t wi_flags) +{ + return wi_flags & WORK_INTERVAL_TYPE_MASK; +} + +#if !TARGET_OS_SIMULATOR +static os_workgroup_type_t +_wi_flags_to_wg_type(uint32_t wi_flags) +{ + uint32_t type = _wi_flags_to_wi_type(wi_flags); + bool is_unrestricted = (wi_flags & WORK_INTERVAL_FLAG_UNRESTRICTED); + + switch (type) { + case WORK_INTERVAL_TYPE_DEFAULT: + /* Technically, this could be OS_WORKGROUP_INTERVAL_TYPE_DEFAULT + * as well but we can't know so we just assume it's a regular + * workgroup + */ + return OS_WORKGROUP_TYPE_DEFAULT; + case WORK_INTERVAL_TYPE_COREAUDIO: + return (is_unrestricted ? OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT : + OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO); + case WORK_INTERVAL_TYPE_COREANIMATION: + /* and WORK_INTERVAL_TYPE_CA_RENDER_SERVER */ + + /* We cannot distinguish between + * OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION and + * OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER since + * WORK_INTERVAL_TYPE_COREANIMATION and + * WORK_INTERVAL_TYPE_CA_RENDER_SERVER have the same value */ + return OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION; + case WORK_INTERVAL_TYPE_HID_DELIVERY: + return OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY; + case WORK_INTERVAL_TYPE_COREMEDIA: + return OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA; + case WORK_INTERVAL_TYPE_ARKIT: + return OS_WORKGROUP_INTERVAL_TYPE_ARKIT; + case WORK_INTERVAL_TYPE_CA_CLIENT: + return OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT; + default: + { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN DISPATCH: Invalid wi flags = %u", wi_flags); + os_crash(buf); + } + } +} +#endif + +static uint32_t +_wg_type_to_wi_flags(os_workgroup_type_t wg_type) +{ + switch (wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_DEFAULT: + return WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_UNRESTRICTED; + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + return (WORK_INTERVAL_TYPE_COREAUDIO | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + case OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION: + return WORK_INTERVAL_TYPE_COREANIMATION; + case OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER: + return WORK_INTERVAL_TYPE_CA_RENDER_SERVER; + case OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY: + return WORK_INTERVAL_TYPE_HID_DELIVERY; + case OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA: + return WORK_INTERVAL_TYPE_COREMEDIA; + case OS_WORKGROUP_INTERVAL_TYPE_ARKIT: + return (WORK_INTERVAL_TYPE_ARKIT | + WORK_INTERVAL_FLAG_FINISH_AT_DEADLINE); + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + return (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + case OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT: + return WORK_INTERVAL_TYPE_CA_CLIENT | WORK_INTERVAL_FLAG_UNRESTRICTED; + case OS_WORKGROUP_TYPE_DEFAULT: + /* Non-interval workgroup types */ + return WORK_INTERVAL_FLAG_UNRESTRICTED; + default: + os_crash("Creating an os_workgroup of unknown type"); + } +} + +static inline uint32_t +_wg_type_to_wi_type(os_workgroup_type_t wg_type) +{ + return _wi_flags_to_wi_type(_wg_type_to_wi_flags(wg_type)); +} + +static inline int +_os_workgroup_get_wg_wi_types_from_port(mach_port_t port, + os_workgroup_type_t *out_wg_type, uint32_t *out_wi_type) +{ + os_workgroup_type_t wg_type = OS_WORKGROUP_TYPE_DEFAULT; + uint32_t wi_type = WORK_INTERVAL_TYPE_DEFAULT; + +#if !TARGET_OS_SIMULATOR + uint32_t wi_flags = 0; + int ret = 
work_interval_get_flags_from_port(port, &wi_flags); + if (ret != 0) { + return ret; + } + wg_type = _wi_flags_to_wg_type(wi_flags); + wi_type = _wi_flags_to_wi_type(wi_flags); +#else + (void)port; +#endif + + if (out_wg_type) *out_wg_type = wg_type; + if (out_wi_type) *out_wi_type = wi_type; + + return 0; +} + +static work_interval_t +_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +{ + /* All workgroups are joinable */ + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; + + flags |= _wg_type_to_wi_flags(attr->wg_type); + + if (_os_workgroup_attr_is_differentiated(attr)) { + flags |= WORK_INTERVAL_FLAG_GROUP; + } + + work_interval_t wi; + int rv = work_interval_create(&wi, flags); + if (rv) { + return NULL; + } + + return wi; +} + +struct os_workgroup_workload_id_table_entry_s { + const char* wl_id; + os_workgroup_type_t wl_type; +}; + +#if !TARGET_OS_SIMULATOR +static const struct os_workgroup_workload_id_table_entry_s + _os_workgroup_workload_id_table[] = { + { + .wl_id = "com.apple.coreaudio.hal.iothread", + .wl_type = OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO, + }, + { + .wl_id = "com.apple.coreaudio.hal.clientthread", + .wl_type = OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + }, +}; +#endif // !TARGET_OS_SIMULATOR + +static os_workgroup_type_t +_os_workgroup_lookup_type_from_workload_id(const char *workload_id) +{ + os_workgroup_type_t workload_type = OS_WORKGROUP_TYPE_DEFAULT; + + if (!workload_id) { + DISPATCH_CLIENT_CRASH(0, "Workload identifier must not be NULL"); + } +#if !TARGET_OS_SIMULATOR + for (size_t i = 0; i < countof(_os_workgroup_workload_id_table); i++) { + if (!strcasecmp(workload_id, _os_workgroup_workload_id_table[i].wl_id)){ + workload_type = _os_workgroup_workload_id_table[i].wl_type; + if (_os_workgroup_type_is_default_type(workload_type)) { + DISPATCH_INTERNAL_CRASH(i, "Invalid workload ID type"); + } + break; + } + } +#if OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID + if (_os_workgroup_type_is_default_type(workload_type)) { + _dispatch_log("WARNING: os_workgroup: Unknown workload ID \"%s\"", + workload_id); + } +#endif +#endif // !TARGET_OS_SIMULATOR + return workload_type; +} + +static inline os_workgroup_attr_t +_os_workgroup_workload_id_attr_resolve(const char *workload_id, + os_workgroup_attr_t attr, + const os_workgroup_attr_s *default_attr) +{ + /* N.B: expects to be called with the attr pointer returned by + * _os_workgroup_client_attr_resolve() (i.e. a mutable local copy) */ + os_workgroup_type_t wl_type = + _os_workgroup_lookup_type_from_workload_id(workload_id); + if (_os_workgroup_type_is_default_type(wl_type)) { + /* Unknown workload ID, fallback to attribute type */ + return attr; + } + /* Require matching types between workload ID and attribute. + * Use workload ID type as the type implied by the default attribute */ + if (attr->wg_type == default_attr->wg_type) { + attr->wg_type = wl_type; + } else if (wl_type != attr->wg_type) { + /* Workload ID and attribute type mismatch */ + return NULL; + } + return attr; +} + +static inline bool +_os_workgroup_workload_id_is_valid_for_wi_type(const char *workload_id, + uint32_t wi_type) +{ + os_workgroup_type_t wl_type = + _os_workgroup_lookup_type_from_workload_id(workload_id); + if (_os_workgroup_type_is_default_type(wl_type)) { + /* Unknown workload ID, nothing to match */ + return true; + } + /* Require matching workinterval types between workload ID and passed in + * type of port or workgroup object. 
*/ + if (_wg_type_to_wi_type(wl_type) != wi_type) { + return false; + } + return true; +} + +static inline bool +_os_workgroup_join_token_initialized(os_workgroup_join_token_t token) +{ + return (token->sig == _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT); +} + +static inline void +_os_workgroup_set_name(os_workgroup_t wg, const char *name) +{ + if (name) { + const char *tmp = _dispatch_strdup_if_mutable(name); + if (tmp != name) { + wg->wg_state |= OS_WORKGROUP_LABEL_NEEDS_FREE; + name = tmp; + } + } + wg->name = name; +} + +static inline bool +_os_workgroup_client_attr_is_valid(os_workgroup_attr_t attr) +{ + return (attr && _os_workgroup_client_attr_initialized(attr)); +} + +static inline os_workgroup_attr_t +_os_workgroup_client_attr_resolve(os_workgroup_attr_t attr, + os_workgroup_attr_t client_attr, + const os_workgroup_attr_s *default_attr) +{ + if (client_attr == NULL) { + *attr = *default_attr; + } else { + if (!_os_workgroup_client_attr_is_valid(client_attr)) { + return NULL; + } + + // Make a local copy of the attr + *attr = *client_attr; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = default_attr->wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = default_attr->wg_type; + } + break; + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + /* Nothing to do, the client built the attr up from scratch */ + break; + default: + return NULL; + } + + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + return attr; +} + +static inline bool +_start_time_is_in_past(os_clockid_t clock, uint64_t start) +{ + switch (clock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + return start <= mach_absolute_time(); + } +} + +struct os_workgroup_pthread_ctx_s { + os_workgroup_t wg; + void *(*start_routine)(void *); + void *arg; +}; + +static void * +_os_workgroup_pthread_start(void *wrapper_arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = wrapper_arg; + os_workgroup_t wg = ctx->wg; + void *(*start_routine)(void *) = ctx->start_routine; + void *arg = ctx->arg; + + free(ctx); + + os_workgroup_join_token_s token; + int rc = os_workgroup_join(wg, &token); + if (rc != 0) { + DISPATCH_CLIENT_CRASH(rc, "pthread_start os_workgroup_join failed"); + } + + void *result = start_routine(arg); + + os_workgroup_leave(wg, &token); + os_workgroup_dec_refcount(wg); + + return result; +} + +static int +_os_workgroup_pthread_create_with_workgroup(pthread_t *thread, + os_workgroup_t wg, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = _dispatch_calloc(1, sizeof(*ctx)); + + os_workgroup_inc_refcount(wg); + + ctx->wg = wg; + ctx->start_routine = start_routine; + ctx->arg = arg; + + int rc = pthread_create(thread, attr, _os_workgroup_pthread_start, ctx); + if (rc != 0) { + os_workgroup_dec_refcount(wg); + free(ctx); + } + + return rc; +} + +static const struct pthread_workgroup_functions_s _os_workgroup_pthread_functions = { + .pwgf_version = PTHREAD_WORKGROUP_FUNCTIONS_VERSION, + .pwgf_create_with_workgroup = _os_workgroup_pthread_create_with_workgroup, +}; + +void +_workgroup_init(void) +{ + pthread_install_workgroup_functions_np(&_os_workgroup_pthread_functions); +} + +#pragma mark Private functions + +int +os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, + os_workgroup_interval_type_t interval_type) +{ + int ret = 0; + if 
(_os_workgroup_client_attr_is_valid(attr) && + _os_workgroup_type_is_interval_type(interval_type)) { + attr->wg_type = interval_type; + } else { + ret = EINVAL; + } + return ret; +} + +int +os_workgroup_attr_set_flags(os_workgroup_attr_t attr, + os_workgroup_attr_flags_t flags) +{ + int ret = 0; + if (_os_workgroup_client_attr_is_valid(attr)) { + attr->wg_attr_flags = flags; + } else { + ret = EINVAL; + } + + return ret; +} + +os_workgroup_t +os_workgroup_interval_copy_current_4AudioToolbox(void) +{ + os_workgroup_t wg = _os_workgroup_get_current(); + + if (wg) { + if (_os_workgroup_type_is_audio_type(wg->wg_type)) { + wg = os_retain(wg); + } else { + wg = NULL; + } + } + + return wg; +} + +#pragma mark Public functions + +os_workgroup_t +os_workgroup_create(const char *name, os_workgroup_attr_t attr) +{ + os_workgroup_t wg = NULL; + work_interval_t wi = NULL; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, + &_os_workgroup_attr_default); + if (attr == NULL) { + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_default_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + wg->wi = wi; + wg->wg_state = OS_WORKGROUP_OWNER; + wg->wg_type = attr->wg_type; + + _os_workgroup_set_name(wg, name); + + return wg; +} + +os_workgroup_interval_t +os_workgroup_interval_create(const char *name, os_clockid_t clock, + os_workgroup_attr_t attr) +{ + os_workgroup_interval_t wgi = NULL; + work_interval_t wi = NULL; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, + &_os_workgroup_interval_attr_default); + if (attr == NULL) { + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_interval_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } + + if (!_os_workgroup_attr_is_differentiated(attr)) { + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroup yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, + sizeof(struct os_workgroup_interval_s)); + wgi->wi = wi; + wgi->clock = clock; + wgi->wii = work_interval_instance_alloc(wi); + wgi->wii_lock = OS_UNFAIR_LOCK_INIT; + wgi->wg_type = attr->wg_type; + wgi->wg_state = OS_WORKGROUP_OWNER; + + _os_workgroup_set_name(wgi->_as_wg, name); + + return wgi; +} + +os_workgroup_t +os_workgroup_create_with_workload_id(const char * name, + const char *workload_id, os_workgroup_attr_t attr) +{ + os_workgroup_t wg = NULL; + work_interval_t wi = NULL; + + const os_workgroup_attr_s *default_attr = + &_os_workgroup_with_workload_id_attr_default; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Invalid attribute pointer"); + errno = EINVAL; + return NULL; + } + + /* Resolve workload ID */ + attr = _os_workgroup_workload_id_attr_resolve(workload_id, 
attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. */ + if (attr->wg_attr_flags != default_attr->wg_attr_flags) { + _os_workgroup_error_log("Non-default attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_default_type(attr->wg_type)) { + _os_workgroup_error_log("Non-default workload type: %s (%hd)", + workload_id, attr->wg_type); + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + _os_workgroup_error_log("Unsupported attribute flags: 0x%x", + attr->wg_attr_flags); + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + wg->wi = wi; + wg->wg_state = OS_WORKGROUP_OWNER; + wg->wg_type = attr->wg_type; + + _os_workgroup_set_name(wg, name); + + return wg; +} + +os_workgroup_interval_t +os_workgroup_interval_create_with_workload_id(const char *name, + const char *workload_id, os_clockid_t clock, os_workgroup_attr_t attr) +{ + os_workgroup_interval_t wgi = NULL; + work_interval_t wi = NULL; + + const os_workgroup_attr_s *default_attr = + &_os_workgroup_interval_attr_default; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Invalid attribute pointer"); + errno = EINVAL; + return NULL; + } + + /* Resolve workload ID */ + attr = _os_workgroup_workload_id_attr_resolve(workload_id, attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. 
*/ + if (attr->wg_attr_flags != default_attr->wg_attr_flags) { + _os_workgroup_error_log("Non-default attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_interval_type(attr->wg_type)) { + _os_workgroup_error_log("Invalid workload interval type: %s (%hd)", + workload_id, attr->wg_type); + errno = EINVAL; + return NULL; + } + + if (!_os_workgroup_attr_is_differentiated(attr)) { + _os_workgroup_error_log("Invalid attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroup yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + _os_workgroup_error_log("Unsupported attribute flags: 0x%x", + attr->wg_attr_flags); + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, + sizeof(struct os_workgroup_interval_s)); + wgi->wi = wi; + wgi->clock = clock; + wgi->wii = work_interval_instance_alloc(wi); + wgi->wii_lock = OS_UNFAIR_LOCK_INIT; + wgi->wg_type = attr->wg_type; + wgi->wg_state = OS_WORKGROUP_OWNER; + + _os_workgroup_set_name(wgi->_as_wg, name); + + return wgi; +} + +int +os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token, + os_workgroup_index * __unused id_out) +{ + return os_workgroup_join(wg, token); +} + +void +os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + return os_workgroup_leave(wg, token); +} + +#pragma mark Public functions + +os_workgroup_parallel_t +os_workgroup_parallel_create(const char *name, os_workgroup_attr_t attr) +{ + os_workgroup_parallel_t wgp = NULL; + + // Clients should only specify NULL attributes. 
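+	// (The block below open-codes _os_workgroup_client_attr_resolve()
+	// with _os_workgroup_parallel_attr_default as the fallback; it
+	// re-establishes the same resolved-signature invariant asserted
+	// afterwards.)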
+ os_workgroup_attr_s wga; + if (attr == NULL) { + wga = _os_workgroup_parallel_attr_default; + attr = &wga; + } else { + // Make a local copy of the attr + if (!_os_workgroup_client_attr_is_valid(attr)) { + errno = EINVAL; + return NULL; + } + + wga = *attr; + attr = &wga; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + { + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = _os_workgroup_parallel_attr_default.wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = _os_workgroup_parallel_attr_default.wg_type; + } + } + // Fallthrough + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + break; + default: + errno = EINVAL; + return NULL; + } + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_parallel_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wgp = (os_workgroup_t) _os_object_alloc(WORKGROUP_PARALLEL_CLASS, + sizeof(struct os_workgroup_parallel_s)); + wgp->wi = NULL; + wgp->wg_state = OS_WORKGROUP_OWNER; + wgp->wg_type = attr->wg_type; + + _os_workgroup_set_name(wgp, name); + + return wgp; +} + +int +os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) +{ + os_assert(wg != NULL); + os_assert(mach_port_out != NULL); + + *mach_port_out = MACH_PORT_NULL; + int rv = 0; + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + return EINVAL; + } + + if (!_os_workgroup_has_backing_workinterval(wg)) { + return EINVAL; + } + + if (_os_workgroup_is_configurable(wg_state)) { + rv = work_interval_copy_port(wg->wi, mach_port_out); + if (rv < 0) { + rv = errno; + } + return rv; + } + + kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, + MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + *mach_port_out = wg->port; + return rv; +} + +os_workgroup_t +os_workgroup_create_with_port(const char *name, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + errno = EINVAL; + return NULL; + } + + os_workgroup_type_t wg_type; + int ret = _os_workgroup_get_wg_wi_types_from_port(port, &wg_type, NULL); + if (ret != 0) { + return NULL; + } + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; + wg->wg_type = wg_type; + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workload_id_and_port(const char *name, + const char *workload_id, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + errno = EINVAL; + return NULL; + } + + os_workgroup_type_t wg_type; + uint32_t wi_type; + int ret = _os_workgroup_get_wg_wi_types_from_port(port, &wg_type, &wi_type); + if (ret != 0) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + return NULL; + } + + /* Validate workload ID is compatible with port workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, wi_type)) { + _os_workgroup_error_log("Mismatched workload ID and port " + "interval type: %s vs %hd", workload_id, wg_type); + errno = 
EINVAL; + return NULL; + } + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; + wg->wg_type = wg_type; + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workgroup(const char *name, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + if (_os_workgroup_is_configurable(wg_state)) { + int rv = work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + goto error; + } + } else { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + goto error; + } + new_wg->port = wg->port; + } + } + + return new_wg; + +error: + wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; +} + +os_workgroup_t +os_workgroup_create_with_workload_id_and_workgroup(const char *name, + const char *workload_id, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + _os_workgroup_error_log("Workgroup already cancelled"); + errno = EINVAL; + return NULL; + } + + /* Validate workload ID is compatible with workgroup workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, + _wg_type_to_wi_type(wg->wg_type))) { + _os_workgroup_error_log("Mismatched workload ID and workgroup " + "interval type: %s vs %hd", workload_id, wg->wg_type); + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + if (_os_workgroup_is_configurable(wg_state)) { + int rv = work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + _os_workgroup_error_log("Invalid workgroup work_interval"); + goto error; + } + } else { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + _os_workgroup_error_log("Invalid workgroup port 0x%x", wg->port); + goto error; + } + new_wg->port = wg->port; + } + } + + return new_wg; + +error: + wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; +} + +int +os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t __unused attr) +{ + os_assert(wg != NULL); + + qos_class_t qos = QOS_CLASS_USER_INTERACTIVE; + + switch (wg->wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + return pthread_time_constraint_max_parallelism(0); + default: + return pthread_qos_max_parallelism(qos, 0); + } +} + 
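+/*
+ * A minimal usage sketch for the join/leave pair implemented below,
+ * assuming a workgroup made with os_workgroup_create() above;
+ * render_thread() and do_render_pass() are hypothetical names.
+ *
+ *	static void *
+ *	render_thread(void *arg)
+ *	{
+ *		os_workgroup_t wg = arg;
+ *		os_workgroup_join_token_s token;
+ *		if (os_workgroup_join(wg, &token) == 0) {
+ *			do_render_pass();		// runs as a member of wg
+ *			os_workgroup_leave(wg, &token);	// same thread, same token
+ *		}
+ *		return NULL;
+ *	}
+ *
+ * os_workgroup_join() returns EALREADY if the calling thread is already
+ * in a workgroup and EINVAL once the workgroup has been cancelled; the
+ * token is validated in os_workgroup_leave() and must not cross threads.
+ */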
+int +os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg) { + // We currently don't allow joining multiple workgroups at all, period + errno = EALREADY; + return errno; + } + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + errno = EINVAL; + return errno; + } + + int rv = 0; + + if (_os_workgroup_has_backing_workinterval(wg)) { + if (_os_workgroup_is_configurable(wg_state)) { + rv = work_interval_join(wg->wi); + } else { + rv = work_interval_join_port(wg->port); + } + } + + if (rv) { + rv = errno; + return rv; + } + + os_atomic_inc(&wg->joined_cnt, relaxed); + + bzero(token, sizeof(struct os_workgroup_join_token_s)); + token->sig = _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT; + + token->thread = _dispatch_thread_port(); + token->old_wg = cur_wg; /* should be null */ + token->new_wg = wg; + + _os_workgroup_set_current(wg); + return rv; +} + +void +os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + if (!_os_workgroup_join_token_initialized(token)) { + os_crash("Join token is corrupt"); + } + + if (token->thread != _dispatch_thread_port()) { + os_crash("Join token provided is for a different thread"); + } + + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if ((token->new_wg != cur_wg) || (cur_wg != wg)) { + os_crash("Join token provided is for a different workgroup than the " + "last one joined by thread"); + } + os_assert(token->old_wg == NULL); + + if (_os_workgroup_has_backing_workinterval(wg)) { + dispatch_assume(work_interval_leave() == 0); + } + uint32_t old_joined_cnt = os_atomic_dec_orig(&wg->joined_cnt, relaxed); + if (old_joined_cnt == 0) { + DISPATCH_INTERNAL_CRASH(0, "Joined count underflowed"); + } + _os_workgroup_set_current(NULL); +} + +int +os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable client_arena, + uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor) +{ + size_t arena_size; + // We overflowed, we can't allocate this + if (os_mul_and_add_overflow(sizeof(mach_port_t), max_workers, sizeof(struct os_workgroup_arena_s), &arena_size)) { + errno = ENOMEM; + return errno; + } + + os_workgroup_arena_t wg_arena = calloc(arena_size, 1); + if (wg_arena == NULL) { + errno = ENOMEM; + return errno; + } + wg_arena->max_workers = max_workers; + wg_arena->client_arena = client_arena; + wg_arena->destructor = destructor; + + _os_workgroup_atomic_flags old_state, new_state; + os_workgroup_arena_t old_arena = NULL; + + bool success = os_atomic_rmw_loop(&wg->wg_atomic_flags, old_state, new_state, relaxed, { + if (_wg_joined_cnt(old_state) > 0) { // We can't change the arena while it is in use + os_atomic_rmw_loop_give_up(break); + } + old_arena = _wg_arena(old_state); + + // Remove the old arena and put the new one in + new_state = old_state; + new_state &= ~OS_WORKGROUP_ARENA_MASK; + new_state |= (uint64_t) wg_arena; + }); + + if (!success) { + free(wg_arena); + errno = EBUSY; + return errno; + } + + if (old_arena) { + old_arena->destructor(old_arena->client_arena); + free(old_arena); + } + + return 0; +} + +void * +os_workgroup_get_working_arena(os_workgroup_t wg, os_workgroup_index *_Nullable index_out) +{ + if (_os_workgroup_get_current() != wg) { + os_crash("Thread is not a member of the workgroup"); + } + + /* At this point, we know that since this thread is a member of the wg, we + * won't have the arena replaced out from under us so we can modify it + * safely */ + 
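+	/* (Index protocol, as implemented below: a member thread first
+	 * searches arena_indices[] for its own thread port; if it is absent,
+	 * the thread claims the next slot with os_atomic_inc_orig() and
+	 * records its port there, crashing once more than max_workers
+	 * distinct threads have requested an index.) */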
+int
+os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable client_arena,
+		uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor)
+{
+	size_t arena_size;
+	// We overflowed, we can't allocate this
+	if (os_mul_and_add_overflow(sizeof(mach_port_t), max_workers, sizeof(struct os_workgroup_arena_s), &arena_size)) {
+		errno = ENOMEM;
+		return errno;
+	}
+
+	os_workgroup_arena_t wg_arena = calloc(arena_size, 1);
+	if (wg_arena == NULL) {
+		errno = ENOMEM;
+		return errno;
+	}
+	wg_arena->max_workers = max_workers;
+	wg_arena->client_arena = client_arena;
+	wg_arena->destructor = destructor;
+
+	_os_workgroup_atomic_flags old_state, new_state;
+	os_workgroup_arena_t old_arena = NULL;
+
+	bool success = os_atomic_rmw_loop(&wg->wg_atomic_flags, old_state, new_state, relaxed, {
+		if (_wg_joined_cnt(old_state) > 0) { // We can't change the arena while it is in use
+			os_atomic_rmw_loop_give_up(break);
+		}
+		old_arena = _wg_arena(old_state);
+
+		// Remove the old arena and put the new one in
+		new_state = old_state;
+		new_state &= ~OS_WORKGROUP_ARENA_MASK;
+		new_state |= (uint64_t) wg_arena;
+	});
+
+	if (!success) {
+		free(wg_arena);
+		errno = EBUSY;
+		return errno;
+	}
+
+	if (old_arena) {
+		old_arena->destructor(old_arena->client_arena);
+		free(old_arena);
+	}
+
+	return 0;
+}
+
+void *
+os_workgroup_get_working_arena(os_workgroup_t wg, os_workgroup_index *_Nullable index_out)
+{
+	if (_os_workgroup_get_current() != wg) {
+		os_crash("Thread is not a member of the workgroup");
+	}
+
+	/* At this point, we know that since this thread is a member of the wg, we
+	 * won't have the arena replaced out from under us so we can modify it
+	 * safely */
+	dispatch_assert(wg->joined_cnt > 0);
+
+	os_workgroup_arena_t arena = os_atomic_load(&wg->wg_arena, relaxed);
+	if (arena == NULL) {
+		return NULL;
+	}
+
+	/* if the max_workers was 0 and the client wants an index, then they will
+	 * fail */
+	if (index_out != NULL && arena->max_workers == 0) {
+		os_crash("The arena associated with workgroup is not to be partitioned");
+	}
+
+	if (index_out) {
+		/* Find the index of the current thread in the arena */
+		uint32_t found_index = 0;
+		bool found = false;
+		for (uint32_t i = 0; i < arena->max_workers; i++) {
+			if (arena->arena_indices[i] == _dispatch_thread_port()) {
+				found_index = i;
+				found = true;
+				break;
+			}
+		}
+
+		if (!found) {
+			/* Current thread doesn't already have an index, give it one */
+			found_index = os_atomic_inc_orig(&arena->next_worker_index, relaxed);
+
+			if (found_index >= arena->max_workers) {
+				os_crash("Exceeded the maximum number of workers who can access the arena");
+			}
+			arena->arena_indices[found_index] = _dispatch_thread_port();
+		}
+
+		*index_out = found_index;
+	}
+
+	return arena->client_arena;
+}
+
+void
+os_workgroup_cancel(os_workgroup_t wg)
+{
+	os_atomic_or(&wg->wg_state, OS_WORKGROUP_CANCELED, relaxed);
+}
+
+bool
+os_workgroup_testcancel(os_workgroup_t wg)
+{
+	return os_atomic_load(&wg->wg_state, relaxed) & OS_WORKGROUP_CANCELED;
+}
+
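A minimal sketch of the arena API above (not part of the patch itself); the arena calls are SPI from libdispatch's private workgroup headers, and the sizing constants and helper names here are hypothetical:

#include <errno.h>
#include <os/workgroup.h>
#include <stdlib.h>

#define MAX_WORKERS   4
#define SLICE_FLOATS  256

/* Hypothetical setup: one float slice per worker, carved out of a shared
 * buffer; free() runs when the arena is replaced or torn down. */
static int
attach_arena(os_workgroup_t wg)
{
	float *buf = calloc(MAX_WORKERS * SLICE_FLOATS, sizeof(float));
	if (buf == NULL) {
		return ENOMEM;
	}
	/* Fails with EBUSY while any thread is joined to `wg`, per the
	 * rmw loop above. */
	return os_workgroup_set_working_arena(wg, buf, MAX_WORKERS, free);
}

/* Called on a thread that has already joined `wg`: the per-thread index
 * handed out above picks this worker's private slice. */
static float *
worker_slice(os_workgroup_t wg)
{
	os_workgroup_index idx;
	float *buf = os_workgroup_get_working_arena(wg, &idx);
	return buf ? buf + (idx * SLICE_FLOATS) : NULL;
}
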
+int
+os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start,
+		uint64_t deadline, os_workgroup_interval_data_t __unused data)
+{
+	os_workgroup_t cur_wg = _os_workgroup_get_current();
+	if (cur_wg != wgi->_as_wg) {
+		os_crash("Thread is not a member of the workgroup");
+	}
+
+	if (deadline < start || (!_start_time_is_in_past(wgi->clock, start))) {
+		errno = EINVAL;
+		return errno;
+	}
+
+	bool success = os_unfair_lock_trylock(&wgi->wii_lock);
+	if (!success) {
+		// Someone else is concurrently in a start, update or finish method. We
+		// can't make progress here
+		errno = EBUSY;
+		return errno;
+	}
+
+	int rv = 0;
+	uint64_t old_state, new_state;
+	os_atomic_rmw_loop(&wgi->wg_state, old_state, new_state, relaxed, {
+		if (old_state & (OS_WORKGROUP_CANCELED | OS_WORKGROUP_INTERVAL_STARTED)) {
+			rv = EINVAL;
+			os_atomic_rmw_loop_give_up(break);
+		}
+		if (!_os_workgroup_is_configurable(old_state)) {
+			rv = EPERM;
+			os_atomic_rmw_loop_give_up(break);
+		}
+		new_state = old_state | OS_WORKGROUP_INTERVAL_STARTED;
+	});
+
+	if (rv) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = rv;
+		return rv;
+	}
+
+	work_interval_instance_t wii = wgi->wii;
+	work_interval_instance_clear(wii);
+
+	work_interval_instance_set_start(wii, start);
+	work_interval_instance_set_deadline(wii, deadline);
+	rv = work_interval_instance_start(wii);
+	if (rv != 0) {
+		/* If we failed to start the interval in the kernel, clear the started
+		 * field */
+		os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed);
+	}
+
+	os_unfair_lock_unlock(&wgi->wii_lock);
+
+	return rv;
+}
+
+int
+os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline,
+		os_workgroup_interval_data_t __unused data)
+{
+	os_workgroup_t cur_wg = _os_workgroup_get_current();
+	if (cur_wg != wgi->_as_wg) {
+		os_crash("Thread is not a member of the workgroup");
+	}
+
+	bool success = os_unfair_lock_trylock(&wgi->wii_lock);
+	if (!success) {
+		// Someone else is concurrently in a start, update or finish method. We
+		// can't make progress here
+		errno = EBUSY;
+		return errno;
+	}
+
+	uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed);
+	if (!_os_workgroup_is_configurable(wg_state)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EPERM;
+		return errno;
+	}
+
+	/* Note: We allow updating and finishing a workgroup_interval that has
+	 * already started even if the workgroup has been cancelled - since
+	 * cancellation happens asynchronously and doesn't care about ongoing
+	 * intervals. However a subsequent new interval cannot be started */
+	if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EINVAL;
+		return errno;
+	}
+
+	work_interval_instance_t wii = wgi->wii;
+	work_interval_instance_set_deadline(wii, deadline);
+	int rv = work_interval_instance_update(wii);
+	if (rv != 0) {
+		rv = errno;
+	}
+
+	os_unfair_lock_unlock(&wgi->wii_lock);
+	return rv;
+}
+
+int
+os_workgroup_interval_finish(os_workgroup_interval_t wgi,
+		os_workgroup_interval_data_t __unused data)
+{
+	os_workgroup_t cur_wg = _os_workgroup_get_current();
+	if (cur_wg != wgi->_as_wg) {
+		os_crash("Thread is not a member of the workgroup");
+	}
+
+	bool success = os_unfair_lock_trylock(&wgi->wii_lock);
+	if (!success) {
+		// Someone else is concurrently in a start, update or finish method. We
+		// can't make progress here
+		errno = EBUSY;
+		return errno;
+	}
+
+	uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed);
+	if (!_os_workgroup_is_configurable(wg_state)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EPERM;
+		return errno;
+	}
+	if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EINVAL;
+		return errno;
+	}
+
+	work_interval_instance_t wii = wgi->wii;
+	uint64_t current_finish = 0;
+	switch (wgi->clock) {
+	case OS_CLOCK_MACH_ABSOLUTE_TIME:
+		current_finish = mach_absolute_time();
+		break;
+	}
+
+	work_interval_instance_set_finish(wii, current_finish);
+	int rv = work_interval_instance_finish(wii);
+	if (rv != 0) {
+		rv = errno;
+	} else {
+		/* If we succeeded in finishing, clear the started bit */
+		os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed);
+	}
+
+	os_unfair_lock_unlock(&wgi->wii_lock);
+	return rv;
+}
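A minimal sketch of one start/update/finish cycle against the functions above (not part of the patch itself); `render_period` is hypothetical, `wgi` is assumed to use OS_CLOCK_MACH_ABSOLUTE_TIME, and the calling thread is assumed to have already joined it:

#include <mach/mach_time.h>
#include <os/workgroup.h>

static void
render_period(os_workgroup_interval_t wgi, uint64_t period_ticks)
{
	uint64_t now = mach_absolute_time();

	/* start must not be in the future and deadline must be >= start. */
	if (os_workgroup_interval_start(wgi, now, now + period_ticks, NULL) != 0) {
		return; /* EINVAL, EPERM or EBUSY, as in the implementation above */
	}

	/* ... produce this period's output ... */

	/* Optional: push the deadline out if the period grew underneath us. */
	(void)os_workgroup_interval_update(wgi, now + 2 * period_ticks, NULL);

	/* Clears OS_WORKGROUP_INTERVAL_STARTED on success. */
	(void)os_workgroup_interval_finish(wgi, NULL);
}
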
diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h
new file mode 100644
index 000000000..e19df6467
--- /dev/null
+++ b/src/workgroup_internal.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2020 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __OS_WORKGROUP_INTERNAL__
+#define __OS_WORKGROUP_INTERNAL__
+
+#include
+#include
+#include
+#include
+
+void _os_workgroup_xref_dispose(os_workgroup_t wg);
+void _os_workgroup_dispose(os_workgroup_t wg);
+void _os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_interval_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size);
+
+#if !USE_OBJC
+void _os_workgroup_explicit_xref_dispose(os_workgroup_t wg);
+void _os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi);
+#endif
+
+extern pthread_key_t _os_workgroup_key;
+void _os_workgroup_tsd_cleanup(void *ctxt);
+
+void _workgroup_init(void);
+
+#if 1 || DISPATCH_DEBUG // log workload_id API adoption errors by default for now
+#define OS_WORKGROUP_LOG_ERRORS 1
+#endif
+
+#if 1 || DISPATCH_DEBUG // log workload_id lookup failures by default for now
+#define OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID 1
+#endif
+
+#if OS_WORKGROUP_LOG_ERRORS
+#define _os_workgroup_error_log(m, ...) \
+	_dispatch_log("BUG IN CLIENT of %s: " m, __func__, ##__VA_ARGS__);
+#else
+#define _os_workgroup_error_log(m, ...) (void)m;
+#endif
+
+/*
+ * os_workgroup_type_t is an internal representation that is a superset of the
+ * types used by the various kinds of workgroups. Currently it only includes
+ * os_workgroup_interval_type_t and the types specified below
+ *
+ * Making the workgroup type uint16_t means that we have a total of 64k types
+ * which is plenty
+ */
+typedef uint16_t os_workgroup_type_t;
+#define OS_WORKGROUP_TYPE_DEFAULT 0x0
+#define OS_WORKGROUP_TYPE_PARALLEL 0x40
+
+/* To be set when the caller provided workgroup attribute has been expanded
+ * and resolved. */
+#define _OS_WORKGROUP_ATTR_RESOLVED_INIT 0x782618DA
+struct os_workgroup_attr_s {
+	uint32_t sig;
+	uint32_t wg_attr_flags;
+	os_workgroup_type_t wg_type;
+	uint16_t empty;
+	uint32_t reserved[13];
+};
+
+#define _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT 0x4D5F5A58
+struct os_workgroup_join_token_s {
+	uint32_t sig;
+	mach_port_t thread;
+	os_workgroup_t old_wg;
+	os_workgroup_t new_wg;
+	uint64_t reserved[2];
+};
+
+struct os_workgroup_interval_data_s {
+	uint32_t sig;
+	uint32_t reserved[14];
+};
+
+/* This is lazily allocated if the arena is used by clients */
+typedef struct os_workgroup_arena_s {
+	void *client_arena;
+	os_workgroup_working_arena_destructor_t destructor;
+	uint32_t max_workers; /* Client specified max size */
+	uint32_t next_worker_index;
+	mach_port_t arena_indices[0]; /* Dynamic depending on max_workers */
+} *os_workgroup_arena_t;
+
+#define OS_WORKGROUP_OWNER (1 << 0)
+#define OS_WORKGROUP_CANCELED (1 << 1)
+#define OS_WORKGROUP_LABEL_NEEDS_FREE (1 << 2)
+#define OS_WORKGROUP_INTERVAL_STARTED (1 << 3)
+
+
+/* Note that os_workgroup_type_t doesn't have to be in the wg_atomic_flags, we
+ * just put it there to pack the struct.
+ *
+ * We have to put the arena related state in an atomic because the
+ * joined_cnt is modified in a real time context as part of os_workgroup_join
+ * and os_workgroup_leave(). We cannot have a lock and so it needs to all be
+ * part of a single _os_workgroup_atomic_flags sized atomic state */
+
+#if !defined(__LP64__) && !defined(__arm64__)
+// For 32 bit watches (armv7), we can only do DCAS up to 64 bits so the union
+// type is for uint64_t.
+//
+// 16 bits for tracking the type
+// 16 bits for max number of threads which have joined a workgroup (64k is plenty)
+// 32 bits for arena pointer
+// -----
+// 64 bits
+typedef uint64_t _os_workgroup_atomic_flags;
+
+typedef uint16_t os_joined_cnt_t;
+#define OS_WORKGROUP_JOINED_COUNT_SHIFT 48
+#define OS_WORKGROUP_JOINED_COUNT_MASK (((uint64_t) 0xffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT)
+#define OS_WORKGROUP_ARENA_MASK 0xffffffffull
+
+#define OS_WORKGROUP_HEADER_INTERNAL \
+	DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \
+		os_workgroup_arena_t wg_arena, \
+		os_workgroup_type_t wg_type, \
+		os_joined_cnt_t joined_cnt \
+	)
+#else
+// For all 64 bit systems (including arm64_32), we can do DCAS (or quad width
+// CAS for arm64_32) so 128 bit union type works
+//
+// 16 bits for tracking the type
+// 16 bits for empty
+// 32 bits for max number of threads which have joined a workgroup
+// 64 bits for arena pointer
+// -----
+// 128 bits
+typedef __uint128_t _os_workgroup_atomic_flags;
+
+typedef uint32_t os_joined_cnt_t;
+#define OS_WORKGROUP_JOINED_COUNT_SHIFT 96
+#define OS_WORKGROUP_JOINED_COUNT_MASK (((__uint128_t) 0xffffffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT)
+#define OS_WORKGROUP_ARENA_MASK 0xffffffffffffffffull
+
+#define OS_WORKGROUP_HEADER_INTERNAL \
+	DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \
+		os_workgroup_arena_t wg_arena, \
+		os_workgroup_type_t wg_type, \
+		const uint16_t empty, \
+		os_joined_cnt_t joined_cnt \
+	)
+#endif
+
+static inline os_joined_cnt_t
+_wg_joined_cnt(_os_workgroup_atomic_flags wgaf)
+{
+	return (os_joined_cnt_t) (((wgaf & OS_WORKGROUP_JOINED_COUNT_MASK)) >> OS_WORKGROUP_JOINED_COUNT_SHIFT);
+}
+
+static inline os_workgroup_arena_t
+_wg_arena(_os_workgroup_atomic_flags wgaf)
+{
+	return (os_workgroup_arena_t) (wgaf & OS_WORKGROUP_ARENA_MASK);
+}
+
+#define OS_WORKGROUP_HEADER \
+	struct _os_object_s _as_os_obj[0]; \
+	OS_OBJECT_STRUCT_HEADER(workgroup); \
+	const char *name; \
+	uint64_t volatile wg_state; \
+	union { \
+		work_interval_t wi; \
+		mach_port_t port; \
+	}; \
+	OS_WORKGROUP_HEADER_INTERNAL;
+
+struct os_workgroup_s {
+	OS_WORKGROUP_HEADER
+};
+
+struct os_workgroup_interval_s {
+	struct os_workgroup_s _as_wg[0];
+	OS_WORKGROUP_HEADER
+	os_clockid_t clock;
+	/* Needed to serialize updates to wii when there are multiple racy calls to
+	 * os_workgroup_interval_update */
+	os_unfair_lock wii_lock;
+	work_interval_instance_t wii;
+};
+
+struct os_workgroup_parallel_s {
+	OS_WORKGROUP_HEADER
+};
+
+_Static_assert(sizeof(struct os_workgroup_attr_s) == sizeof(struct os_workgroup_attr_opaque_s),
+		"Incorrect size of workgroup attribute structure");
+_Static_assert(sizeof(struct os_workgroup_join_token_s) == sizeof(struct os_workgroup_join_token_opaque_s),
+		"Incorrect size of workgroup join token structure");
+_Static_assert(sizeof(struct os_workgroup_interval_data_s) == sizeof(struct os_workgroup_interval_data_opaque_s),
+		"Incorrect size of workgroup interval data structure");
+
+#endif /* __OS_WORKGROUP_INTERNAL__ */
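Because the joined count and the arena pointer must change under a single (double-wide) CAS, they share one atomic word. A standalone model of the 32-bit-pointer layout above (not part of the patch itself; local hypothetical names, runnable on any C compiler):

#include <assert.h>
#include <stdint.h>

/* Model of the armv7k layout: arena "pointer" in bits 0-31, type in bits
 * 32-47, joined count in bits 48-63. */
#define MODEL_JOINED_SHIFT 48
#define MODEL_JOINED_MASK  (((uint64_t)0xffff) << MODEL_JOINED_SHIFT)
#define MODEL_TYPE_SHIFT   32
#define MODEL_ARENA_MASK   0xffffffffull

static uint16_t
model_joined_cnt(uint64_t f)
{
	return (uint16_t)((f & MODEL_JOINED_MASK) >> MODEL_JOINED_SHIFT);
}

static uint32_t
model_arena(uint64_t f)
{
	return (uint32_t)(f & MODEL_ARENA_MASK);
}

int
main(void)
{
	uint64_t flags = 0;
	flags |= (uint64_t)0x1234abcd;                  /* arena "pointer" */
	flags |= ((uint64_t)0x40) << MODEL_TYPE_SHIFT;  /* OS_WORKGROUP_TYPE_PARALLEL */
	flags |= ((uint64_t)3) << MODEL_JOINED_SHIFT;   /* three joined threads */

	assert(model_joined_cnt(flags) == 3);
	assert(model_arena(flags) == 0x1234abcd);
	return 0;
}
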
diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig
deleted file mode 100644
index 763bafe1e..000000000
--- a/xcodeconfig/libdispatch-dyld-stub.xcconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-//
-// Copyright (c) 2016 Apple Inc. All rights reserved.
-//
-// @APPLE_APACHE_LICENSE_HEADER_START@
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// @APPLE_APACHE_LICENSE_HEADER_END@
-//
-
-PRODUCT_NAME = libdispatch_dyld_stub
-INSTALL_PATH = /usr/local/lib/dyld_stub
-BUILD_VARIANTS = normal
-GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS)
-OTHER_LDFLAGS =
-VERSIONING_SYSTEM =
-EXCLUDED_SOURCE_FILE_NAMES = *
-INCLUDED_SOURCE_FILE_NAMES = voucher.c // minimal with DISPATCH_VARIANT_DYLD_STUB
diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases
index d8a5113a2..24cbc6b2b 100644
--- a/xcodeconfig/libdispatch.aliases
+++ b/xcodeconfig/libdispatch.aliases
@@ -23,5 +23,9 @@ __dispatch_queue_attrs __dispatch_queue_attr_concurrent
 __dispatch_source_type_memorypressure __dispatch_source_type_memorystatus
 _dispatch_assert_queue$V2 _dispatch_assert_queue
 _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not
+_dispatch_async _dispatch_channel_async
+_dispatch_async_f _dispatch_channel_async_f
 _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target
+_dispatch_source_cancel _dispatch_channel_cancel
 _dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF
+_dispatch_source_testcancel _dispatch_channel_testcancel
diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean
index c6ba14c4b..1e2bff8c9 100644
--- a/xcodeconfig/libdispatch.clean
+++ b/xcodeconfig/libdispatch.clean
@@ -18,11 +18,17 @@
 # @APPLE_APACHE_LICENSE_HEADER_END@
 #
 
+__MergedGlobals
 __dispatch_bug.last_seen
 __dispatch_bug_deprecated.last_seen
 __dispatch_bug_kevent_client.last_seen
-__dispatch_bug_kevent_client.last_seen.37
-__dispatch_bug_kevent_client.last_seen.39
+#if defined(__x86_64__)
+__dispatch_bug_kevent_client.last_seen.44
+__dispatch_bug_kevent_client.last_seen.46
+#else
+__dispatch_bug_kevent_client.last_seen.38
+__dispatch_bug_kevent_client.last_seen.40
+#endif
 __dispatch_bug_kevent_vanished.last_seen
 __dispatch_bug_mach_client.last_seen
@@ -31,6 +37,7 @@ __dispatch_build
 __dispatch_child_of_unsafe_fork
 __dispatch_continuation_cache_limit
+__dispatch_custom_workloop_root_queue
 __dispatch_data_empty
 __dispatch_host_time_data.0
 __dispatch_host_time_data.1
diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty
index d8d1a0d6e..53cd19f74 100644
--- a/xcodeconfig/libdispatch.dirty
+++ b/xcodeconfig/libdispatch.dirty
@@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial
 __OS_dispatch_queue_serial_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_concurrent
 __OS_dispatch_queue_concurrent_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_cooperative
+__OS_dispatch_queue_cooperative_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_global
 __OS_dispatch_queue_global_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_pthread_root
@@ -48,6 +50,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr
 __OS_dispatch_queue_attr_vtable
 _OBJC_CLASS_$_OS_dispatch_source
 __OS_dispatch_source_vtable
+_OBJC_CLASS_$_OS_dispatch_channel
+__OS_dispatch_channel_vtable
 _OBJC_CLASS_$_OS_dispatch_mach
 __OS_dispatch_mach_vtable
 _OBJC_CLASS_$_OS_dispatch_mach_msg
@@ -61,6 +65,10 @@ __OS_dispatch_disk_vtable
 # os_object_t classes
 _OBJC_CLASS_$_OS_object
 _OBJC_CLASS_$_OS_voucher
+_OBJC_CLASS_$_OS_os_eventlink
+_OBJC_CLASS_$_OS_os_workgroup
+_OBJC_CLASS_$_OS_os_workgroup_interval
+_OBJC_CLASS_$_OS_os_workgroup_parallel
 #_OBJC_CLASS_$_OS_voucher_recipe
 # non-os_object_t classes
 _OBJC_CLASS_$_OS_dispatch_data
@@ -73,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue
 _OBJC_METACLASS_$_OS_dispatch_workloop
 _OBJC_METACLASS_$_OS_dispatch_queue_serial
 _OBJC_METACLASS_$_OS_dispatch_queue_concurrent
+_OBJC_METACLASS_$_OS_dispatch_queue_cooperative
 _OBJC_METACLASS_$_OS_dispatch_queue_global
 _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root
 _OBJC_METACLASS_$_OS_dispatch_queue_main
@@ -80,6 +89,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop
 _OBJC_METACLASS_$_OS_dispatch_queue_mgr
 _OBJC_METACLASS_$_OS_dispatch_queue_attr
 _OBJC_METACLASS_$_OS_dispatch_source
+_OBJC_METACLASS_$_OS_dispatch_channel
 _OBJC_METACLASS_$_OS_dispatch_mach
 _OBJC_METACLASS_$_OS_dispatch_mach_msg
 _OBJC_METACLASS_$_OS_dispatch_io
@@ -87,6 +97,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation
 _OBJC_METACLASS_$_OS_dispatch_disk
 _OBJC_METACLASS_$_OS_object
 _OBJC_METACLASS_$_OS_voucher
+_OBJC_METACLASS_$_OS_os_eventlink
+_OBJC_METACLASS_$_OS_os_workgroup
+_OBJC_METACLASS_$_OS_os_workgroup_interval
+_OBJC_METACLASS_$_OS_os_workgroup_parallel
 #_OBJC_METACLASS_$_OS_voucher_recipe
 _OBJC_METACLASS_$_OS_dispatch_data
 _OBJC_METACLASS_$_OS_dispatch_data_empty
@@ -118,9 +132,11 @@ __dispatch_logv_pred
 __dispatch_mach_calendar_pred
 __dispatch_mach_host_port_pred
 __dispatch_mach_notify_port_pred
+__dispatch_mach_notify_unote
 __dispatch_mach_xpc_hooks
 __dispatch_main_heap
 __dispatch_main_q_handle_pred
+__dispatch_memorypressure_source
 __dispatch_mgr_sched_pred
 __dispatch_queue_serial_numbers
 __dispatch_root_queues_pred
@@ -130,6 +146,7 @@ __firehose_task_buffer_pred
 __voucher_activity_debug_channel
 __voucher_libtrace_hooks
 __voucher_task_mach_voucher_pred
+__voucher_process_can_use_arbitrary_personas_pred
 
 # 32bits
 __dispatch_mach_host_port
@@ -151,3 +168,5 @@ __dispatch_io_fds
 __dispatch_io_devs_lockq
 __dispatch_io_fds_lockq
 __dispatch_io_init_pred
+__voucher_activity_disabled.disabled
+__voucher_process_can_use_arbitrary_personas
diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order
index b586837d5..c61d6b280 100644
--- a/xcodeconfig/libdispatch.order
+++ b/xcodeconfig/libdispatch.order
@@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial
 __OS_dispatch_queue_serial_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_concurrent
 __OS_dispatch_queue_concurrent_vtable
+_OBJC_CLASS_$_OS_dispatch_queue_cooperative
+__OS_dispatch_queue_cooperative_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_global
 __OS_dispatch_queue_global_vtable
 _OBJC_CLASS_$_OS_dispatch_queue_pthread_root
@@ -48,6 +50,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr
 __OS_dispatch_queue_attr_vtable
 _OBJC_CLASS_$_OS_dispatch_source
 __OS_dispatch_source_vtable
+_OBJC_CLASS_$_OS_dispatch_channel
+__OS_dispatch_channel_vtable
 _OBJC_CLASS_$_OS_dispatch_mach
 __OS_dispatch_mach_vtable
 _OBJC_CLASS_$_OS_dispatch_mach_msg
@@ -61,6 +65,10 @@ __OS_dispatch_disk_vtable
 # os_object_t classes
 _OBJC_CLASS_$_OS_object
 _OBJC_CLASS_$_OS_voucher
+_OBJC_CLASS_$_OS_os_eventlink
+_OBJC_CLASS_$_OS_os_workgroup
+_OBJC_CLASS_$_OS_os_workgroup_interval
+_OBJC_CLASS_$_OS_os_workgroup_parallel
 #_OBJC_CLASS_$_OS_voucher_recipe
 # non-os_object_t classes
 _OBJC_CLASS_$_OS_dispatch_data
@@ -73,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue
 _OBJC_METACLASS_$_OS_dispatch_workloop
 _OBJC_METACLASS_$_OS_dispatch_queue_serial
 _OBJC_METACLASS_$_OS_dispatch_queue_concurrent
+_OBJC_METACLASS_$_OS_dispatch_queue_cooperative
 _OBJC_METACLASS_$_OS_dispatch_queue_global
 _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root
 _OBJC_METACLASS_$_OS_dispatch_queue_main
@@ -80,6 +89,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop
 _OBJC_METACLASS_$_OS_dispatch_queue_mgr
 _OBJC_METACLASS_$_OS_dispatch_queue_attr
 _OBJC_METACLASS_$_OS_dispatch_source
+_OBJC_METACLASS_$_OS_dispatch_channel
 _OBJC_METACLASS_$_OS_dispatch_mach
 _OBJC_METACLASS_$_OS_dispatch_mach_msg
 _OBJC_METACLASS_$_OS_dispatch_io
@@ -87,6 +97,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation
 _OBJC_METACLASS_$_OS_dispatch_disk
 _OBJC_METACLASS_$_OS_object
 _OBJC_METACLASS_$_OS_voucher
+_OBJC_METACLASS_$_OS_os_eventlink
+_OBJC_METACLASS_$_OS_os_workgroup
+_OBJC_METACLASS_$_OS_os_workgroup_interval
+_OBJC_METACLASS_$_OS_os_workgroup_parallel
 #_OBJC_METACLASS_$_OS_voucher_recipe
 _OBJC_METACLASS_$_OS_dispatch_data
 _OBJC_METACLASS_$_OS_dispatch_data_empty
diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig
index f473b8ffb..48f35f27a 100644
--- a/xcodeconfig/libdispatch.xcconfig
+++ b/xcodeconfig/libdispatch.xcconfig
@@ -18,86 +18,6 @@
 // @APPLE_APACHE_LICENSE_HEADER_END@
 //
 
-#include "/Makefiles/CoreOS/Xcode/BSD.xcconfig"
-#include "/AppleInternal/XcodeConfig/PlatformSupport.xcconfig"
-
-SDKROOT = macosx.internal
-SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator
-PRODUCT_NAME = libdispatch
-EXECUTABLE_PREFIX =
-INSTALL_PATH = /usr/lib/system
-PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch
-PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch
-OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os
-OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os
-HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src
-LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system $(SDKROOT)/usr/local/lib
-INSTALLHDRS_SCRIPT_PHASE = YES
-ALWAYS_SEARCH_USER_PATHS = NO
-USE_HEADERMAP = NO
-BUILD_VARIANTS = normal debug profile
-ONLY_ACTIVE_ARCH = NO
-CLANG_LINK_OBJC_RUNTIME = NO
-GCC_C_LANGUAGE_STANDARD = gnu11
-CLANG_CXX_LANGUAGE_STANDARD = gnu++11
-ENABLE_STRICT_OBJC_MSGSEND = YES
-GCC_ENABLE_CPP_EXCEPTIONS = NO
-GCC_STRICT_ALIASING = YES
-GCC_SYMBOLS_PRIVATE_EXTERN = YES
-GCC_ENABLE_PASCAL_STRINGS = NO
-GCC_WARN_SHADOW = YES
-GCC_WARN_64_TO_32_BIT_CONVERSION = YES
-GCC_WARN_ABOUT_RETURN_TYPE = YES
-GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
-GCC_WARN_ABOUT_MISSING_NEWLINE = YES
-GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
-GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
-GCC_WARN_SIGN_COMPARE = YES
-GCC_WARN_STRICT_SELECTOR_MATCH = YES
-GCC_WARN_UNDECLARED_SELECTOR = YES
-GCC_WARN_UNINITIALIZED_AUTOS = YES
-GCC_WARN_UNKNOWN_PRAGMAS = YES
-GCC_WARN_UNUSED_FUNCTION = YES
-GCC_WARN_UNUSED_LABEL = YES
-GCC_WARN_UNUSED_PARAMETER = YES
-GCC_WARN_UNUSED_VARIABLE = YES
-CLANG_WARN_ASSIGN_ENUM = YES
-CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
-CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
-CLANG_WARN_EMPTY_BODY = YES
-CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
-CLANG_WARN_INFINITE_RECURSION = YES
-CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES
-CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES
-CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
-CLANG_WARN_SUSPICIOUS_MOVE = YES
-CLANG_WARN_UNREACHABLE_CODE = YES
-CLANG_WARN_UNGUARDED_AVAILABILITY = YES
-GCC_TREAT_WARNINGS_AS_ERRORS = YES
-GCC_OPTIMIZATION_LEVEL = s
-GCC_NO_COMMON_BLOCKS = YES
-GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1
-STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0
-WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS)
-NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla -Wno-unguarded-availability-new
-OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS)
-OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions
-OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
-OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1
-OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1
-GENERATE_PROFILING_CODE = NO
-DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
-SIM_SUFFIX[sdk=*simulator*] = _sim
-DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind
-OBJC_LDFLAGS = -Wl,-upward-lobjc
-LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin
-LIBDARWIN_LDFLAGS[sdk=*simulator*] =
-ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty
-ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order
-ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases
-OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS)
-OTHER_MIGFLAGS = -novouchers
 
 COPY_HEADERS_RUN_UNIFDEF = YES
 COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__
diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig
index e6d83a3aa..b21315812 100644
--- a/xcodeconfig/libfirehose_kernel.xcconfig
+++ b/xcodeconfig/libfirehose_kernel.xcconfig
@@ -21,7 +21,7 @@ SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
 PRODUCT_NAME = $(TARGET_NAME)
 INSTALL_PATH = /usr/local/lib/kernel/
 
-GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0
+GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY=0
 OTHER_MIGFLAGS = -novouchers
 OTHER_LDFLAGS =
 OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed
diff --git a/xcodescripts/check-order.sh b/xcodescripts/check-order.sh
index 60cb9ebff..3801df0ee 100644
--- a/xcodescripts/check-order.sh
+++ b/xcodescripts/check-order.sh
@@ -23,12 +23,16 @@ test "$ACTION" = install || exit 0
 
 list_objc_syms ()
 {
-	nm -arch $1 -nU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep _OBJC | cut -d' ' -f3
+	nm -arch $1 -jnU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep -E '^_OBJC_(CLASS|METACLASS)_\$'
 }
 
 list_mutable_data_syms ()
 {
-	nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib |grep __DATA|egrep -v '(__const|__crash_info)'|sed 's/^.* //'
+	nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib | awk '
+		/__DATA.* _OBJC_(CLASS|METACLASS)_\$/{ print $NF; next }
+		/__const|__crash_info| _OBJC| __OBJC/{ next }
+		/__DATA/{ print $NF }
+	'
 }
 
 list_objc_order ()
diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh
index 1fb149b63..212bf74ab 100755
--- a/xcodescripts/install-headers.sh
+++ b/xcodescripts/install-headers.sh
@@ -25,8 +25,21 @@ fi
 
 mkdir -p "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" || true
 mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true
+
 cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_6}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_7}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+
+
+cp -X "${SCRIPT_INPUT_FILE_8}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_9}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_10}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_11}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_12}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_13}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_14}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_15}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh
index bd477c027..e2aff4c59 100755
--- a/xcodescripts/mig-headers.sh
+++ b/xcodescripts/mig-headers.sh
@@ -19,6 +19,7 @@
 # @APPLE_APACHE_LICENSE_HEADER_END@
 #
+
 export MIGCC="$(xcrun -find cc)"
 export MIGCOM="$(xcrun -find migcom)"
 export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}"
diff --git a/xcodescripts/postprocess-headers.sh b/xcodescripts/postprocess-headers.sh
index 41f466939..c521fbe96 100755
--- a/xcodescripts/postprocess-headers.sh
+++ b/xcodescripts/postprocess-headers.sh
@@ -19,3 +19,8 @@
 # @APPLE_APACHE_LICENSE_HEADER_END@
 #
+
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_1}" "${SCRIPT_INPUT_FILE_1}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_2}" "${SCRIPT_INPUT_FILE_2}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_3}" "${SCRIPT_INPUT_FILE_3}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_4}" "${SCRIPT_INPUT_FILE_4}" || true