From 68c35538cb91f5a816cadc24af1dc3feb5f71dc8 Mon Sep 17 00:00:00 2001 From: Rokhini Prabhu Date: Fri, 27 Aug 2021 09:38:39 -0700 Subject: [PATCH 001/249] Merge libdispatch-1186 Signed-off-by: Rokhini Prabhu --- cmake/modules/DispatchCompilerWarnings.cmake | 2 +- dispatch/base.h | 37 +- dispatch/block.h | 2 +- dispatch/dispatch.h | 3 +- dispatch/object.h | 58 ++ dispatch/queue.h | 184 ++++- dispatch/source.h | 13 +- dispatch/workloop.h | 140 ++++ libdispatch.xcodeproj/project.pbxproj | 471 +++++++---- man/dispatch_semaphore_create.3 | 12 +- os/firehose_server_private.h | 1 + os/object.h | 7 +- os/object_private.h | 27 +- private/channel_private.h | 567 +++++++++++++ private/mach_private.h | 95 ++- private/private.h | 7 +- private/queue_private.h | 52 +- private/source_private.h | 18 + private/workloop_private.h | 255 +----- src/allocator_internal.h | 2 +- src/block.cpp | 2 +- src/data.m | 6 +- src/event/event.c | 10 +- src/event/event_config.h | 10 +- src/event/event_internal.h | 3 +- src/event/event_kevent.c | 137 ++-- src/firehose/firehose.defs | 1 + src/firehose/firehose_buffer.c | 85 +- src/firehose/firehose_inline_internal.h | 55 +- src/firehose/firehose_reply.defs | 1 + src/firehose/firehose_server.c | 71 +- src/firehose/firehose_server_internal.h | 2 - src/firehose/firehose_server_object.m | 13 +- src/firehose/firehose_types.defs | 1 + src/init.c | 45 +- src/inline_internal.h | 63 +- src/internal.h | 36 +- src/introspection.c | 7 +- src/io.c | 28 +- src/mach.c | 316 ++++++-- src/mach_internal.h | 2 +- src/object.c | 66 +- src/object.m | 76 +- src/object_internal.h | 21 +- src/protocol.defs | 3 + src/queue.c | 786 ++++++++++++++++--- src/queue_internal.h | 48 +- src/semaphore.c | 6 +- src/shims.h | 2 - src/shims/atomic.h | 107 ++- src/shims/lock.c | 4 +- src/shims/lock.h | 8 +- src/shims/target.h | 2 +- src/source.c | 86 +- src/source_internal.h | 26 +- src/voucher.c | 23 +- src/voucher_internal.h | 2 +- xcodeconfig/libdispatch-dyld-stub.xcconfig | 28 - xcodeconfig/libdispatch.aliases | 4 + xcodeconfig/libdispatch.clean | 1 + xcodeconfig/libdispatch.dirty | 3 + xcodeconfig/libdispatch.order | 3 + xcodeconfig/libdispatch.xcconfig | 106 ++- xcodeconfig/libfirehose_kernel.xcconfig | 2 +- xcodescripts/check-order.sh | 8 +- xcodescripts/mig-headers.sh | 1 + 66 files changed, 3105 insertions(+), 1164 deletions(-) create mode 100644 dispatch/workloop.h create mode 100644 private/channel_private.h delete mode 100644 xcodeconfig/libdispatch-dyld-stub.xcconfig diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake index a7b31c818..d568c721a 100644 --- a/cmake/modules/DispatchCompilerWarnings.cmake +++ b/cmake/modules/DispatchCompilerWarnings.cmake @@ -38,7 +38,7 @@ else() add_compile_options(-Wsign-conversion) add_compile_options(-Wstatic-in-inline) add_compile_options(-Wsuper-class-method-mismatch) - add_compile_options(-Wswitch-enum) + add_compile_options(-Wswitch) add_compile_options(-Wunguarded-availability) add_compile_options(-Wunreachable-code) add_compile_options(-Wunused) diff --git a/dispatch/base.h b/dispatch/base.h index 0c8540acb..e6c71b0e0 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -237,13 +237,35 @@ #endif #endif -#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) || defined(_WIN32) +#if __has_attribute(enum_extensibility) +#define __DISPATCH_ENUM_ATTR __attribute__((__enum_extensibility__(open))) +#define __DISPATCH_ENUM_ATTR_CLOSED __attribute__((__enum_extensibility__(closed))) +#else +#define 
__DISPATCH_ENUM_ATTR
+#define __DISPATCH_ENUM_ATTR_CLOSED
+#endif // __has_attribute(enum_extensibility)
+
+#if __has_attribute(flag_enum)
+#define __DISPATCH_OPTIONS_ATTR __attribute__((__flag_enum__))
+#else
+#define __DISPATCH_OPTIONS_ATTR
+#endif // __has_attribute(flag_enum)
+
+
+#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) || \
+		__has_extension(cxx_fixed_enum) || defined(_WIN32)
 #define DISPATCH_ENUM(name, type, ...) \
-		typedef enum : type { __VA_ARGS__ } name##_t
+		typedef enum : type { __VA_ARGS__ } __DISPATCH_ENUM_ATTR name##_t
+#define DISPATCH_OPTIONS(name, type, ...) \
+		typedef enum : type { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR name##_t
 #else
 #define DISPATCH_ENUM(name, type, ...) \
-		enum { __VA_ARGS__ }; typedef type name##_t
-#endif
+		enum { __VA_ARGS__ } __DISPATCH_ENUM_ATTR; typedef type name##_t
+#define DISPATCH_OPTIONS(name, type, ...) \
+		enum { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR; typedef type name##_t
+#endif // __has_feature(objc_fixed_enum) ...
+
+
 #if __has_feature(enumerator_attributes)
 #define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__)
@@ -256,12 +278,11 @@
 #define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...)
 #endif
 
-#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \
-		SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#ifdef __swift__
 #define DISPATCH_SWIFT3_OVERLAY 1
-#else
+#else // __swift__
 #define DISPATCH_SWIFT3_OVERLAY 0
-#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#endif // __swift__
 
 #if __has_feature(attribute_availability_swift)
 #define DISPATCH_SWIFT_UNAVAILABLE(_msg) \
diff --git a/dispatch/block.h b/dispatch/block.h
index d60cb2c18..4d6f5b548 100644
--- a/dispatch/block.h
+++ b/dispatch/block.h
@@ -100,7 +100,7 @@ __BEGIN_DECLS
 * for synchronous execution or when the dispatch block object is invoked
 * directly.
 */
-DISPATCH_ENUM(dispatch_block_flags, unsigned long,
+DISPATCH_OPTIONS(dispatch_block_flags, unsigned long,
	DISPATCH_BLOCK_BARRIER
			DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1,
	DISPATCH_BLOCK_DETACHED
diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h
index 8f3292c32..cbc39ede6 100644
--- a/dispatch/dispatch.h
+++ b/dispatch/dispatch.h
@@ -50,7 +50,7 @@
 #endif
 #endif
 
-#define DISPATCH_API_VERSION 20180109
+#define DISPATCH_API_VERSION 20181008
 
 #ifndef __DISPATCH_BUILDING_DISPATCH__
 #ifndef __DISPATCH_INDIRECT__
@@ -69,6 +69,7 @@
 #include <dispatch/once.h>
 #include <dispatch/data.h>
 #include <dispatch/io.h>
+#include <dispatch/workloop.h>
 
 #undef __DISPATCH_INDIRECT__
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
diff --git a/dispatch/object.h b/dispatch/object.h
index 17167bd72..024a3c2a8 100644
--- a/dispatch/object.h
+++ b/dispatch/object.h
@@ -26,6 +26,10 @@
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
+#if __has_include(<sys/qos.h>)
+#include <sys/qos.h>
+#endif
+
 DISPATCH_ASSUME_NONNULL_BEGIN
 
 /*!
@@ -95,6 +99,7 @@ typedef union {
	struct dispatch_queue_attr_s *_dqa;
	struct dispatch_group_s *_dg;
	struct dispatch_source_s *_ds;
+	struct dispatch_channel_s *_dch;
	struct dispatch_mach_s *_dm;
	struct dispatch_mach_msg_s *_dmsg;
	struct dispatch_semaphore_s *_dsema;
@@ -178,6 +183,16 @@ typedef void (^dispatch_block_t)(void);
 
 __BEGIN_DECLS
 
+/*!
+ * @typedef dispatch_qos_class_t
+ * Alias for qos_class_t type.
+ */
+#if __has_include(<sys/qos.h>)
+typedef qos_class_t dispatch_qos_class_t;
+#else
+typedef unsigned int dispatch_qos_class_t;
+#endif
+
 /*!
 * @function dispatch_retain
 *
@@ -374,6 +389,49 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
 void
 dispatch_resume(dispatch_object_t object);
 
+/*!
+ * @function dispatch_set_qos_class_floor
+ *
+ * @abstract
+ * Sets the QOS class floor on a dispatch queue, source, or workloop.
+ *
+ * @discussion
+ * The QOS class of workitems submitted to this object asynchronously will be
+ * elevated to at least the specified QOS class floor. The QOS of the workitem
+ * will be used if higher than the floor, even when the workitem has been
+ * created without "ENFORCE" semantics.
+ *
+ * Setting the QOS class floor is equivalent to the QOS effects of configuring
+ * a queue whose target queue has a QOS class set to the same value.
+ *
+ * @param object
+ * A dispatch queue, workloop, or source to configure.
+ * The object must be inactive.
+ *
+ * Passing another object type or an object that has been activated is undefined
+ * and will cause the process to be terminated.
+ *
+ * @param qos_class
+ * A QOS class value:
+ *  - QOS_CLASS_USER_INTERACTIVE
+ *  - QOS_CLASS_USER_INITIATED
+ *  - QOS_CLASS_DEFAULT
+ *  - QOS_CLASS_UTILITY
+ *  - QOS_CLASS_BACKGROUND
+ * Passing any other value is undefined.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_set_qos_class_floor(dispatch_object_t object,
+		dispatch_qos_class_t qos_class, int relative_priority);
+
 #ifdef __BLOCKS__
 /*!
 * @function dispatch_wait
diff --git a/dispatch/queue.h b/dispatch/queue.h
index 7c4a0f49d..ddace0659 100644
--- a/dispatch/queue.h
+++ b/dispatch/queue.h
@@ -26,10 +26,6 @@
 #include <dispatch/base.h> // for HeaderDoc
 #endif
 
-#if __has_include(<sys/qos.h>)
-#include <sys/qos.h>
-#endif
-
 DISPATCH_ASSUME_NONNULL_BEGIN
 
 /*!
@@ -336,6 +332,102 @@ void
 dispatch_sync_f(dispatch_queue_t queue,
		void *_Nullable context, dispatch_function_t work);
 
+/*!
+ * @function dispatch_async_and_wait
+ *
+ * @abstract
+ * Submits a block for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a workitem to a dispatch queue like dispatch_async(); however,
+ * dispatch_async_and_wait() will not return until the workitem has finished.
+ *
+ * Like functions of the dispatch_sync family, dispatch_async_and_wait() is
+ * subject to deadlock (see dispatch_sync() for details).
+ *
+ * However, dispatch_async_and_wait() differs from functions of the
+ * dispatch_sync family in two fundamental ways: how it respects queue
+ * attributes and how it chooses the execution context invoking the workitem.
+ *
+ * Differences with dispatch_sync()
+ *
+ * Work items submitted to a queue with dispatch_async_and_wait() observe all
+ * queue attributes of that queue when invoked (including autorelease frequency
+ * or QOS class).
+ *
+ * When the runtime has brought up a thread to invoke the asynchronous workitems
+ * already submitted to the specified queue, that servicing thread will also be
+ * used to execute synchronous work submitted to the queue with
+ * dispatch_async_and_wait().
+ *
+ * However, if the runtime has not brought up a thread to service the specified
+ * queue (because it has no workitems enqueued, or only synchronous workitems),
+ * then dispatch_async_and_wait() will invoke the workitem on the calling thread,
+ * similar to the behavior of functions in the dispatch_sync family.
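+ *
+ * A minimal illustrative sketch (compute() stands for arbitrary application
+ * work, and "queue" is assumed to be a serial queue the caller does not
+ * already hold, otherwise the call deadlocks as described above):
+ *
+ * <code>
+ * __block long result = 0;
+ * dispatch_async_and_wait(queue, ^{
+ *     // Runs with the queue's attributes (QOS class, autorelease
+ *     // frequency), possibly on the calling thread.
+ *     result = compute();
+ * });
+ * // The workitem is guaranteed to have finished at this point.
+ * </code>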
+ *
+ * As an exception, if the queue the work is submitted to doesn't target
+ * a global concurrent queue (for example because it targets the main queue),
+ * then the workitem will never be invoked by the thread calling
+ * dispatch_async_and_wait().
+ *
+ * In other words, dispatch_async_and_wait() is similar to submitting
+ * a dispatch_block_create()d workitem to a queue and then waiting on it, as
+ * shown in the code example below. However, dispatch_async_and_wait() is
+ * significantly more efficient when a new thread is not required to execute
+ * the workitem (as it will use the stack of the submitting thread instead of
+ * requiring heap allocations).
+ *
+ * <code>
+ * dispatch_block_t b = dispatch_block_create(0, block);
+ * dispatch_async(queue, b);
+ * dispatch_block_wait(b, DISPATCH_TIME_FOREVER);
+ * Block_release(b);
+ * </code>
+ *
+ * @param queue
+ * The target dispatch queue to which the block is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The block to be invoked on the target dispatch queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_async_and_wait(dispatch_queue_t queue,
+		DISPATCH_NOESCAPE dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_async_and_wait_f
+ *
+ * @abstract
+ * Submits a function for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * See dispatch_async_and_wait() for details.
+ *
+ * @param queue
+ * The target dispatch queue to which the function is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_async_and_wait_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_async_and_wait_f(dispatch_queue_t queue,
+		void *_Nullable context, dispatch_function_t work);
+
 
 #if defined(__APPLE__) && \
		(defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \
@@ -549,16 +641,6 @@ dispatch_get_main_queue(void)
 
 typedef long dispatch_queue_priority_t;
 
-/*!
- * @typedef dispatch_qos_class_t
- * Alias for qos_class_t type.
- */
-#if __has_include(<sys/qos.h>)
-typedef qos_class_t dispatch_qos_class_t;
-#else
-typedef unsigned int dispatch_qos_class_t;
-#endif
-
 /*!
 * @function dispatch_get_global_queue
@@ -1214,7 +1296,8 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue,
 * Submits a block to a dispatch queue like dispatch_async(), but marks that
 * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues).
 *
- * See dispatch_async() for details.
+ * See dispatch_async() for details and "Dispatch Barrier API" for a description
+ * of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the block is submitted.
@@ -1245,7 +1328,8 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block);
 * that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
 * queues).
 *
- * See dispatch_async_f() for details.
+ * See dispatch_async_f() for details and "Dispatch Barrier API" for a
+ * description of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the function is submitted.
@@ -1278,7 +1362,8 @@ dispatch_barrier_async_f(dispatch_queue_t queue,
 * Submits a block to a dispatch queue like dispatch_sync(), but marks that
 * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues).
 *
- * See dispatch_sync() for details.
+ * See dispatch_sync() for details and "Dispatch Barrier API" for a description
+ * of the barrier semantics.
 *
 * @param queue
 * The target dispatch queue to which the block is submitted.
@@ -1327,6 +1412,67 @@ void
 dispatch_barrier_sync_f(dispatch_queue_t queue,
		void *_Nullable context, dispatch_function_t work);
 
+/*!
+ * @function dispatch_barrier_async_and_wait
+ *
+ * @abstract
+ * Submits a block for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks
+ * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
+ * queues).
+ *
+ * See "Dispatch Barrier API" for a description of the barrier semantics.
+ *
+ * @param queue
+ * The target dispatch queue to which the block is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The application-defined block to invoke on the target queue.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_barrier_async_and_wait(dispatch_queue_t queue,
+		DISPATCH_NOESCAPE dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_barrier_async_and_wait_f
+ *
+ * @abstract
+ * Submits a function for synchronous execution on a dispatch queue.
+ *
+ * @discussion
+ * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but
+ * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT
+ * queues).
+ *
+ * See "Dispatch Barrier API" for a description of the barrier semantics.
+ *
+ * @param queue
+ * The target dispatch queue to which the function is submitted.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_barrier_async_and_wait_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_barrier_async_and_wait_f(dispatch_queue_t queue,
+		void *_Nullable context, dispatch_function_t work);
+
 /*!
 * @functiongroup Dispatch queue-specific contexts
 * This API allows different subsystems to associate context to a shared queue
@@ -1511,9 +1657,9 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue);
 * Verifies that the current block is not executing on a given dispatch queue.
 *
 * @discussion
- * This function is the equivalent of dispatch_queue_assert() with the test for
+ * This function is the equivalent of dispatch_assert_queue() with the test for
 * equality inverted. That means that it will terminate the application when
- * dispatch_queue_assert() would return, and vice-versa. See discussion there.
+ * dispatch_assert_queue() would return, and vice-versa. See discussion there.
 *
 * The variant dispatch_assert_queue_not_debug() is compiled out when the
 * preprocessor macro NDEBUG is defined. (See also assert(3)).
diff --git a/dispatch/source.h b/dispatch/source.h
index 3289076de..40453fa3e 100644
--- a/dispatch/source.h
+++ b/dispatch/source.h
@@ -123,7 +123,8 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send);
 * @const DISPATCH_SOURCE_TYPE_MACH_RECV
 * @discussion A dispatch source that monitors a Mach port for pending messages.
 * The handle is a Mach port with a receive right (mach_port_t).
- * The mask is unused (pass zero for now).
+ * The mask is a mask of desired events from dispatch_source_mach_recv_flags_t,
+ * but no flags are currently defined (pass zero for now).
 */
 #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv)
 API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE()
@@ -218,6 +219,12 @@ DISPATCH_SOURCE_TYPE_DECL(write);
 
 typedef unsigned long dispatch_source_mach_send_flags_t;
 
+/*!
+ * @typedef dispatch_source_mach_recv_flags_t
+ * Type of dispatch_source_mach_recv flags
+ */
+typedef unsigned long dispatch_source_mach_recv_flags_t;
+
 /*!
 * @typedef dispatch_source_memorypressure_flags_t
 * Type of dispatch_source_memorypressure flags
@@ -582,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source);
 * DISPATCH_SOURCE_TYPE_DATA_OR:        n/a
 * DISPATCH_SOURCE_TYPE_DATA_REPLACE:   n/a
 * DISPATCH_SOURCE_TYPE_MACH_SEND:      dispatch_source_mach_send_flags_t
- * DISPATCH_SOURCE_TYPE_MACH_RECV:      n/a
+ * DISPATCH_SOURCE_TYPE_MACH_RECV:      dispatch_source_mach_recv_flags_t
 * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE  dispatch_source_memorypressure_flags_t
 * DISPATCH_SOURCE_TYPE_PROC:           dispatch_source_proc_flags_t
 * DISPATCH_SOURCE_TYPE_READ:           n/a
@@ -619,7 +626,7 @@ dispatch_source_get_mask(dispatch_source_t source);
 * DISPATCH_SOURCE_TYPE_DATA_OR:        application defined data
 * DISPATCH_SOURCE_TYPE_DATA_REPLACE:   application defined data
 * DISPATCH_SOURCE_TYPE_MACH_SEND:      dispatch_source_mach_send_flags_t
- * DISPATCH_SOURCE_TYPE_MACH_RECV:      n/a
+ * DISPATCH_SOURCE_TYPE_MACH_RECV:      dispatch_source_mach_recv_flags_t
 * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE  dispatch_source_memorypressure_flags_t
 * DISPATCH_SOURCE_TYPE_PROC:           dispatch_source_proc_flags_t
 * DISPATCH_SOURCE_TYPE_READ:           estimated bytes available to read
diff --git a/dispatch/workloop.h b/dispatch/workloop.h
new file mode 100644
index 000000000..2c6cf18c5
--- /dev/null
+++ b/dispatch/workloop.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_WORKLOOP__
+#define __DISPATCH_WORKLOOP__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/dispatch.h> instead of this file directly."
+#include <dispatch/base.h> // for HeaderDoc
+#endif
+
+DISPATCH_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+/*!
+ * @typedef dispatch_workloop_t
+ *
+ * @abstract
+ * Dispatch workloops invoke workitems submitted to them in priority order.
+ *
+ * @discussion
+ * A dispatch workloop is a flavor of dispatch_queue_t that is a priority
+ * ordered queue (using the QOS class of the submitted workitems as the
+ * ordering).
+ *
+ * Between each workitem invocation, the workloop will evaluate whether higher
+ * priority workitems have since been submitted, either directly to the
+ * workloop or to any queues that target the workloop, and execute these first.
+ *
+ * Serial queues targeting a workloop maintain FIFO execution of their
+ * workitems. However, the workloop may reorder workitems submitted to
+ * independent serial queues targeting it with respect to each other,
+ * based on their priorities, while preserving FIFO execution with respect to
+ * each serial queue.
+ *
+ * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed
+ * to all APIs accepting a dispatch queue, except for functions from the
+ * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop
+ * objects. Functions from the dispatch_sync() family on queues targeting
+ * a workloop are still permitted but discouraged for performance reasons.
+ */
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct dispatch_workloop_s *dispatch_workloop_t;
+#else
+DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue);
+#endif
+
+/*!
+ * @function dispatch_workloop_create
+ *
+ * @abstract
+ * Creates a new dispatch workloop to which workitems may be submitted.
+ *
+ * @param label
+ * A string label to attach to the workloop.
+ *
+ * @result
+ * The newly created dispatch workloop.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_workloop_t
+dispatch_workloop_create(const char *_Nullable label);
+
+/*!
+ * @function dispatch_workloop_create_inactive
+ *
+ * @abstract
+ * Creates a new inactive dispatch workloop that can be set up and then
+ * activated.
+ *
+ * @discussion
+ * Creating an inactive workloop allows it to receive further configuration
+ * before it is activated and before workitems may be submitted to it.
+ *
+ * Submitting workitems to an inactive workloop is undefined and will cause the
+ * process to be terminated.
+ *
+ * @param label
+ * A string label to attach to the workloop.
+ *
+ * @result
+ * The newly created dispatch workloop.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW
+dispatch_workloop_t
+dispatch_workloop_create_inactive(const char *_Nullable label);
+
+/*!
+ * @function dispatch_workloop_set_autorelease_frequency
+ *
+ * @abstract
+ * Sets the autorelease frequency of the workloop.
+ *
+ * @discussion
+ * See dispatch_queue_attr_make_with_autorelease_frequency().
+ * The default policy for a workloop is
+ * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM.
+ *
+ * @param workloop
+ * The dispatch workloop to modify.
+ *
+ * This workloop must be inactive; passing an activated object is undefined
+ * and will cause the process to be terminated.
+ *
+ * @param frequency
+ * The requested autorelease frequency.
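+ *
+ * A minimal configuration sketch (the label is illustrative;
+ * dispatch_activate() is the generic activation call for dispatch objects):
+ *
+ * <code>
+ * dispatch_workloop_t wl = dispatch_workloop_create_inactive("com.example.wl");
+ * dispatch_workloop_set_autorelease_frequency(wl,
+ *         DISPATCH_AUTORELEASE_FREQUENCY_NEVER);
+ * dispatch_activate(wl); // workitems may be submitted from this point on
+ * </code>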
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, + dispatch_autorelease_frequency_t frequency); + +__END_DECLS + +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index af2ed2cc2..68d920082 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -122,14 +122,12 @@ 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; - 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; - 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; @@ -155,7 +153,6 @@ 6E9C6CAA20F9848D00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAB20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; - 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; @@ -165,16 +162,16 @@ 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 
6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; - 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; - 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; @@ -204,7 +201,6 @@ 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; @@ -226,28 +222,10 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = 
{isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; + B609581F221DFA4B00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; B68358901FA77F5B00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; - C00B0DF41C5AEBBE000330B3 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; - C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; - C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - C00B0E001C5AEBBE000330B3 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; - C00B0E011C5AEBBE000330B3 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; - C00B0E021C5AEBBE000330B3 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; - C00B0E031C5AEBBE000330B3 /* io.c in Sources */ = {isa = PBXBuildFile; 
fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - C00B0E041C5AEBBE000330B3 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; - C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; C01866A61C5973210040FC07 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; C01866A71C5973210040FC07 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; C01866A81C5973210040FC07 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; @@ -290,6 +268,99 @@ E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43B88322241F19000215272 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88332241F19000215272 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88342241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B994F0F3E85C30006BC96 /* object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88352241F19000215272 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E43B88362241F19000215272 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88372241F19000215272 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88382241F19000215272 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88392241F19000215272 /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; + E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; + E43B883B2241F19000215272 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883C2241F19000215272 /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883D2241F19000215272 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E43B883E2241F19000215272 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883F2241F19000215272 /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88402241F19000215272 /* voucher_internal.h in Headers */ = {isa = 
PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; + E43B88412241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88422241F19000215272 /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E43B88432241F19000215272 /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88442241F19000215272 /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + E43B88452241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = 96032E4C0F5CC8D100241C5F /* time.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88462241F19000215272 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88472241F19000215272 /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88482241F19000215272 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88492241F19000215272 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884A2241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884B2241F19000215272 /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884C2241F19000215272 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E43B884D2241F19000215272 /* benchmark.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B99350F3E83980006BC96 /* benchmark.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884E2241F19000215272 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; + E43B884F2241F19000215272 /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88502241F19000215272 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E43B88512241F19000215272 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E43B88522241F19000215272 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E43B88532241F19000215272 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E43B88542241F19000215272 /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E43B88552241F19000215272 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; + E43B88562241F19000215272 /* 
io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E43B88572241F19000215272 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E43B88582241F19000215272 /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + E43B88592241F19000215272 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E43B885A2241F19000215272 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E43B885B2241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E43B885C2241F19000215272 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + E43B885E2241F19000215272 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E43B885F2241F19000215272 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88602241F19000215272 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E43B88612241F19000215272 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E43B88622241F19000215272 /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + E43B88632241F19000215272 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + E43B88642241F19000215272 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E43B88652241F19000215272 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E43B88662241F19000215272 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + E43B88682241F19000215272 /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E43B88692241F19000215272 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; + E43B886A2241F19000215272 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886B2241F19000215272 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; + E43B886C2241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E43B886D2241F19000215272 /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886E2241F19000215272 /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 
E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886F2241F19000215272 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + E43B88702241F19000215272 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E43B88722241F19000215272 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E43B88732241F19000215272 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E43B88742241F19000215272 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + E43B88752241F19000215272 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + E43B88762241F19000215272 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E43B88772241F19000215272 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E43B88782241F19000215272 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E43B88792241F19000215272 /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E43B887A2241F19000215272 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43B887B2241F19000215272 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + E43B887C2241F19000215272 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E43B887D2241F19000215272 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E43B887E2241F19000215272 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E43B887F2241F19000215272 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E43B88802241F19000215272 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E43B88812241F19000215272 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + E43B88822241F19000215272 /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + E43B88832241F19000215272 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + E43B88842241F19000215272 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + E43B88852241F19000215272 /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + E43B88862241F19000215272 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E43B88872241F19000215272 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + E43B88882241F19000215272 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + 
E43B88892241F19000215272 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E43B888A2241F19000215272 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E43B888B2241F19000215272 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E43B888C2241F19000215272 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E43B888D2241F19000215272 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + E43B888E2241F19000215272 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E43B888F2241F19000215272 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; @@ -331,6 +402,7 @@ E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4834144225D27F600954FC6 /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; @@ -366,7 +438,6 @@ E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -563,13 +634,6 @@ remoteGlobalIDString = FCFA5A9F10D1AE050074F59A; remoteInfo = ddt; }; - C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ 
= { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = C00B0DF01C5AEBBE000330B3; - remoteInfo = "libdispatch dyld stub"; - }; C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -598,6 +662,20 @@ remoteGlobalIDString = D2AAC045055464E500DB518D; remoteInfo = libdispatch; }; + E43B882A2241F19000215272 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4EC121612514715000DDBD1; + remoteInfo = "libdispatch mp resolved"; + }; + E43B882C2241F19000215272 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E49BB6CE1E70748100868613; + remoteInfo = "libdispatch armv81 resolved"; + }; E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -713,6 +791,8 @@ 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = ""; }; 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = ""; }; 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; + 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_channel.c; sourceTree = ""; }; + 6EC8DC261E3E84610044B652 /* channel_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_private.h; sourceTree = ""; }; 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = ""; }; 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = ""; }; 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = ""; }; @@ -755,6 +835,7 @@ 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; + B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; B683588A1FA77F4900AA0D58 /* time_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time_private.h; sourceTree = ""; }; @@ -765,8 +846,6 @@ B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = ""; }; B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = ""; }; B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = ""; }; - C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; - C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = ""; }; C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = ""; }; @@ -788,6 +867,7 @@ E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = ""; }; + E43B889A2241F19000215272 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; @@ -935,9 +1015,9 @@ E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, E49BB6F21E70748100868613 /* libdispatch_armv81.a */, C01866BD1C5973210040FC07 /* libdispatch.a */, - C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, + E43B889A2241F19000215272 /* libdispatch.dylib */, ); name = Products; sourceTree = 
""; @@ -1018,6 +1098,7 @@ 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */, 6E326AB11C224830002A6505 /* dispatch_cascade.c */, 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */, + 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */, 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, @@ -1125,7 +1206,6 @@ E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */, - C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */, 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, @@ -1230,6 +1310,7 @@ 721F5C5C0F15520500FF03A6 /* semaphore.h */, FC7BED8D0E8361E600161930 /* source.h */, 96032E4C0F5CC8D100241C5F /* time.h */, + B6095819221DFA2A00F39D1F /* workloop.h */, E421E5F81716ADA10090DC9B /* introspection.h */, ); name = "Dispatch Public Headers"; @@ -1244,6 +1325,7 @@ E48AF55916E70FD9004105FF /* io_private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, + 6EC8DC261E3E84610044B652 /* channel_private.h */, FCEF047F0F5661960067401F /* source_private.h */, E4ECBAA415253C25002C313C /* mach_private.h */, B683588A1FA77F4900AA0D58 /* time_private.h */, @@ -1314,6 +1396,7 @@ 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, + 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, @@ -1338,6 +1421,7 @@ F7DC045B2060BBBE00C90737 /* target.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, FC7BED9E0E8361E600161930 /* internal.h in Headers */, + B609581E221DFA2A00F39D1F /* workloop.h in Headers */, 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, @@ -1375,10 +1459,82 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E43B88312241F19000215272 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + E43B88322241F19000215272 /* dispatch.h in Headers */, + E43B88332241F19000215272 /* base.h in Headers */, + E43B88342241F19000215272 /* object.h in Headers */, + E43B88352241F19000215272 /* inline_internal.h in Headers */, + E43B88362241F19000215272 /* channel_private.h in Headers */, + E43B88372241F19000215272 /* queue.h in Headers */, + E43B88382241F19000215272 /* source.h in Headers */, + E43B88392241F19000215272 /* venture_private.h in Headers */, + E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */, + E43B883B2241F19000215272 /* semaphore.h in Headers */, + E43B883C2241F19000215272 /* group.h in Headers */, + E43B883D2241F19000215272 /* priority.h in Headers */, + E43B883E2241F19000215272 /* once.h in Headers */, + E43B883F2241F19000215272 /* io.h in Headers */, + E43B88402241F19000215272 /* voucher_internal.h in Headers */, + E43B88412241F19000215272 /* module.modulemap in Headers */, + E43B88422241F19000215272 /* atomic_sfb.h in Headers */, + E43B88432241F19000215272 /* data.h in Headers */, + 
E43B88442241F19000215272 /* firehose_internal.h in Headers */, + E43B88452241F19000215272 /* time.h in Headers */, + E43B88462241F19000215272 /* private.h in Headers */, + E43B88472241F19000215272 /* block.h in Headers */, + E43B88482241F19000215272 /* data_private.h in Headers */, + E43B88492241F19000215272 /* queue_private.h in Headers */, + E43B884A2241F19000215272 /* module.modulemap in Headers */, + E43B884B2241F19000215272 /* source_private.h in Headers */, + E43B884C2241F19000215272 /* target.h in Headers */, + E43B884D2241F19000215272 /* benchmark.h in Headers */, + E43B884E2241F19000215272 /* internal.h in Headers */, + E4834144225D27F600954FC6 /* workloop.h in Headers */, + E43B884F2241F19000215272 /* workloop_private.h in Headers */, + E43B88502241F19000215272 /* object_internal.h in Headers */, + E43B88512241F19000215272 /* queue_internal.h in Headers */, + E43B88522241F19000215272 /* source_internal.h in Headers */, + E43B88532241F19000215272 /* semaphore_internal.h in Headers */, + E43B88542241F19000215272 /* data_internal.h in Headers */, + E43B88552241F19000215272 /* voucher_private.h in Headers */, + E43B88562241F19000215272 /* io_internal.h in Headers */, + E43B88572241F19000215272 /* tsd.h in Headers */, + E43B88582241F19000215272 /* event_config.h in Headers */, + E43B88592241F19000215272 /* atomic.h in Headers */, + E43B885A2241F19000215272 /* shims.h in Headers */, + E43B885B2241F19000215272 /* time.h in Headers */, + E43B885C2241F19000215272 /* mach_internal.h in Headers */, + E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */, + E43B885E2241F19000215272 /* yield.h in Headers */, + E43B885F2241F19000215272 /* layout_private.h in Headers */, + E43B88602241F19000215272 /* perfmon.h in Headers */, + E43B88612241F19000215272 /* config.h in Headers */, + E43B88622241F19000215272 /* venture_internal.h in Headers */, + E43B88632241F19000215272 /* lock.h in Headers */, + E43B88642241F19000215272 /* trace.h in Headers */, + E43B88652241F19000215272 /* getprogname.h in Headers */, + E43B88662241F19000215272 /* event_internal.h in Headers */, + E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */, + E43B88682241F19000215272 /* hw_config.h in Headers */, + E43B88692241F19000215272 /* object_private.h in Headers */, + E43B886A2241F19000215272 /* time_private.h in Headers */, + E43B886B2241F19000215272 /* workqueue_internal.h in Headers */, + E43B886C2241F19000215272 /* object.h in Headers */, + E43B886D2241F19000215272 /* io_private.h in Headers */, + E43B886E2241F19000215272 /* mach_private.h in Headers */, + E43B886F2241F19000215272 /* allocator_internal.h in Headers */, + E43B88702241F19000215272 /* introspection_internal.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; E49F24AA125D57FA0057C971 /* Headers */ = { isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */, E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, @@ -1413,6 +1569,7 @@ 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, + B609581F221DFA4B00F39D1F /* workloop.h in Headers */, E49F24C1125D57FA0057C971 /* tsd.h in Headers */, E49F24C2125D57FA0057C971 /* atomic.h in Headers */, E49F24C3125D57FA0057C971 /* shims.h in Headers */, @@ -1484,7 +1641,7 
@@ /* Begin PBXLegacyTarget section */ 92F3FECA1BEC69E500025962 /* darwintests */ = { isa = PBXLegacyTarget; - buildArgumentsString = "$(ACTION)"; + buildArgumentsString = "-j -k $(ACTION)"; buildConfigurationList = 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */; buildPhases = ( ); @@ -1532,28 +1689,11 @@ productReference = 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */; productType = "com.apple.product-type.library.static"; }; - C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */ = { - isa = PBXNativeTarget; - buildConfigurationList = C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */; - buildPhases = ( - C00B0DF11C5AEBBE000330B3 /* Sources */, - C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "libdispatch dyld stub"; - productName = libdispatch; - productReference = C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */; - productType = "com.apple.product-type.library.static"; - }; C01866A41C5973210040FC07 /* libdispatch mp static */ = { isa = PBXNativeTarget; buildConfigurationList = C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */; buildPhases = ( C01866A51C5973210040FC07 /* Sources */, - C01866C01C59777B0040FC07 /* Symlink to the loaderd path */, ); buildRules = ( ); @@ -1584,13 +1724,31 @@ E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, E49BB6F81E7074C100868613 /* PBXTargetDependency */, C01866C21C597AEA0040FC07 /* PBXTargetDependency */, - C00B0E141C5AEED6000330B3 /* PBXTargetDependency */, ); name = libdispatch; productName = libdispatch; productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; + E43B88262241F19000215272 /* libdispatch_driverkit */ = { + isa = PBXNativeTarget; + buildConfigurationList = E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */; + buildPhases = ( + E43B88312241F19000215272 /* Headers */, + E43B88712241F19000215272 /* Sources */, + E43B88922241F19000215272 /* Install Headers */, + ); + buildRules = ( + ); + dependencies = ( + E43B88292241F19000215272 /* PBXTargetDependency */, + E43B882B2241F19000215272 /* PBXTargetDependency */, + ); + name = libdispatch_driverkit; + productName = libdispatch; + productReference = E43B889A2241F19000215272 /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */ = { isa = PBXNativeTarget; buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */; @@ -1670,7 +1828,7 @@ attributes = { BuildIndependentTargetsInParallel = YES; DefaultBuildSystemTypeForWorkspace = Latest; - LastUpgradeCheck = 1010; + LastUpgradeCheck = 1100; TargetAttributes = { 3F3C9326128E637B0042B1F7 = { ProvisioningStyle = Manual; @@ -1701,9 +1859,6 @@ CreatedOnToolsVersion = 9.3; ProvisioningStyle = Automatic; }; - C00B0DF01C5AEBBE000330B3 = { - ProvisioningStyle = Manual; - }; C01866A41C5973210040FC07 = { ProvisioningStyle = Manual; }; @@ -1757,8 +1912,8 @@ E4EC121612514715000DDBD1 /* libdispatch mp resolved */, E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */, E4B51595164B2DA300E003AF /* libdispatch introspection */, + E43B88262241F19000215272 /* libdispatch_driverkit */, C01866A41C5973210040FC07 /* libdispatch mp static */, 
- C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */, 6E43553E215B5D9D00C13177 /* libdispatch_introspection */, 6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, @@ -1854,68 +2009,60 @@ shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\"\n"; showEnvVarsInLog = 0; }; - C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { + E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; + buildActionMask = 8; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/run-on-install.sh", + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", ); - name = "Symlink libdispatch.a -> libdispatch_dyld_target.a"; + name = "Postprocess Headers"; outputPaths = ( - "${DSTROOT}${INSTALL_PATH}/libdispatch.a", ); - runOnlyForDeploymentPostprocessing = 0; + runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ${PRODUCT_NAME}.a ${SCRIPT_OUTPUT_FILE_0}"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - C01866C01C59777B0040FC07 /* Symlink to the loaderd path */ = { + E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/run-on-install.sh", + "$(SRCROOT)/xcodescripts/install-dtrace.sh", + "$(SRCROOT)/src/provider.d", ); - name = "Symlink to the loaderd path"; + name = "Install DTrace Header"; outputPaths = ( - "${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a", + "$(CONFIGURATION_BUILD_DIR)/$(PUBLIC_HEADERS_FOLDER_PATH)/introspection.d", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/${PRODUCT_NAME}.a ${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a"; - showEnvVarsInLog = 0; - }; - E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 8; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/xcodescripts/postprocess-headers.sh", - ); - name = "Postprocess Headers"; - outputPaths = ( - ); - runOnlyForDeploymentPostprocessing = 1; - shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; - E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { + E43B88922241F19000215272 /* Install Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/install-dtrace.sh", - "$(SRCROOT)/src/provider.d", + "$(SRCROOT)/xcodescripts/install-headers.sh", + "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/object_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", ); - name = "Install DTrace Header"; + name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(PUBLIC_HEADERS_FOLDER_PATH)/introspection.d", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; @@ -2131,40 +2278,6 @@ ); runOnlyForDeploymentPostprocessing = 0; }; - C00B0DF11C5AEBBE000330B3 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */, - C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, - C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, - C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, - C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, - C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, - C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, - C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */, - C00B0E001C5AEBBE000330B3 /* source.c in Sources */, - 6E4BACC81D48A42400B562AE /* mach.c in Sources */, - 6EA9629E1D48622C00759D53 /* event.c in Sources */, - 6EA962A61D48625500759D53 /* event_kevent.c in Sources */, - 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */, - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, - C00B0E031C5AEBBE000330B3 /* io.c in Sources */, - C00B0E021C5AEBBE000330B3 /* data.c in Sources */, - C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, - C00B0E011C5AEBBE000330B3 /* time.c in Sources */, - C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, - E49BB70A1E70A3B000868613 /* venture.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; C01866A51C5973210040FC07 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2236,6 +2349,43 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E43B88712241F19000215272 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E43B88722241F19000215272 /* provider.d in Sources */, + E43B88732241F19000215272 /* protocol.defs in Sources */, + E43B88742241F19000215272 /* firehose.defs in Sources */, + E43B88752241F19000215272 /* firehose_reply.defs in Sources */, + E43B88762241F19000215272 /* resolver.c in Sources */, + 
E43B88772241F19000215272 /* init.c in Sources */, + E43B88782241F19000215272 /* object.c in Sources */, + E43B88792241F19000215272 /* object.m in Sources */, + E43B887A2241F19000215272 /* block.cpp in Sources */, + E43B887B2241F19000215272 /* lock.c in Sources */, + E43B887C2241F19000215272 /* semaphore.c in Sources */, + E43B887D2241F19000215272 /* once.c in Sources */, + E43B887E2241F19000215272 /* queue.c in Sources */, + E43B887F2241F19000215272 /* apply.c in Sources */, + E43B88802241F19000215272 /* source.c in Sources */, + E43B88812241F19000215272 /* yield.c in Sources */, + E43B88822241F19000215272 /* mach.c in Sources */, + E43B88832241F19000215272 /* event.c in Sources */, + E43B88842241F19000215272 /* event_kevent.c in Sources */, + E43B88852241F19000215272 /* event_epoll.c in Sources */, + E43B88862241F19000215272 /* voucher.c in Sources */, + E43B88872241F19000215272 /* firehose_buffer.c in Sources */, + E43B88882241F19000215272 /* io.c in Sources */, + E43B88892241F19000215272 /* data.c in Sources */, + E43B888A2241F19000215272 /* data.m in Sources */, + E43B888B2241F19000215272 /* transform.c in Sources */, + E43B888C2241F19000215272 /* time.c in Sources */, + E43B888D2241F19000215272 /* allocator.c in Sources */, + E43B888E2241F19000215272 /* benchmark.c in Sources */, + E43B888F2241F19000215272 /* venture.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; E49BB6D01E70748100868613 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2428,11 +2578,6 @@ name = ddt; targetProxy = 9BEBA57720127D4400E6FD0D /* PBXContainerItemProxy */; }; - C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; - targetProxy = C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */; - }; C01866C21C597AEA0040FC07 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = C01866A41C5973210040FC07 /* libdispatch mp static */; @@ -2448,6 +2593,16 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; + E43B88292241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; + targetProxy = E43B882A2241F19000215272 /* PBXContainerItemProxy */; + }; + E43B882B2241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */; + targetProxy = E43B882C2241F19000215272 /* PBXContainerItemProxy */; + }; E47D6ECD125FEBA10070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; @@ -2613,43 +2768,53 @@ }; name = Debug; }; - C00B0E081C5AEBBE000330B3 /* Release */ = { + C01866BB1C5973210040FC07 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Release; }; - C00B0E091C5AEBBE000330B3 /* Debug */ = { + C01866BC1C5973210040FC07 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Debug; }; - C01866BB1C5973210040FC07 /* Release */ = { + C927F35B10FD7F0600C5AB8B /* 
Release */ = {
			isa = XCBuildConfiguration;
-			baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */;
			buildSettings = {
			};
			name = Release;
		};
-		C01866BC1C5973210040FC07 /* Debug */ = {
+		C927F35C10FD7F0600C5AB8B /* Debug */ = {
			isa = XCBuildConfiguration;
-			baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */;
			buildSettings = {
			};
			name = Debug;
		};
-		C927F35B10FD7F0600C5AB8B /* Release */ = {
+		E43B88982241F19000215272 /* Release */ = {
			isa = XCBuildConfiguration;
+			baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */;
			buildSettings = {
+				ARCHS = "$(ARCHS_STANDARD)";
+				DRIVERKIT = 1;
+				DRIVERKITROOT = /System/DriverKit;
+				SDKROOT = driverkit.internal;
+				SUPPORTED_PLATFORMS = macosx;
			};
			name = Release;
		};
-		C927F35C10FD7F0600C5AB8B /* Debug */ = {
+		E43B88992241F19000215272 /* Debug */ = {
			isa = XCBuildConfiguration;
+			baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */;
			buildSettings = {
+				ARCHS = "$(ARCHS_STANDARD)";
+				DRIVERKIT = 1;
+				DRIVERKITROOT = /System/DriverKit;
+				SDKROOT = driverkit.internal;
+				SUPPORTED_PLATFORMS = macosx;
			};
			name = Debug;
		};
@@ -2845,15 +3010,6 @@
		defaultConfigurationIsVisible = 0;
		defaultConfigurationName = Release;
	};
-	C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = {
-		isa = XCConfigurationList;
-		buildConfigurations = (
-			C00B0E081C5AEBBE000330B3 /* Release */,
-			C00B0E091C5AEBBE000330B3 /* Debug */,
-		);
-		defaultConfigurationIsVisible = 0;
-		defaultConfigurationName = Release;
-	};
	C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */ = {
		isa = XCConfigurationList;
		buildConfigurations = (
@@ -2872,6 +3028,15 @@
		defaultConfigurationIsVisible = 0;
		defaultConfigurationName = Release;
	};
+	E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */ = {
+		isa = XCConfigurationList;
+		buildConfigurations = (
+			E43B88982241F19000215272 /* Release */,
+			E43B88992241F19000215272 /* Debug */,
+		);
+		defaultConfigurationIsVisible = 0;
+		defaultConfigurationName = Release;
+	};
	E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */ = {
		isa = XCConfigurationList;
		buildConfigurations = (
diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3
index c0aa45171..c6023cdae 100644
--- a/man/dispatch_semaphore_create.3
+++ b/man/dispatch_semaphore_create.3
@@ -109,13 +109,15 @@ and
 .Fn dispatch_release .
 .Sh CAVEATS
 Unbalanced dispatch semaphores cannot be released.
-For a given semaphore, calls to
+For a given semaphore, the count at the time
+.Fn dispatch_release
+is called must be equal to or larger than the
+count the semaphore was created with. In other words, at the time of releasing
+the semaphore, there must have been at least as many
 .Fn dispatch_semaphore_signal
-and
+calls as there were successful
 .Fn dispatch_semaphore_wait
-must be balanced before
-.Fn dispatch_release
-is called on it.
+calls that did not time out.
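+.Pp
+For example, the following snippet keeps the count balanced so that the
+semaphore can safely be released (an illustrative sketch, where
+.Va queue
+stands for any dispatch queue):
+.Bd -literal -offset indent
+dispatch_semaphore_t sema = dispatch_semaphore_create(0);
+dispatch_async(queue, ^{
+	// produce a result, then:
+	dispatch_semaphore_signal(sema);
+});
+// waits forever, so this wait cannot time out
+dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
+// one signal balanced one successful wait; the count is back at 0
+dispatch_release(sema);
+.Ed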
.Sh SEE ALSO
.Xr dispatch 3 ,
.Xr dispatch_object 3
diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h
index d2c379e62..bab44824b 100644
--- a/os/firehose_server_private.h
+++ b/os/firehose_server_private.h
@@ -399,6 +399,7 @@ OS_ENUM(firehose_server_queue, unsigned long,
	FIREHOSE_SERVER_QUEUE_UNKNOWN,
	FIREHOSE_SERVER_QUEUE_IO,
	FIREHOSE_SERVER_QUEUE_MEMORY,
+	FIREHOSE_SERVER_QUEUE_IO_WL,
 );
 
 /*!
diff --git a/os/object.h b/os/object.h
index 1ad1158c5..2979de891 100644
--- a/os/object.h
+++ b/os/object.h
@@ -91,12 +91,11 @@
 #endif
 
 #ifndef OS_OBJECT_SWIFT3
-#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \
-	SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#ifdef __swift__
 #define OS_OBJECT_SWIFT3 1
-#else
+#else // __swift__
 #define OS_OBJECT_SWIFT3 0
-#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2
+#endif // __swift__
 #endif // OS_OBJECT_SWIFT3
 
 #if OS_OBJECT_USE_OBJC
diff --git a/os/object_private.h b/os/object_private.h
index a667f79f0..003369ecc 100644
--- a/os/object_private.h
+++ b/os/object_private.h
@@ -97,10 +97,26 @@
 
 #define OS_OBJECT_CLASS(name) OS_##name
 
+#if OS_OBJECT_USE_OBJC
+#define OS_OBJECT_USES_XREF_DISPOSE() \
+	- (oneway void)release { \
+		_os_object_release(self); \
+	}
+#endif
+
+#if __has_attribute(objc_nonlazy_class)
+#define OS_OBJECT_NONLAZY_CLASS __attribute__((objc_nonlazy_class))
+#define OS_OBJECT_NONLAZY_CLASS_LOAD
+#else
+#define OS_OBJECT_NONLAZY_CLASS
+#define OS_OBJECT_NONLAZY_CLASS_LOAD + (void)load { }
+#endif
+
 #if OS_OBJECT_USE_OBJC && OS_OBJECT_SWIFT3
 @interface OS_OBJECT_CLASS(object) (OSObjectPrivate)
+// Note: objects that want _xref_dispose to be called need
+// to use OS_OBJECT_USES_XREF_DISPOSE()
 - (void)_xref_dispose;
-- (void)_dispose;
 @end
 OS_OBJECT_DECL_PROTOCOL(object, );
 typedef OS_OBJECT_CLASS(object) *_os_object_t;
@@ -116,8 +132,9 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t;
 API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT
 @interface OS_OBJECT_CLASS(object) : NSObject
+// Note: objects that want _xref_dispose to be called need
+// to use OS_OBJECT_USES_XREF_DISPOSE()
 - (void)_xref_dispose;
-- (void)_dispose;
 @end
 typedef OS_OBJECT_CLASS(object) *_os_object_t;
 #define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \
@@ -172,6 +189,12 @@ OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
 void
 _os_object_release(_os_object_t object);
 
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
+OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
+void
+_os_object_release_without_xref_dispose(_os_object_t object);
+
 API_AVAILABLE(macos(10.8), ios(6.0))
 OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW
 OS_SWIFT_UNAVAILABLE("Unavailable in Swift")
diff --git a/private/channel_private.h b/private/channel_private.h
new file mode 100644
index 000000000..9c2ecf626
--- /dev/null
+++ b/private/channel_private.h
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __DISPATCH_CHANNEL_PRIVATE__
+#define __DISPATCH_CHANNEL_PRIVATE__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include <dispatch/private.h> instead of this file directly."
+#include <dispatch/private.h> // for HeaderDoc
+#endif
+
+DISPATCH_ASSUME_NONNULL_BEGIN
+
+__BEGIN_DECLS
+
+#if DISPATCH_CHANNEL_SPI
+
+/*!
+ * @typedef dispatch_channel_t
+ *
+ * @abstract
+ * A dispatch channel is a queue-like object that accepts both opaque work
+ * items and regular dispatch work items (see dispatch_channel_create()).
+ */
+DISPATCH_DECL(dispatch_channel);
+
+typedef struct dispatch_channel_invoke_ctxt_s *dispatch_channel_invoke_ctxt_t;
+
+/*! @typedef dispatch_channel_callbacks_t
+ *
+ * @abstract
+ * Vtable used by dispatch channels (see dispatch_channel_create).
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+typedef struct dispatch_channel_callbacks_s {
+#define DISPATCH_CHANNEL_CALLBACKS_VERSION 1ul
+	/*! @field dcc_version
+	 *
+	 * @abstract
+	 * Version of the callbacks, used for binary compatibility.
+	 * This must be set to DISPATCH_CHANNEL_CALLBACKS_VERSION.
+	 */
+	unsigned long dcc_version;
+
+	/*! @field dcc_probe
+	 *
+	 * @abstract
+	 * This callback is called when GCD is considering whether it should wake
+	 * up the channel.
+	 *
+	 * @discussion
+	 * This function may be called from ANY context. It may be called
+	 * concurrently from several threads, and it may be called concurrently
+	 * with calls to other channel callbacks.
+	 *
+	 * Reasons for this function to be called include:
+	 * - the channel became non-empty,
+	 * - the channel is receiving a Quality of Service override to resolve
+	 *   a priority inversion,
+	 * - dispatch_activate() or dispatch_resume() was called,
+	 * - dispatch_channel_wakeup() was called.
+	 *
+	 * The implementation of this callback should be idempotent, and as cheap
+	 * as possible, avoiding taking locks where it can. A typical
+	 * implementation will perform a single atomic state load to determine
+	 * what answer to return. Possible races or false positives can later be
+	 * debounced in dcc_invoke, which is synchronized.
+	 *
+	 * Calling dispatch_channel_wakeup() from the context of this call is
+	 * incorrect and will result in undefined behavior. Instead, it should be
+	 * called in response to external events, in order to cause the channel to
+	 * re-evaluate the `dcc_probe` hook.
+	 *
+	 * param channel
+	 * The channel that is being probed.
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * - true if the dispatch channel can be woken up according to the other
+	 *   runtime rules
+	 *
+	 * - false if the dispatch channel would not be able to make progress if
+	 *   woken up. A subsequent explicit call to dispatch_channel_wakeup() will
+	 *   be required when this condition has changed.
+	 */
+	bool
+	(*_Nonnull dcc_probe)(dispatch_channel_t channel, void *_Nullable context);
+
+	/*! @field dcc_invoke
+	 *
+	 * @abstract
+	 * This callback is called when a dispatch channel is being drained.
+	 *
+	 * @discussion
+	 * This callback is where the state machine for the channel can
+	 * be implemented using dispatch_channel_foreach_work_item_peek()
+	 * and dispatch_channel_drain().
+	 *
+	 * Note that if this function returns true, it must have called
+	 * dispatch_channel_drain() exactly once.
+	 * It is valid to call neither peek nor drain if false is returned.
+	 *
+	 * param channel
+	 * The channel that has been invoked.
+	 *
+	 * param invoke_context
+	 * An opaque data structure that must be passed back to
+	 * dispatch_channel_foreach_work_item_peek() and dispatch_channel_drain().
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * - true if the channel can drain further
+	 * - false if an explicit call to dispatch_channel_wakeup() is required
+	 *   for the channel to be able to drain items again. A subsequent explicit
+	 *   call to dispatch_channel_wakeup() will be required when this condition
+	 *   has changed.
+	 */
+	bool
+	(*_Nonnull dcc_invoke)(dispatch_channel_t channel,
+			dispatch_channel_invoke_ctxt_t invoke_context,
+			void *_Nullable context);
+
+	/*! @field dcc_acknowledge_cancel
+	 *
+	 * @abstract
+	 * This optional callback is called once the channel has been cancelled,
+	 * and until that cancellation is acknowledged.
+	 *
+	 * @discussion
+	 * If this callback isn't set, the channel cancellation is implicit and can
+	 * be tested with dispatch_channel_testcancel().
+	 *
+	 * When this callback is set, it will be called as soon as cancellation has
+	 * been noticed. When it is called, it is called from a context serialized
+	 * with `dcc_invoke`, or from `dcc_invoke` itself.
+	 *
+	 * Returning `false` causes the dispatch channel to stop its invocation
+	 * early. A subsequent explicit call to dispatch_channel_wakeup() will be
+	 * required when the cancellation can be acknowledged.
+	 *
+	 * param channel
+	 * The channel that has been invoked.
+	 *
+	 * param context
+	 * The context associated with the channel.
+	 *
+	 * returns
+	 * Whether the cancellation was acknowledged.
+	 */
+	bool
+	(*_Nullable dcc_acknowledge_cancel)(dispatch_channel_t channel,
+			void *_Nullable context);
+} const *dispatch_channel_callbacks_t;
+
+/*! @function dispatch_channel_create
+ *
+ * @abstract
+ * Create a dispatch channel.
+ *
+ * @discussion
+ * A dispatch channel is similar to a dispatch serial queue; however, it will
+ * accept arbitrary items into the queue, as well as regular dispatch blocks
+ * to execute.
+ *
+ * Unlike serial queues, this object cannot be targeted by other dispatch
+ * objects.
+ *
+ * Dispatch channels are created in an inactive state. After creating the
+ * channel and setting any desired property, a call must be made to
+ * dispatch_activate() in order to use the object.
+ *
+ * Calling dispatch_set_target_queue() on a channel after it has been activated
+ * is not allowed (see dispatch_activate() and dispatch_set_target_queue()).
+ *
+ * @param label
+ * A string label to attach to the channel.
+ * This parameter is optional and may be NULL.
+ *
+ * @param target
+ * The target queue for the newly created channel. The target queue is
+ * retained. If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, the channel's
+ * target queue is set to the default target queue for the given channel type.
+ *
+ * @param context
+ * A context to associate with the channel. It can be retrieved with
+ * dispatch_get_context() at any time, but should not be mutated.
+ *
+ * @param callbacks
+ * Hooks for the created channel.
+ *
+ * @returns
+ * The newly created channel.
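+ *
+ * A minimal creation sketch follows; `my_probe`, `my_invoke` and `my_state`
+ * are placeholders for application code, not part of this SPI:
+ *
+ *	static const struct dispatch_channel_callbacks_s my_callbacks = {
+ *		.dcc_version = DISPATCH_CHANNEL_CALLBACKS_VERSION,
+ *		.dcc_probe = my_probe,
+ *		.dcc_invoke = my_invoke,
+ *	};
+ *
+ *	dispatch_channel_t dch = dispatch_channel_create("com.example.channel",
+ *			DISPATCH_TARGET_QUEUE_DEFAULT, my_state, &my_callbacks);
+ *	dispatch_activate(dch); // channels are created inactive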
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NOTHROW DISPATCH_NONNULL4
+dispatch_channel_t
+dispatch_channel_create(const char *_Nullable label,
+		dispatch_queue_t _Nullable target,
+		void *_Nullable context, dispatch_channel_callbacks_t callbacks);
+
+/*! @function dispatch_channel_wakeup
+ *
+ * @abstract
+ * Re-evaluate whether a dispatch channel needs to be woken up.
+ *
+ * @discussion
+ * Calling this function causes the GCD runtime to reevaluate whether
+ * the specified dispatch channel needs to be woken up. If a previous call to
+ * `dcc_probe`, `dcc_acknowledge_cancel` or `dcc_invoke` returned false, then
+ * the channel may remain asleep until this function is called.
+ *
+ * It is valid to call this function from the context of any of the `invoke`
+ * callbacks, but not from the `dcc_probe` callback.
+ *
+ * This function will have no effect if:
+ * - the dispatch channel is suspended,
+ * - the `dcc_probe` callback subsequently returns false,
+ * - the dispatch channel has no work items queued, nor a pending cancellation
+ *   to acknowledge.
+ *
+ * @param channel
+ * The channel for which wakeup should be evaluated.
+ *
+ * @param qos_class
+ * The QoS override that should be applied to this channel because of this
+ * event. The override will persist until the channel has been drained of
+ * pending items.
+ *
+ * It is expected that most wakeups will not require an additional QoS
+ * override. In this case, passing QOS_CLASS_UNSPECIFIED indicates that no
+ * additional override should be applied as a result of this wakeup.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_wakeup(dispatch_channel_t channel, qos_class_t qos_class);
+
+/*! @typedef dispatch_channel_enumerator_handler_t
+ *
+ * Type of the callbacks used by dispatch_channel_foreach_work_item_peek_f().
+ */
+typedef bool (*dispatch_channel_enumerator_handler_t)(void *_Nullable context, void *item);
+
+/*! @function dispatch_channel_foreach_work_item_peek_f
+ *
+ * @abstract
+ * Peek at opaque work items currently enqueued on the channel.
+ *
+ * @discussion
+ * This function will enumerate items enqueued on the channel, in order, until
+ * the first non-opaque work item is found. No work should be performed on
+ * behalf of the items enumerated.
+ *
+ * This function allows the caller to preflight items that will be processed
+ * when draining the channel (e.g. counting items in order to pre-allocate
+ * storage, or batching items into groups).
+ *
+ * This function can only be called from the context of the `dcc_invoke`
+ * callback associated with this channel, and before any call to
+ * dispatch_channel_drain().
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param context
+ * An application-defined context that will be passed to the handler.
+ *
+ * @param handler
+ * The handler that will be passed `context` and the opaque work item
+ * currently enumerated.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_foreach_work_item_peek_f(
+		dispatch_channel_invoke_ctxt_t invoke_context,
+		void *_Nullable context, dispatch_channel_enumerator_handler_t handler);
+
+#ifdef __BLOCKS__
+/*!
@typedef dispatch_channel_enumerator_block_t
+ *
+ * Type of the callbacks used by dispatch_channel_foreach_work_item_peek().
+ */
+typedef bool (^dispatch_channel_enumerator_block_t)(void *item);
+
+/*! @function dispatch_channel_foreach_work_item_peek
+ *
+ * @abstract
+ * Peek at the opaque work items currently enqueued on the channel.
+ *
+ * @discussion
+ * See dispatch_channel_foreach_work_item_peek_f().
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param block
+ * The block that will be passed the opaque work item currently enumerated.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_foreach_work_item_peek(
+		dispatch_channel_invoke_ctxt_t invoke_context,
+		dispatch_channel_enumerator_block_t block DISPATCH_NOESCAPE);
+#endif
+
+/*! @typedef dispatch_channel_drain_handler_t
+ *
+ * @abstract
+ * Type of the callbacks used by dispatch_channel_drain_f().
+ *
+ * @param context
+ * The application-defined context passed to dispatch_channel_drain_f().
+ *
+ * @param item
+ * The opaque work item to consume.
+ *
+ * @param rejected_item
+ * An out-parameter for an opaque work item to put back at the head of the
+ * queue. On return from this handler, if rejected_item is set, then the
+ * handler must also return false (thus interrupting the drain operation).
+ *
+ * @returns
+ * - true if the drain may enumerate the next item
+ * - false to cause dispatch_channel_drain_f() to return, in which case
+ *   a rejected item can optionally be returned.
+ */
+typedef bool (*dispatch_channel_drain_handler_t)(void *_Nullable context,
+		void *_Nonnull item, void *_Nonnull *_Nullable rejected_item);
+
+/*! @function dispatch_channel_drain_f
+ *
+ * @abstract
+ * Drain the opaque work items enqueued on the channel.
+ *
+ * @discussion
+ * This function needs to be called by any `dcc_invoke` that returns true.
+ *
+ * Calling drain will cause every opaque work item that can be consumed to be
+ * passed to the handler. While the handler is called, the runtime environment
+ * matches the QOS and context captured at dispatch_channel_enqueue() time for
+ * this opaque work item.
+ *
+ * Note that this function can (through factors internal to the GCD runtime)
+ * decide not to consume all items that are currently enqueued on the channel.
+ * Therefore it is possible for dispatch_channel_drain_f() to enumerate fewer
+ * items than dispatch_channel_foreach_work_item_peek_f() did when called
+ * immediately beforehand.
+ *
+ * It is also possible for dispatch_channel_drain_f() to observe *more* items
+ * than previously seen with peek, if enqueues are happening concurrently.
+ *
+ * Note that work items enqueued with dispatch_channel_async() act as
+ * "separators". If the opaque work item O1 is enqueued before a regular
+ * asynchronous work item A, and a new opaque work item O2 is then enqueued,
+ * then neither dispatch_channel_foreach_work_item_peek_f() nor
+ * dispatch_channel_drain_f() will ever return O1 and O2 as part of the same
+ * drain streak.
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param context
+ * The application-defined context passed to the handler.
+ *
+ * @param handler
+ * The handler that will be passed the context and opaque work item to invoke.
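+ *
+ * As an illustrative sketch, a `dcc_invoke` implementation built on this
+ * function could look as follows (`my_consume` stands for application logic):
+ *
+ *	static bool
+ *	my_drain_handler(void *ctx, void *item, void **rejected_item)
+ *	{
+ *		my_consume(ctx, item); // consume one opaque work item
+ *		return true; // let the drain enumerate the next item
+ *	}
+ *
+ *	static bool
+ *	my_invoke(dispatch_channel_t dch,
+ *			dispatch_channel_invoke_ctxt_t ictxt, void *ctx)
+ *	{
+ *		dispatch_channel_drain_f(ictxt, ctx, my_drain_handler);
+ *		return true; // returning true: drain was called exactly once
+ *	}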
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t invoke_context,
+		void *_Nullable context, dispatch_channel_drain_handler_t handler);
+
+#ifdef __BLOCKS__
+/*! @typedef dispatch_channel_drain_block_t
+ *
+ * @abstract
+ * Type of the callbacks used by dispatch_channel_drain().
+ *
+ * @discussion
+ * See dispatch_channel_drain_handler_t.
+ */
+typedef bool (^dispatch_channel_drain_block_t)(void *_Nonnull item,
+		void *_Nonnull *_Nullable rejected_item);
+
+/*! @function dispatch_channel_drain
+ *
+ * @abstract
+ * Drain the opaque work items enqueued on the channel.
+ *
+ * @discussion
+ * See dispatch_channel_drain_f().
+ *
+ * @param invoke_context
+ * The opaque invoke context passed to the channel `dcc_invoke` callback.
+ *
+ * @param block
+ * The block that will be passed the opaque work item to invoke.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL
+void
+dispatch_channel_drain(dispatch_channel_invoke_ctxt_t invoke_context,
+		dispatch_channel_drain_block_t block DISPATCH_NOESCAPE);
+#endif
+
+/*!
+ * @function dispatch_channel_cancel
+ *
+ * @abstract
+ * Asynchronously cancel the dispatch channel.
+ *
+ * @discussion
+ * Cancellation will cause the channel to repeatedly call the
+ * `dcc_acknowledge_cancel` handler until it returns true. This allows the
+ * associated state machine to handle cancellation asynchronously (and, if
+ * needed, in multiple phases).
+ *
+ * The precise semantics of cancellation are up to the dispatch channel's
+ * associated state machine, and not all dispatch channels need to use
+ * cancellation.
+ *
+ * However, if the `dcc_acknowledge_cancel` callback is implemented, then an
+ * explicit call to dispatch_channel_cancel() is mandatory before the last
+ * reference to the dispatch channel is released.
+ *
+ * @param channel
+ * The dispatch channel to be canceled.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_cancel(dispatch_channel_t channel);
+
+/*!
+ * @function dispatch_channel_testcancel
+ *
+ * @abstract
+ * Tests whether the given dispatch channel has been canceled.
+ *
+ * @param channel
+ * The dispatch channel to be tested.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @result
+ * Non-zero if canceled and zero if not canceled.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_NOTHROW
+long
+dispatch_channel_testcancel(dispatch_channel_t channel);
+
+/*!
+ * @function dispatch_channel_async
+ *
+ * @abstract
+ * Submits a block for asynchronous execution on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_async().
+ *
+ * @param channel
+ * The target dispatch channel to which the block is submitted.
+ * The system will hold a reference on the target channel until the block
+ * has finished.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param block
+ * The block to submit to the target dispatch channel. This function performs
+ * Block_copy() and Block_release() on behalf of callers.
+ * The result of passing NULL in this parameter is undefined.
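+ *
+ * For example (a sketch; `dch` and `my_item` are placeholders):
+ *
+ *	dispatch_channel_enqueue(dch, my_item); // handled by dcc_invoke
+ *	dispatch_channel_async(dch, ^{
+ *		// expected to run after my_item has been consumed, since
+ *		// items enqueued with dispatch_channel_async() act as
+ *		// "separators" with respect to opaque work items
+ *	});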
+ */
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_async(dispatch_channel_t channel, dispatch_block_t block);
+#endif
+
+/*!
+ * @function dispatch_channel_async_f
+ *
+ * @abstract
+ * Submits a function for asynchronous execution on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_async() for details.
+ *
+ * @param queue
+ * The target dispatch channel to which the function is submitted.
+ * The system will hold a reference on the target channel until the function
+ * has returned.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the target channel. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_channel_async_f().
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
+void
+dispatch_channel_async_f(dispatch_queue_t queue,
+		void *_Nullable context, dispatch_function_t work);
+
+/*!
+ * @function dispatch_channel_enqueue
+ *
+ * @abstract
+ * Enqueues an opaque work item for asynchronous dequeue on a dispatch channel.
+ *
+ * @discussion
+ * See dispatch_channel_async() for details.
+ *
+ * @param channel
+ * The target dispatch channel to which the work item is submitted.
+ * The system will hold a reference on the target channel until the work item
+ * is consumed.
+ * The result of passing NULL in this parameter is undefined.
+ *
+ * @param item
+ * The application-defined work item to enqueue on the target channel.
+ * The result of passing NULL in this parameter is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_channel_enqueue(dispatch_channel_t channel, void *item);
+
+#endif // DISPATCH_CHANNEL_SPI
+
+__END_DECLS
+
+DISPATCH_ASSUME_NONNULL_END
+
+#endif
diff --git a/private/mach_private.h b/private/mach_private.h
index e311aee16..1474c163a 100644
--- a/private/mach_private.h
+++ b/private/mach_private.h
@@ -34,8 +34,6 @@
 __BEGIN_DECLS
 
-#if DISPATCH_MACH_SPI
-
 #define DISPATCH_MACH_SPI_VERSION 20161026
 
 #include <mach/mach.h>
 
@@ -129,6 +127,10 @@
+ * @function dispatch_mach_request_no_senders + * + * Configure the mach channel to receive no more senders notifications. + * + * @discussion + * This function must be called before dispatch_mach_connect() has been called. + * + * When a checkin message is passed to dispatch_mach_connect() or + * dispatch_mach_reconnect(), the notification is armed after the checkin + * message has been sent successfully. + * + * If no checkin message is passed, then the mach channel is assumed to be + * a "server" peer connection and the no more senders request is armed + * immediately. + * + * @param channel + * The mach channel to request no senders notifications on. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_mach_request_no_senders(dispatch_mach_t channel); + +/*! + * @typedef dispatch_mach_flags_t + * + * Flags that can be passed to the dispatch_mach_set_flags function. + * + * @const DMF_USE_STRICT_REPLY + * Instruct the dispatch mach channel to use strict reply port semantics. When + * using strict reply port semantics, the kernel will enforce that the port + * used as the reply port has precisely 1 extant send-once right, its receive + * right exists in the same space as the sender, and any voucher context, + * e.g., the persona in the bank attribute, used when sending the message is + * also used when replying. + * + * @const DMF_REQUEST_NO_SENDERS + * Configure the mach channel to receive no more senders notifications. + * When a checkin message is passed to dispatch_mach_connect() or + * dispatch_mach_reconnect(), the notification is armed after the checkin + * message has been sent successfully. If no checkin message is passed, then + * the mach channel is assumed to be a "server" peer connection and the no + * more senders request is armed immediately. + */ +DISPATCH_OPTIONS(dispatch_mach_flags, uint64_t, + DMF_NONE = 0x0, + DMF_USE_STRICT_REPLY = 0x1, + DMF_REQUEST_NO_SENDERS = 0x2, +); + +/*! + * @function dispatch_mach_set_flags + * + * Configure optional properties on the mach channel. + * + * @discussion + * This function must be called before dispatch_mach_connect() has been called. + * + * @param channel + * The mach channel to configure. + * + * @param flags + * Flags to configure the dispatch mach channel. + * + * @see dispatch_mach_flags_t + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_mach_set_flags(dispatch_mach_t channel, dispatch_mach_flags_t flags); + /*! * @function dispatch_mach_connect * Connect a mach channel to the specified receive and send rights. @@ -882,6 +957,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t channel); +#if DISPATCH_MACH_SPI + // SPI for libxpc /* * Type for the callback for receipt of asynchronous replies to @@ -914,6 +991,8 @@ typedef const struct dispatch_mach_xpc_hooks_s { /* Fields available in version 2. */ +#define DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF ((dispatch_queue_t)NULL) + /* * Gets the queue to which a reply to a message sent using * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be @@ -945,7 +1024,7 @@ typedef const struct dispatch_mach_xpc_hooks_s { dispatch_mach_async_reply_callback_t dmxh_async_reply_handler; /* Fields available in version 3. */ - /** + /* * Called once when the Mach channel has been activated. 
If this function
	 * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be
	 * delivered to the channel's event handler when a SIGTERM is received.
@@ -1101,6 +1180,8 @@ dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel,
		dispatch_mach_send_flags_t send_flags,
		dispatch_mach_reason_t *send_result, mach_error_t *send_error);
 
+#endif // DISPATCH_MACH_SPI
+
 /*!
  * @function dispatch_mach_handoff_reply_f
  *
@@ -1125,7 +1206,7 @@ dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel,
  * @param port
  * The send once right that will be replied to.
  */
-API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NOTHROW
 void
 dispatch_mach_handoff_reply_f(dispatch_queue_t queue, mach_port_t port,
@@ -1140,16 +1221,16 @@ dispatch_mach_handoff_reply_f(dispatch_queue_t queue, mach_port_t port,
  *
  * @see dispatch_mach_handoff_reply_f
  */
-API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+#ifdef __BLOCKS__
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW
 void
 dispatch_mach_handoff_reply(dispatch_queue_t queue, mach_port_t port,
		dispatch_block_t block);
+#endif /* __BLOCKS__ */
 
 DISPATCH_ASSUME_NONNULL_END
 
-#endif // DISPATCH_MACH_SPI
-
 __END_DECLS
 
 #endif
diff --git a/private/private.h b/private/private.h
index 3b842d415..df93d9a9f 100644
--- a/private/private.h
+++ b/private/private.h
@@ -62,11 +62,12 @@
 #include
 #include
+#if DISPATCH_CHANNEL_SPI
+#include <dispatch/channel_private.h>
+#endif
 #include
 #include
-#if DISPATCH_MACH_SPI
 #include <dispatch/mach_private.h>
-#endif // DISPATCH_MACH_SPI
 #include
 #include
 #include
@@ -76,7 +77,7 @@
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
 
 // Check that public and private dispatch headers match
-#if DISPATCH_API_VERSION != 20180109 // Keep in sync with <dispatch/dispatch.h>
+#if DISPATCH_API_VERSION != 20181008 // Keep in sync with <dispatch/dispatch.h>
 #error "Dispatch header mismatch between /usr/include and /usr/local/include"
 #endif
 
diff --git a/private/queue_private.h b/private/queue_private.h
index d8885d6a0..65ef7e255 100644
--- a/private/queue_private.h
+++ b/private/queue_private.h
@@ -47,7 +47,6 @@ enum {
	DISPATCH_QUEUE_OVERCOMMIT = 0x2ull,
 };
 
-
 /*!
  * @function dispatch_set_qos_class
  *
@@ -95,54 +94,6 @@ void
 dispatch_set_qos_class(dispatch_object_t object,
		dispatch_qos_class_t qos_class, int relative_priority);
 
-/*!
- * @function dispatch_set_qos_class_floor
- *
- * @abstract
- * Sets the QOS class floor on a dispatch queue, source, workloop or mach
- * channel.
- *
- * @discussion
- * The QOS class of workitems submitted to this object asynchronously will be
- * elevated to at least the specified QOS class floor.
- * Unlike dispatch_set_qos_class(), the QOS of the workitem will be used if
- * higher than the floor even when the workitem has been created without
- * "ENFORCE" semantics.
- *
- * Setting the QOS class floor is equivalent to the QOS effects of configuring
- * a target queue whose QOS class has been set with dispatch_set_qos_class().
- *
- * Calling this function will supersede any prior calls to
- * dispatch_set_qos_class() or dispatch_set_qos_class_floor().
- *
- * @param object
- * A dispatch queue, workloop, source or mach channel to configure.
- * The object must be inactive.
- *
- * Passing another object type or an object that has been activated is undefined
- * and will cause the process to be terminated.
- * - * @param qos_class - * A QOS class value: - * - QOS_CLASS_USER_INTERACTIVE - * - QOS_CLASS_USER_INITIATED - * - QOS_CLASS_DEFAULT - * - QOS_CLASS_UTILITY - * - QOS_CLASS_BACKGROUND - * Passing any other value is undefined. - * - * @param relative_priority - * A relative priority within the QOS class. This value is a negative - * offset from the maximum supported scheduler priority for the given class. - * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY - * is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -dispatch_set_qos_class_floor(dispatch_object_t object, - dispatch_qos_class_t qos_class, int relative_priority); - /*! * @function dispatch_set_qos_class_fallback * @@ -372,7 +323,8 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() +API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_scheduler_priority", + macos(10.9, 10.16), ios(6.0, 14.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t diff --git a/private/source_private.h b/private/source_private.h index 6396c113f..bd5e47ebc 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -190,9 +190,15 @@ enum { * * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE * Received network channel flow advisory. + * @constant DISPATCH_NW_CHANNEL_CHANNEL_EVENT + * Received network channel event. + * @constant DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE + * Received network channel interface advisory. */ enum { DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001, + DISPATCH_NW_CHANNEL_CHANNEL_EVENT = 0x00000002, + DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE = 0x00000004, }; /*! @@ -326,6 +332,18 @@ enum { DISPATCH_MACH_SEND_POSSIBLE = 0x8, }; +/*! + * @enum dispatch_source_mach_recv_flags_t + * + * @constant DISPATCH_MACH_RECV_SYNC_PEEK + * The receive source will participate in synchronous IPC priority inversion + * avoidance when possible. + */ +enum { + DISPATCH_MACH_RECV_SYNC_PEEK DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) = + 0x00008000, +}; + /*! * @enum dispatch_source_proc_flags_t * diff --git a/private/workloop_private.h b/private/workloop_private.h index 806411109..c06b498db 100644 --- a/private/workloop_private.h +++ b/private/workloop_private.h @@ -42,108 +42,7 @@ DISPATCH_ASSUME_NONNULL_BEGIN __BEGIN_DECLS -/*! - * @typedef dispatch_workloop_t - * - * @abstract - * Dispatch workloops invoke workitems submitted to them in priority order. - * - * @discussion - * A dispatch workloop is a flavor of dispatch_queue_t that is a priority - * ordered queue (using the QOS class of the submitted workitems as the - * ordering). - * - * Between each workitem invocation, the workloop will evaluate whether higher - * priority workitems have since been submitted and execute these first. - * - * Serial queues targeting a workloop maintain FIFO execution of their - * workitems. However, the workloop may reorder workitems submitted to - * independent serial queues targeting it with respect to each other, - * based on their priorities. - * - * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed - * to all APIs accepting a dispatch queue, except for functions from the - * dispatch_sync() family. 
dispatch_async_and_wait() must be used for workloop - * objects. Functions from the dispatch_sync() family on queues targeting - * a workloop are still permitted but discouraged for performance reasons. - */ -#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) -typedef struct dispatch_workloop_s *dispatch_workloop_t; -#else -DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); -#endif - -/*! - * @function dispatch_workloop_create - * - * @abstract - * Creates a new dispatch workloop to which workitems may be submitted. - * - * @param label - * A string label to attach to the workloop. - * - * @result - * The newly created dispatch workloop. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create(const char *_Nullable label); - -/*! - * @function dispatch_workloop_create_inactive - * - * @abstract - * Creates a new inactive dispatch workloop that can be setup and then - * activated. - * - * @discussion - * Creating an inactive workloop allows for it to receive further configuration - * before it is activated, and workitems can be submitted to it. - * - * Submitting workitems to an inactive workloop is undefined and will cause the - * process to be terminated. - * - * @param label - * A string label to attach to the workloop. - * - * @result - * The newly created dispatch workloop. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create_inactive(const char *_Nullable label); - -/*! - * @function dispatch_workloop_set_autorelease_frequency - * - * @abstract - * Sets the autorelease frequency of the workloop. - * - * @discussion - * See dispatch_queue_attr_make_with_autorelease_frequency(). - * The default policy for a workloop is - * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM. - * - * @param workloop - * The dispatch workloop to modify. - * - * This workloop must be inactive, passing an activated object is undefined - * and will cause the process to be terminated. - * - * @param frequency - * The requested autorelease frequency. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, - dispatch_autorelease_frequency_t frequency); - -DISPATCH_ENUM(dispatch_workloop_param_flags, uint64_t, +DISPATCH_OPTIONS(dispatch_workloop_param_flags, uint64_t, DISPATCH_WORKLOOP_NONE DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x0, DISPATCH_WORKLOOP_FIXED_PRIORITY DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x1, ); @@ -280,158 +179,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_workloop_should_yield_4NW(void); -/*! - * @function dispatch_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a workitem to a dispatch queue like dispatch_async(), however - * dispatch_async_and_wait() will not return until the workitem has finished. - * - * Like functions of the dispatch_sync family, dispatch_async_and_wait() is - * subject to dead-lock (See dispatch_sync() for details). 
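
The workloop creation and configuration routines deleted above move to the new public <dispatch/workloop.h> this patch adds. A minimal configuration sketch, assuming the public declarations match the ones removed here:

	dispatch_workloop_t wl = dispatch_workloop_create_inactive("com.example.wl");
	// Configuration is only legal while the workloop is inactive.
	dispatch_workloop_set_autorelease_frequency(wl,
			DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM);
	dispatch_activate(wl);
	dispatch_async((dispatch_queue_t)wl, ^{
		// Invoked in QoS priority order relative to other submitted items.
	});
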
- * - * However, dispatch_async_and_wait() differs from functions of the - * dispatch_sync family in two fundamental ways: how it respects queue - * attributes and how it chooses the execution context invoking the workitem. - * - * Differences with dispatch_sync() - * - * Work items submitted to a queue with dispatch_async_and_wait() observe all - * queue attributes of that queue when invoked (inluding autorelease frequency - * or QOS class). - * - * When the runtime has brought up a thread to invoke the asynchronous workitems - * already submitted to the specified queue, that servicing thread will also be - * used to execute synchronous work submitted to the queue with - * dispatch_async_and_wait(). - * - * However, if the runtime has not brought up a thread to service the specified - * queue (because it has no workitems enqueued, or only synchronous workitems), - * then dispatch_async_and_wait() will invoke the workitem on the calling thread, - * similar to the behaviour of functions in the dispatch_sync family. - * - * As an exception, if the queue the work is submitted to doesn't target - * a global concurrent queue (for example because it targets the main queue), - * then the workitem will never be invoked by the thread calling - * dispatch_async_and_wait(). - * - * In other words, dispatch_async_and_wait() is similar to submitting - * a dispatch_block_create()d workitem to a queue and then waiting on it, as - * shown in the code example below. However, dispatch_async_and_wait() is - * significantly more efficient when a new thread is not required to execute - * the workitem (as it will use the stack of the submitting thread instead of - * requiring heap allocations). - * - * - * dispatch_block_t b = dispatch_block_create(0, block); - * dispatch_async(queue, b); - * dispatch_block_wait(b, DISPATCH_TIME_FOREVER); - * Block_release(b); - * - * - * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param block - * The block to be invoked on the target dispatch queue. - * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! - * @function dispatch_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * See dispatch_async_and_wait() for details. - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); - -/*! - * @function dispatch_barrier_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. 
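
The discussion being deleted above (dispatch_async_and_wait() and its variants become public API in this patch) is easiest to see in code. A minimal hedged sketch, assuming a serial queue with asynchronous work already in flight; prepare() and consume() are hypothetical helpers:

	dispatch_queue_t q = dispatch_queue_create("com.example.q", NULL);
	dispatch_async(q, ^{ prepare(); });
	// Waits for completion. Runs on q's servicing thread if one is already
	// up, otherwise on the calling thread, and observes q's attributes
	// (QoS, autorelease frequency) either way.
	dispatch_async_and_wait(q, ^{ consume(); });
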
- * - * @discussion - * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks - * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param work - * The application-defined block to invoke on the target queue. - * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! - * @function dispatch_barrier_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but - * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_barrier_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); __END_DECLS diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 5f8c2f068..ead653595 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -278,7 +278,7 @@ struct dispatch_magazine_s { }; #if DISPATCH_DEBUG -#define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) +#define DISPATCH_ALLOCATOR_SCRIBBLE ((int)0xAFAFAFAF) #endif diff --git a/src/block.cpp b/src/block.cpp index 55f83c27d..3d7432529 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -91,7 +91,7 @@ struct dispatch_block_private_data_s { if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return; if (dbpd_group) { if (!dbpd_performed) dispatch_group_leave(dbpd_group); - _os_object_release(dbpd_group->_as_os_obj); + _os_object_release_without_xref_dispose(dbpd_group->_as_os_obj); } if (dbpd_queue) { _os_object_release_internal_n(dbpd_queue->_as_os_obj, 2); diff --git a/src/data.m b/src/data.m index 2a95d28f2..e0185a0cf 100644 --- a/src/data.m +++ b/src/data.m @@ -150,11 +150,9 @@ - (void)_activate { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(data_empty) - -// Force non-lazy class realization rdar://10640168 -+ (void)load { -} +OS_OBJECT_NONLAZY_CLASS_LOAD - (id)retain { return (id)self; diff --git a/src/event/event.c b/src/event/event.c index d3ad14d30..937ca6ca2 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -39,7 +39,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, return DISPATCH_UNOTE_NULL; } - if (dst->dst_mask && !mask) { + if (dst->dst_mask && !dst->dst_allow_empty_mask && !mask) { return DISPATCH_UNOTE_NULL; } @@ -227,7 +227,6 @@ const dispatch_source_type_s _dispatch_source_type_data_add = { .dst_flags = 
EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -239,7 +238,6 @@ const dispatch_source_type_s _dispatch_source_type_data_or = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -251,7 +249,6 @@ const dispatch_source_type_s _dispatch_source_type_data_replace = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -271,7 +268,6 @@ const dispatch_source_type_s _dispatch_source_type_read = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -289,7 +285,6 @@ const dispatch_source_type_s _dispatch_source_type_write = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -313,7 +308,6 @@ const dispatch_source_type_s _dispatch_source_type_signal = { .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_signal_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -990,7 +984,6 @@ const dispatch_source_type_s _dispatch_source_type_timer = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1004,6 +997,7 @@ const dispatch_source_type_s _dispatch_source_type_timer_with_clock = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), + .dst_strict = true, .dst_create = _dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, diff --git a/src/event/event_config.h b/src/event/event_config.h index 4f4b6e5a3..f221d0922 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -148,8 +148,10 @@ # endif // VQ_FREE_SPACE_CHANGE # if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__) -# define EVFILT_NW_CHANNEL (-16) -# define NOTE_FLOW_ADV_UPDATE 0x1 +# define EVFILT_NW_CHANNEL (-16) +# define NOTE_FLOW_ADV_UPDATE 0x1 +# define NOTE_CHANNEL_EVENT 0x2 +# define NOTE_IF_ADV_UPD 0x4 # endif #else // DISPATCH_EVENT_BACKEND_KEVENT # define EV_ADD 0x0001 @@ -232,6 +234,10 @@ typedef unsigned int mach_msg_priority_t; # define MACH_SEND_SYNC_OVERRIDE 0x00100000 # endif // MACH_SEND_SYNC_OVERRIDE +# ifndef MACH_MSG_STRICT_REPLY +# define MACH_MSG_STRICT_REPLY 0x00000200 +# endif + # ifndef MACH_RCV_SYNC_WAIT # define MACH_RCV_SYNC_WAIT 0x00004000 # endif // MACH_RCV_SYNC_WAIT diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 249d89f63..d59b303c4 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h 
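
Two event-machinery knobs travel together here: the event.c hunk above now rejects an empty mask only when the source type has not opted out, and the event_internal.h hunk below adds the dst_allow_empty_mask bit that expresses the opt-in. The user-visible effect (hedged; recv_port and q are placeholders) is that MACH_RECV sources keep accepting a zero mask even though their type definition later in event_kevent.c now carries MACH_RCV_SYNC_PEEK in dst_mask:

	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MACH_RECV, recv_port, 0 /* empty mask */, q);
	dispatch_source_set_event_handler(ds, ^{ /* drain recv_port */ });
	dispatch_activate(ds);
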
@@ -99,7 +99,7 @@ typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle #define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0x3ul)) #define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~0x7ul)) -DISPATCH_ENUM(dispatch_unote_timer_flags, uint8_t, +DISPATCH_OPTIONS(dispatch_unote_timer_flags, uint8_t, /* DISPATCH_TIMER_STRICT 0x1 */ /* DISPATCH_TIMER_BACKGROUND = 0x2, */ DISPATCH_TIMER_CLOCK_UPTIME = DISPATCH_CLOCK_UPTIME << 2, @@ -344,6 +344,7 @@ typedef struct dispatch_source_type_s { dispatch_unote_action_t dst_action; uint8_t dst_per_trigger_qos : 1; uint8_t dst_strict : 1; + uint8_t dst_allow_empty_mask : 1; uint8_t dst_timer_flags; uint16_t dst_flags; #if DISPATCH_EVENT_BACKEND_KEVENT diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 7ab4bf586..c2cab0fc2 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -323,6 +323,17 @@ _dispatch_kevent_mach_msg_size(dispatch_kevent_t ke) return (mach_msg_size_t)ke->ext[1]; } +static inline bool +_dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke) +{ +#define MACH_ERROR_RCV_SUB 0x4 + mach_error_t kr = (mach_error_t) ke->fflags; + return (err_get_system(kr) == err_mach_ipc) && + (err_get_sub(kr) == MACH_ERROR_RCV_SUB); +#undef MACH_ERROR_RCV_SUB +} + +static inline bool _dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke); static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke); static inline void _dispatch_mach_host_calendar_change_register(void); @@ -560,7 +571,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) } #if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && _dispatch_kevent_mach_msg_size(ke)) { + if (ke->filter == EVFILT_MACHPORT && (_dispatch_kevent_mach_msg_size(ke) || + _dispatch_kevent_has_machmsg_rcv_error(ke))) { return _dispatch_kevent_mach_msg_drain(ke); } #endif @@ -777,8 +789,9 @@ _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, #if DISPATCH_USE_KEVENT_QOS size_t size; if (poll_for_events) { - size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + - DISPATCH_MACH_TRAILER_SIZE; + dispatch_assert(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE <= 32 << 10); + size = 32 << 10; // match WQ_KEVENT_DATA_SIZE buf = alloca(size); avail = &size; } @@ -946,7 +959,7 @@ void _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, uint64_t _Atomic *addr) { -#ifdef NOTE_WL_SYNC_IPC +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF dispatch_kevent_s ke = { .ident = port, .filter = EVFILT_WORKLOOP, @@ -958,18 +971,18 @@ _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, .ext[EV_EXTIDX_WL_VALUE] = (uintptr_t)wlh, }; int rc = _dispatch_kq_immediate_update(wlh, &ke); - if (unlikely(rc)) { + if (unlikely(rc && rc != ENOENT)) { DISPATCH_INTERNAL_CRASH(rc, "Unexpected error from kevent"); } #else (void)wlh; (void)port; (void)addr; -#endif +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF } void _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) { -#ifdef NOTE_WL_SYNC_IPC +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF dispatch_kevent_s ke = { .ident = port, .filter = EVFILT_WORKLOOP, @@ -980,7 +993,7 @@ _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) _dispatch_kq_deferred_update(wlh, &ke); #else (void)wlh; (void)port; -#endif // NOTE_WL_SYNC_IPC +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF } #endif @@ -1652,6 +1665,13 @@ _dispatch_kevent_workloop_poke_drain(dispatch_kevent_t ke) dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); dispatch_wlh_t wlh = 
(dispatch_wlh_t)ke->udata; +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF + if (ke->fflags & NOTE_WL_SYNC_IPC) { + dispatch_assert((ke->flags & EV_ERROR) && ke->data == ENOENT); + return _dispatch_kevent_wlh_debug("ignoring", ke); + } +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF + dispatch_assert(ke->fflags & NOTE_WL_THREAD_REQUEST); if (ke->flags & EV_ERROR) { uint64_t dq_state = ke->ext[EV_EXTIDX_WL_VALUE]; @@ -1852,7 +1872,6 @@ _dispatch_kevent_workloop_poke_self(dispatch_deferred_items_t ddi, // will continue to apply the overrides in question until we acknowledge // them, so there's no rush. // - ddi->ddi_wlh_needs_update = true; if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { _dispatch_release_no_dispose(dq); } else { @@ -1868,6 +1887,7 @@ _dispatch_kevent_workloop_poke_self(dispatch_deferred_items_t ddi, } dispatch_assert(!ddi->ddi_stashed_dou._dq); ddi->ddi_wlh_needs_delete = true; + ddi->ddi_wlh_needs_update = true; ddi->ddi_stashed_rq = upcast(dq->do_targetq)._dgq; ddi->ddi_stashed_dou._dq = dq; ddi->ddi_stashed_qos = _dq_state_max_qos(dq_state); @@ -2161,7 +2181,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) int i, n = 0; dq_state = os_atomic_load2o((dispatch_queue_t)wlh, dq_state, relaxed); - if (dsc->dsc_wlh_was_first && !_dq_state_drain_locked(dq_state) && + if (!_dq_state_drain_locked(dq_state) && _dq_state_is_enqueued_on_target(dq_state)) { // // @@ -2182,6 +2202,13 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) // lower priority thread, so we need to drive it once to avoid priority // inversions. // + // + // + // Also, it is possible that a low priority async is ahead of us, + // and hasn't made its thread request yet. If this waiter is high + // priority this is a priority inversion, and we need to redrive the + // async. 
+ // _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC, wlh, dq_state); } @@ -2417,7 +2444,6 @@ const dispatch_source_type_s _dispatch_source_type_proc = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_proc_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2438,7 +2464,6 @@ const dispatch_source_type_s _dispatch_source_type_vnode = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2471,7 +2496,6 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_without_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2497,7 +2521,6 @@ const dispatch_source_type_s _dispatch_source_type_sock = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2509,10 +2532,9 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { .dst_kind = "nw_channel", .dst_filter = EVFILT_NW_CHANNEL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, - .dst_mask = NOTE_FLOW_ADV_UPDATE, + .dst_mask = NOTE_FLOW_ADV_UPDATE|NOTE_CHANNEL_EVENT|NOTE_IF_ADV_UPD, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2630,7 +2652,6 @@ const dispatch_source_type_s _dispatch_source_type_memorypressure = { |NOTE_MEMORYSTATUS_MSL_STATUS, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, #if TARGET_OS_SIMULATOR .dst_create = _dispatch_source_memorypressure_create, @@ -2661,7 +2682,6 @@ const dispatch_source_type_s _dispatch_source_type_vm = { .dst_mask = NOTE_VM_PRESSURE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_vm_create, // redirected to _dispatch_source_type_memorypressure @@ -2712,6 +2732,18 @@ _dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr) return audit_tlr; } +bool +_dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr) +{ + mach_msg_audit_trailer_t *tlr; + tlr = _dispatch_mach_msg_get_audit_trailer(hdr); + if (!tlr) { + DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); + } + + return tlr->msgh_audit.val[DISPATCH_MACH_AUDIT_TOKEN_PID] == 0; +} + DISPATCH_NOINLINE static void _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, @@ -2720,18 +2752,12 @@ _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, pthread_priority_t ovr_pp DISPATCH_UNUSED) { mig_reply_error_t reply; - mach_msg_audit_trailer_t *tlr = NULL; dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); dispatch_assert(sizeof(mig_reply_error_t) < DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE); - tlr = _dispatch_mach_msg_get_audit_trailer(hdr); - if (!tlr) { - DISPATCH_INTERNAL_CRASH(0, 
"message received without expected trailer"); - } if (hdr->msgh_id <= MACH_NOTIFY_LAST && - dispatch_assume_zero(tlr->msgh_audit.val[ - DISPATCH_MACH_AUDIT_TOKEN_PID])) { + !dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) { mach_msg_destroy(hdr); goto out; } @@ -3075,7 +3101,6 @@ const dispatch_source_type_s _dispatch_source_type_mach_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3103,7 +3128,6 @@ const dispatch_source_type_s _dispatch_mach_type_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_send_refs_s), - .dst_strict = false, .dst_create = _dispatch_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3142,20 +3166,27 @@ static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) { mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz = _dispatch_kevent_mach_msg_size(ke); dispatch_unote_t du = _dispatch_kevent_get_unote(ke); pthread_priority_t msg_pp = (pthread_priority_t)(ke->ext[2] >> 32); pthread_priority_t ovr_pp = (pthread_priority_t)ke->qos; uint32_t flags = ke->flags; - mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - if (unlikely(!hdr)) { - DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); - } - if (likely(!kr)) { - return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); - } - if (kr != MACH_RCV_TOO_LARGE) { + if (unlikely(kr == MACH_RCV_TOO_LARGE)) { + if (unlikely(!siz)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message size"); + } + } else if (unlikely(kr == MACH_RCV_INVALID_DATA)) { + dispatch_assert(siz == 0); + DISPATCH_CLIENT_CRASH(kr, "Unable to copyout msg, possible port leak"); + } else { + if (unlikely(!hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); + } + if (likely(!kr)) { + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + } goto out; } @@ -3166,9 +3197,14 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) DISPATCH_INTERNAL_CRASH(ke->ext[1], "EVFILT_MACHPORT with overlarge message"); } + + mach_msg_options_t extra_options = 0; + if (du._du->du_fflags & MACH_MSG_STRICT_REPLY) { + extra_options |= MACH_MSG_STRICT_REPLY; + } const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + MACH_RCV_TIMEOUT | extra_options) & ~MACH_RCV_LARGE); + siz += DISPATCH_MACH_TRAILER_SIZE; hdr = malloc(siz); // mach_msg will return TOO_LARGE if hdr/siz is NULL/0 kr = mach_msg(hdr, options, 0, dispatch_assume(hdr) ? 
siz : 0, (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); @@ -3198,15 +3234,20 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, .dst_fflags = 0, + .dst_mask = 0 +#ifdef MACH_RCV_SYNC_PEEK + | MACH_RCV_SYNC_PEEK +#endif + , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_source_merge_evt, .dst_merge_msg = NULL, // never receives messages directly .dst_per_trigger_qos = true, + .dst_allow_empty_mask = true, }; static void @@ -3220,10 +3261,9 @@ const dispatch_source_type_s _dispatch_mach_type_notification = { .dst_kind = "mach_notification", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_unote_class_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_notification_event, @@ -3237,7 +3277,7 @@ _dispatch_mach_recv_direct_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp) { if (flags & EV_VANISHED) { - DISPATCH_CLIENT_CRASH(du._du->du_ident, + DISPATCH_CLIENT_CRASH(0, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } return _dispatch_source_merge_evt(du, flags, data, pp); @@ -3250,7 +3290,6 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_recv_refs_s), - .dst_strict = false, // without handle because the mach code will set the ident after connect .dst_create = _dispatch_unote_create_without_handle, @@ -3262,21 +3301,24 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { DISPATCH_NORETURN static void -_dispatch_mach_reply_merge_evt(dispatch_unote_t du, - uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, +_dispatch_mach_reply_merge_evt(dispatch_unote_t du DISPATCH_UNUSED, + uint32_t flags, uintptr_t data DISPATCH_UNUSED, pthread_priority_t pp DISPATCH_UNUSED) { - DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(0, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + DISPATCH_INTERNAL_CRASH(flags, "Unexpected event"); } const dispatch_source_type_s _dispatch_mach_type_reply = { .dst_kind = "mach reply", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_reply_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_reply_merge_evt, @@ -3292,7 +3334,6 @@ const dispatch_source_type_s _dispatch_xpc_type_sigterm = { .dst_fflags = 0, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_xpc_term_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_xpc_sigterm_merge_evt, diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index 83d46ef03..0f62d3adb 100644 --- 
a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -28,6 +28,7 @@ serverprefix firehose_server_; userprefix firehose_send_; UseSpecialReplyPort 1; +ConsumeOnSendError Timeout; simpleroutine register( server_port : mach_port_t; diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 4631755d1..f6331a32f 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -18,7 +18,19 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +#include #include // VM_MEMORY_GENEALOGY + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + #ifdef KERNEL #define OS_VOUCHER_ACTIVITY_SPI_TYPES 1 @@ -26,8 +38,12 @@ #define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 #define DISPATCH_PURE_C 1 +#ifndef os_likely #define os_likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef os_unlikely #define os_unlikely(x) __builtin_expect(!!(x), 0) +#endif #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) @@ -74,7 +90,6 @@ static void _dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include -#include // os/internal/atomic.h #include // #include // #include // @@ -295,7 +310,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) if (old.fbs_atomic_state == new.fbs_atomic_state) { return; } - os_atomic_add2o(&fb->fb_header, fbh_bank.fbb_state.fbs_atomic_state, + os_atomic_add(&fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, new.fbs_atomic_state - old.fbs_atomic_state, relaxed); } #endif // !KERNEL @@ -511,11 +526,11 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, #endif } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_mem_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_mem_flushed, reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) { mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos); } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_io_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_io_flushed, reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) { io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos); } @@ -527,14 +542,14 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, if (!mem_delta && !io_delta) { if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; } __firehose_critical_region_enter(); - os_atomic_rmw_loop2o(fbh, fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fbh->fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; // overflow handles the generation wraps @@ -544,18 +559,18 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_add2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + state.fbs_atomic_state = os_atomic_add( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); __firehose_critical_region_leave(); if (state_out) *state_out = state; if (async_notif) { if (io_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, 
relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_io_notifs, 1u, relaxed); } if (mem_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_mem_notifs, relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_mem_notifs, 1u, relaxed); } } } @@ -676,8 +691,8 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; @@ -689,9 +704,9 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (for_io) { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_io_sync_pushes, relaxed); } else { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_mem_sync_pushes, relaxed); } // TODO // @@ -808,7 +823,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, stamp_and_len = stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)flp_size << 48; - os_atomic_store2o(*lft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&(*lft)->ft_stamp_and_length, stamp_and_len, relaxed); (*lft)->ft_thread = thread; // not really meaningful @@ -828,7 +843,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, // write the length before making the chunk visible stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; - os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&ft->ft_stamp_and_length, stamp_and_len, relaxed); ft->ft_thread = thread; @@ -863,7 +878,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, bool installed = false; firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); - if (fc->fc_pos.fcp_atomic_pos) { + if (os_atomic_load(&fc->fc_pos.fcp_atomic_pos, relaxed)) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the // observers will see memory effects in program (asm) order. @@ -880,7 +895,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, dispatch_compiler_barrier(); if (ask->stream == firehose_stream_metadata) { - os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, 1ULL << ref, + os_atomic_or(&fbh->fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); } @@ -898,13 +913,13 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, // event needs to be placed at the beginning of the chunk in addition to // the first actual tracepoint. 
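
Most of the firehose_buffer.c churn is one mechanical rewrite: the field-offset atomic macros (the *2o spellings) become plain pointer forms. The pattern, using one pair from the hunks above:

	// before: object plus field-path spelling
	os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, relaxed);
	// after: take the field's address explicitly
	os_atomic_add(&fbh->fbh_bank.fbb_io_notifs, 1u, relaxed);
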
state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (likely(!state.fss_loss)) { ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, NULL, 0); // release to publish the chunk init - installed = os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + installed = os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { if (state.fss_loss) { os_atomic_rmw_loop_give_up(break); @@ -921,14 +936,14 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, uint64_t loss_start, loss_end; // ensure we can see the start stamp - (void)os_atomic_load2o(fbs, fbs_state.fss_atomic_state, acquire); + (void)os_atomic_load(&fbs->fbs_state.fss_atomic_state, acquire); loss_start = fbs->fbs_loss_start; fbs->fbs_loss_start = 0; // reset under fss_gate loss_end = mach_continuous_time(); ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, &lft, loss_start); - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { // no giving up this time! new_state = (firehose_stream_state_u){ @@ -952,19 +967,19 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, } }; // publish the contents of the loss tracepoint - os_atomic_store2o(lft, ft_id.ftid_atomic_value, ftid.ftid_value, + os_atomic_store(&lft->ft_id.ftid_atomic_value, ftid.ftid_value, release); } } else { // the allocator gave up - just clear the allocator and waiter bits and // increment the loss count state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ @@ -975,7 +990,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ @@ -1004,9 +1019,9 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, firehose_buffer_update_limits(fb); } - if (unlikely(os_atomic_load2o(fbh, fbh_quarantined_state, relaxed) == + if (unlikely(os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == FBH_QUARANTINE_PENDING)) { - if (os_atomic_cmpxchg2o(fbh, fbh_quarantined_state, + if (os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_PENDING, FBH_QUARANTINE_STARTED, relaxed)) { firehose_client_start_quarantine(fb); } @@ -1190,7 +1205,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_chunk_t fc; bool for_io; - os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fb->fb_header.fbh_ring_tail.frp_atomic_tail, old.frp_atomic_tail, pos.frp_atomic_tail, relaxed, { pos = old; if (likely(old.frp_mem_tail != old.frp_mem_flushed)) { @@ -1228,13 +1243,13 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) fc = firehose_buffer_ref_to_chunk(fb, ref); if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { - os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + 
os_atomic_and(&fb->fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + os_atomic_store(&fc->fc_pos.fcp_atomic_pos, FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); - os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); + os_atomic_store(&fbh_ring[tail], gen, relaxed); return ref; } @@ -1256,7 +1271,7 @@ firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t f // first wait for our bank to have space, if needed if (unlikely(!ask->is_bank_ok)) { state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); while (!firehose_buffer_bank_try_reserve_slot(fb, for_io, &state)) { if (ask->quarantined) { __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, for_io, @@ -1334,7 +1349,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, #endif // KERNEL state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); reserved = firehose_buffer_bank_try_reserve_slot(fb, for_io, &state); #ifndef KERNEL diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index a2c80c2b7..fd05801a1 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -26,14 +26,14 @@ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #endif -#define firehose_atomic_maxv2o(p, f, v, o, m) \ - os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ +#define firehose_atomic_maxv(p, v, o, m) \ + os_atomic_rmw_loop(p, *(o), (v), m, { \ if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ }) -#define firehose_atomic_max2o(p, f, v, m) ({ \ - _os_atomic_basetypeof(&(p)->f) _old; \ - firehose_atomic_maxv2o(p, f, v, &_old, m); \ +#define firehose_atomic_max(p, v, m) ({ \ + _os_atomic_basetypeof(p) _old; \ + firehose_atomic_maxv(p, v, &_old, m); \ }) #ifndef KERNEL @@ -134,6 +134,7 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, } if (unlikely(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { // destroy the request - but not the reply port + // (MIG moved it into the msg_reply). hdr->msgh_remote_port = 0; mach_msg_destroy(hdr); } @@ -168,6 +169,7 @@ firehose_buffer_ref_to_chunk(firehose_buffer_t fb, firehose_chunk_ref_t ref) #ifndef FIREHOSE_SERVER #if DISPATCH_PURE_C +#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) @@ -180,7 +182,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) long result; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away @@ -206,11 +208,11 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) // allocators know how to handle in the first place new_state = old_state; new_state.fss_current = 0; - (void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state, + (void)os_atomic_cmpxchg(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); } -/** +/*! 
* @function firehose_buffer_tracepoint_reserve * * @abstract @@ -262,9 +264,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, long result; firehose_chunk_ref_t ref; - // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store + // cannot use os_atomic_rmw_loop, _page_try_reserve does a store old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); for (;;) { new_state = old_state; @@ -297,7 +299,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, new_state.fss_loss = MIN(old_state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX); - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (success) { @@ -318,7 +320,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, waited = true; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); #else if (likely(reliable)) { new_state.fss_allocator |= FIREHOSE_GATE_RELIABLE_WAITERS_BIT; @@ -328,8 +330,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, bool already_equal = (new_state.fss_atomic_state == old_state.fss_atomic_state); - success = already_equal || os_atomic_cmpxchgv2o(fbs, - fbs_state.fss_atomic_state, old_state.fss_atomic_state, + success = already_equal || os_atomic_cmpxchgv( + &fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (success) { @@ -341,8 +343,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, DLOCK_LOCK_DATA_CONTENTION); waited = true; - old_state.fss_atomic_state = os_atomic_load2o(fbs, - fbs_state.fss_atomic_state, relaxed); + old_state.fss_atomic_state = os_atomic_load( + &fbs->fbs_state.fss_atomic_state, relaxed); } #endif continue; @@ -354,11 +356,11 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, // firehose_buffer_stream_chunk_install()) __firehose_critical_region_enter(); #if KERNEL - new_state.fss_allocator = (uint32_t)cpu_number(); + new_state.fss_allocator = 1; #else new_state.fss_allocator = _dispatch_lock_value_for_self(); #endif - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (likely(success)) { @@ -388,7 +390,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); } -/** +/*! 
* @function firehose_buffer_tracepoint_flush * * @abstract @@ -440,7 +442,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, new_state = old_state; new_state.fbs_banks[for_io]--; - success = os_atomic_cmpxchgvw(&fbb->fbb_state.fbs_atomic_state, + success = os_atomic_cmpxchgv(&fbb->fbb_state.fbs_atomic_state, old_state.fbs_atomic_state, new_state.fbs_atomic_state, &old_state.fbs_atomic_state, acquire); } while (unlikely(!success)); @@ -448,6 +450,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, *state_in_out = new_state; return true; } +#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY #ifndef KERNEL OS_ALWAYS_INLINE @@ -459,12 +462,12 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ @@ -477,7 +480,7 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ @@ -506,7 +509,7 @@ firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed); + orig_flags = os_atomic_and_orig(&fbb->fbb_flags, ~bits, relaxed); if (orig_flags != (orig_flags & ~bits)) { firehose_buffer_update_limits(fb); } @@ -519,7 +522,7 @@ firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed); + orig_flags = os_atomic_or_orig(&fbb->fbb_flags, bits, relaxed); if (orig_flags != (orig_flags | bits)) { firehose_buffer_update_limits(fb); } @@ -530,7 +533,7 @@ static inline void firehose_buffer_bank_relinquish_slot(firehose_buffer_t fb, bool for_io) { firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; - os_atomic_add2o(fbb, fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), + os_atomic_add(&fbb->fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); } #endif // !KERNEL diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs index caef7b43e..b5737c030 100644 --- a/src/firehose/firehose_reply.defs +++ b/src/firehose/firehose_reply.defs @@ -27,6 +27,7 @@ subsystem firehoseReply 11700; serverprefix firehose_client_; userprefix firehose_send_; +ConsumeOnSendError Timeout; skip; // firehose_register diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index a674c8fc8..64cd2feae 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -41,6 +41,7 @@ static struct firehose_server_s { dispatch_mach_t fs_mach_channel; dispatch_queue_t fs_snapshot_gate_queue; dispatch_queue_t fs_io_drain_queue; + dispatch_workloop_t fs_io_wl; dispatch_queue_t 
fs_mem_drain_queue; firehose_handler_t fs_handler; @@ -212,9 +213,9 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) }; kern_return_t kr; - firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos, + firehose_atomic_max(&fc->fc_mem_sent_flushed_pos, push_reply.fpr_mem_flushed_pos, relaxed); - firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, + firehose_atomic_max(&fc->fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); if (!fc->fc_pid) { @@ -291,6 +292,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags firehose_buffer_t fb = fc->fc_buffer; firehose_chunk_t fbc; firehose_event_t evt; + firehose_snapshot_event_t sevt; uint16_t volatile *fbh_ring; uint16_t flushed, count = 0; firehose_chunk_ref_t ref; @@ -300,6 +302,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (for_io) { evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER; _Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, ""); fbh_ring = fb->fb_header.fbh_io_ring; @@ -308,6 +311,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (fc->fc_needs_io_snapshot) snapshot = server_config.fs_snapshot; } else { evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER; _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, ""); fbh_ring = fb->fb_header.fbh_mem_ring; @@ -353,7 +357,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags } server_config.fs_handler(fc, evt, fbc, fc_pos); if (unlikely(snapshot)) { - snapshot->handler(fc, evt, fbc, fc_pos); + snapshot->handler(fc, sevt, fbc, fc_pos); } if (fc_pos.fcp_stream == firehose_stream_metadata) { os_unfair_lock_unlock(&fc->fc_lock); @@ -491,7 +495,7 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; - _os_object_release(&fc->fc_as_os_object); + _os_object_release_without_xref_dispose(&fc->fc_as_os_object); } OS_NOINLINE @@ -614,15 +618,15 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, break; } - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { - _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", - firehose_client_get_unique_pid(fc, NULL)); - for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { - dispatch_mach_cancel(fc->fc_mach_channel[i]); - } + mach_msg_destroy(dispatch_mach_msg_get_msg(dmsg, NULL)); + break; + + case DISPATCH_MACH_NO_SENDERS: + _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", + firehose_client_get_unique_pid(fc, NULL)); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_cancel(fc->fc_mach_channel[i]); } - mach_msg_destroy(msg_hdr); break; case DISPATCH_MACH_DISCONNECTED: @@ -829,13 +833,6 @@ _firehose_client_dispose(firehose_client_t fc) (firehose_chunk_pos_u){ .fcp_pos = 0 }); } -void -_firehose_client_xref_dispose(firehose_client_t fc) -{ - _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", - firehose_client_get_unique_pid(fc, NULL)); -} - uint64_t firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) { @@ -911,7 +908,8 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) { struct firehose_server_s *fs = &server_config; dispatch_queue_attr_t attr = 
DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL; - dispatch_queue_attr_t attr_inactive, attr_utility_inactive; + dispatch_queue_attr_t attr_inactive = + dispatch_queue_attr_make_initially_inactive(attr); dispatch_mach_t dm; dispatch_source_t ds; @@ -921,14 +919,12 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( "com.apple.firehose.snapshot-gate", attr, NULL); - attr_inactive = dispatch_queue_attr_make_initially_inactive(attr); - attr_utility_inactive = dispatch_queue_attr_make_with_qos_class( - attr_inactive, QOS_CLASS_UTILITY, 0); + fs->fs_io_wl = dispatch_workloop_create_inactive("com.apple.firehose.io-wl"); + dispatch_set_qos_class_fallback(fs->fs_io_wl, QOS_CLASS_UTILITY); + dispatch_activate(fs->fs_io_wl); fs->fs_io_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-io", attr_utility_inactive, NULL); - dispatch_set_qos_class_fallback(fs->fs_io_drain_queue, QOS_CLASS_UTILITY); - dispatch_activate(fs->fs_io_drain_queue); + "com.apple.firehose.drain-io", attr, (dispatch_queue_t)fs->fs_io_wl); fs->fs_mem_drain_queue = dispatch_queue_create_with_target( "com.apple.firehose.drain-mem", attr_inactive, NULL); @@ -1058,6 +1054,9 @@ firehose_server_copy_queue(firehose_server_queue_t which) case FIREHOSE_SERVER_QUEUE_MEMORY: dq = server_config.fs_mem_drain_queue; break; + case FIREHOSE_SERVER_QUEUE_IO_WL: + dq = (dispatch_queue_t)server_config.fs_io_wl; + break; default: DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); } @@ -1337,20 +1336,6 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, return KERN_INVALID_VALUE; } - /* - * Request a MACH_NOTIFY_NO_SENDERS notification for the mem_recvp. That - * should indicate the client going away. - */ - mach_port_t previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), comm_mem_recvp, - MACH_NOTIFY_NO_SENDERS, 0, comm_mem_recvp, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - return KERN_FAILURE; - } - dispatch_assert(previous == MACH_PORT_NULL); - /* Map the memory handle into the server address space */ kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0, VM_FLAGS_ANYWHERE, mem_port, 0, FALSE, @@ -1381,6 +1366,12 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, fc = firehose_client_create((firehose_buffer_t)base_addr, (firehose_token_t)&atoken, comm_mem_recvp, comm_io_recvp, comm_sendp); + /* + * Request a no senders notification for the memory channel. + * That should indicate the client going away. 
+ */ + dispatch_mach_request_no_senders( + fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM]); firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 571cc2a0e..daba772b5 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -80,8 +80,6 @@ struct firehose_client_s { bool volatile fc_quarantined; } DISPATCH_ATOMIC64_ALIGN; -void -_firehose_client_xref_dispose(struct firehose_client_s *fc); void _firehose_client_dispose(struct firehose_client_s *fc); diff --git a/src/firehose/firehose_server_object.m b/src/firehose/firehose_server_object.m index 6965ca0f5..c5243c149 100644 --- a/src/firehose/firehose_server_object.m +++ b/src/firehose/firehose_server_object.m @@ -24,20 +24,15 @@ #error the firehose server requires the objc-runtime, no ARC #endif +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(firehose_client) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -+ (void)load { } -- (void)_xref_dispose -{ - _firehose_client_xref_dispose((struct firehose_client_s *)self); - [super _xref_dispose]; -} - -- (void)_dispose +- (void)dealloc { _firehose_client_dispose((struct firehose_client_s *)self); - [super _dispose]; + [super dealloc]; } - (NSString *)debugDescription diff --git a/src/firehose/firehose_types.defs b/src/firehose/firehose_types.defs index 9462fd808..56f60957b 100644 --- a/src/firehose/firehose_types.defs +++ b/src/firehose/firehose_types.defs @@ -21,6 +21,7 @@ #include #include +import ; import ; import ; diff --git a/src/init.c b/src/init.c index 50b744743..abaf55d26 100644 --- a/src/init.c +++ b/src/init.c @@ -32,7 +32,6 @@ #pragma mark - #pragma mark dispatch_init - #if USE_LIBDISPATCH_INIT_CONSTRUCTOR DISPATCH_NOTHROW __attribute__((constructor)) void @@ -443,7 +442,7 @@ _dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa) dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT); idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT; - dqai.dqai_relpri = -(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT); + dqai.dqai_relpri = -(int)(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT); idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT; dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT; @@ -634,8 +633,7 @@ DISPATCH_VTABLE_INSTANCE(disk, DISPATCH_NOINLINE static void -_dispatch_queue_no_activate(dispatch_queue_class_t dqu, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_queue_no_activate(dispatch_queue_class_t dqu) { DISPATCH_INTERNAL_CRASH(dx_type(dqu._dq), "dq_activate called"); } @@ -757,6 +755,17 @@ DISPATCH_VTABLE_INSTANCE(source, .dq_push = _dispatch_lane_push, ); +DISPATCH_VTABLE_INSTANCE(channel, + .do_type = DISPATCH_CHANNEL_TYPE, + .do_dispose = _dispatch_channel_dispose, + .do_debug = _dispatch_channel_debug, + .do_invoke = _dispatch_channel_invoke, + + .dq_activate = _dispatch_lane_activate, + .dq_wakeup = _dispatch_channel_wakeup, + .dq_push = _dispatch_lane_push, +); + #if HAVE_MACH DISPATCH_VTABLE_INSTANCE(mach, .do_type = DISPATCH_MACH_CHANNEL_TYPE, @@ -1180,31 +1189,31 @@ _dispatch_vsyslog(const char *msg, va_list ap) static inline void _dispatch_syslog(const char *msg) { - OutputDebugStringA(msg); + OutputDebugStringA(msg); } static inline void _dispatch_vsyslog(const char *msg, va_list ap) { - va_list argp; + va_list argp; - va_copy(argp, ap); + va_copy(argp, ap); - int length = _vscprintf(msg, ap); - if (length == -1) - return; + int length = _vscprintf(msg, ap); + if (length == -1) + return; - char *buffer = 
malloc((size_t)length + 1); - if (buffer == NULL) - return; + char *buffer = malloc((size_t)length + 1); + if (buffer == NULL) + return; - _vsnprintf(buffer, (size_t)length + 1, msg, argp); + _vsnprintf(buffer, (size_t)length + 1, msg, argp); - va_end(argp); + va_end(argp); - _dispatch_syslog(buffer); + _dispatch_syslog(buffer); - free(buffer); + free(buffer); } #else // DISPATCH_USE_SIMPLE_ASL static inline void @@ -1348,7 +1357,7 @@ _dispatch_calloc(size_t num_items, size_t size) return buf; } -/** +/* * If the source string is mutable, allocates memory and copies the contents. * Otherwise returns the source string. */ diff --git a/src/inline_internal.h b/src/inline_internal.h index 37c57fc7c..f91e2fe7d 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -191,6 +191,16 @@ _dispatch_object_is_sync_waiter(dispatch_object_t dou) return (dou._dc->dc_flags & DC_FLAG_SYNC_WAITER); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_channel_item(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return (dou._dc->dc_flags & DC_FLAG_CHANNEL_ITEM); +} + DISPATCH_ALWAYS_INLINE static inline bool _dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) @@ -901,7 +911,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_suspended(uint64_t dq_state) { - return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; + return dq_state & DISPATCH_QUEUE_SUSPEND_BITS_MASK; } #define DISPATCH_QUEUE_IS_SUSPENDED(x) \ _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) @@ -910,14 +920,24 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_inactive(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_INACTIVE; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_INACTIVE; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_activated(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATED; } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_needs_activation(uint64_t dq_state) +_dq_state_is_activating(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATING; } DISPATCH_ALWAYS_INLINE @@ -1118,6 +1138,19 @@ static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp) #if DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(_dq_state_is_inactive(dq_state))) return; +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); +} + // Note to later developers: ensure that any initialization changes are // made for statically allocated queues (i.e. _dispatch_main_q). 
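The three predicates above decode a single two-bit field out of dq_state. A minimal self-contained model of that encoding (the mask position and values are invented for this sketch; the real constants live in queue_internal.h and are not part of this hunk):

#include <stdbool.h>
#include <stdint.h>

#define QS_INACTIVE_BITS_MASK	(0x3ull << 40)
#define QS_INACTIVE		(0x3ull << 40)	/* never activated */
#define QS_ACTIVATED		(0x2ull << 40)	/* activated while suspended */
#define QS_ACTIVATING		(0x1ull << 40)	/* activation in progress */
					/* 0x0: fully active */

static bool qs_is_inactive(uint64_t s)
{ return (s & QS_INACTIVE_BITS_MASK) == QS_INACTIVE; }
static bool qs_is_activated(uint64_t s)
{ return (s & QS_INACTIVE_BITS_MASK) == QS_ACTIVATED; }
static bool qs_is_activating(uint64_t s)
{ return (s & QS_INACTIVE_BITS_MASK) == QS_ACTIVATING; }

Note how _dq_state_is_suspended() no longer compares against a single bit but tests the whole suspend mask, which also covers these two inactive bits.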
static inline dispatch_queue_class_t @@ -1131,14 +1164,13 @@ _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, DISPATCH_QUEUE_INACTIVE)) == 0); if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) { - dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { dq->do_ref_cnt++; // released when DSF_DELETED is set } } - dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK); + dq_state |= initial_state_bits; dq->do_next = DISPATCH_OBJECT_LISTLESS; dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); @@ -2500,6 +2532,25 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq); } +// used to forward the do_invoke of a continuation with a vtable to its real +// implementation. +// +// Unlike _dispatch_continuation_pop_forwarded, +// this doesn't free the continuation +#define _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, dq, ...) \ + ({ \ + dispatch_continuation_t _dc = (dc); \ + uintptr_t _dc_flags = (dc_flags); \ + _dispatch_continuation_voucher_adopt(_dc, _dc_flags); \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_pop(dq, dc); \ + } \ + __VA_ARGS__; \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_complete(_dc); \ + } \ + }) + // used to forward the do_invoke of a continuation with a vtable to its real // implementation. #define _dispatch_continuation_pop_forwarded(dc, dc_flags, dq, ...) \ diff --git a/src/internal.h b/src/internal.h index e029a0376..17ed1e628 100644 --- a/src/internal.h +++ b/src/internal.h @@ -61,6 +61,9 @@ #if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC #define DISPATCH_LAYOUT_SPI 1 #endif +#if !defined(DISPATCH_CHANNEL_SPI) +#define DISPATCH_CHANNEL_SPI 1 +#endif #if __has_include() #include @@ -138,6 +141,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; #ifdef __OBJC__ id _objc_dq; // unsafe cast for the sake of object.m @@ -153,6 +157,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; dispatch_lane_class_t _dlu; #ifdef __OBJC__ @@ -168,6 +173,7 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; struct dispatch_semaphore_s *_dsema; @@ -207,6 +213,7 @@ upcast(dispatch_object_t dou) #include #include #include +#include /* private.h must be included last to avoid picking up installed headers. 
*/ #if !defined(_WIN32) @@ -214,6 +221,7 @@ upcast(dispatch_object_t dou) #endif #include "os/object_private.h" #include "queue_private.h" +#include "channel_private.h" #include "workloop_private.h" #include "source_private.h" #include "mach_private.h" @@ -253,7 +261,7 @@ upcast(dispatch_object_t dou) #include #endif #endif /* HAVE_MACH */ -#if __has_include() +#if __has_include() && __has_include() #define HAVE_OS_FAULT_WITH_PAYLOAD 1 #include #include @@ -319,6 +327,10 @@ upcast(dispatch_object_t dou) #include #endif +#if __has_include() +#include +#endif + /* More #includes at EOF (dependent on the contents of internal.h) ... */ __BEGIN_DECLS @@ -491,7 +503,7 @@ DISPATCH_NOINLINE DISPATCH_NORETURN DISPATCH_COLD void _dispatch_abort(size_t line, long val); #if !defined(DISPATCH_USE_OS_DEBUG_LOG) && DISPATCH_DEBUG -#if __has_include() +#if __has_include() && !TARGET_OS_DRIVERKIT #define DISPATCH_USE_OS_DEBUG_LOG 1 #include #endif @@ -749,6 +761,22 @@ _dispatch_fork_becomes_unsafe(void) #endif #endif // !defined(DISPATCH_USE_KEVENT_WORKLOOP) +#ifndef DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 1 +#else +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 0 +#endif +#endif // !defined DISPATCH_USE_WL_SYNC_IPC_HANDOFF + +#ifndef DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_KEVENT_SETUP 1 +#else +#define DISPATCH_USE_KEVENT_SETUP 0 +#endif +#endif // !defined(DISPATCH_USE_KEVENT_SETUP) + #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYSTATUS 1 @@ -794,7 +822,6 @@ extern bool _dispatch_memory_warn; #endif #endif // MACH_SEND_NOIMPORTANCE - #if HAVE_LIBPROC_INTERNAL_H #include #include @@ -1062,7 +1089,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -DISPATCH_ENUM(dispatch_thread_set_self, unsigned long, +DISPATCH_OPTIONS(dispatch_thread_set_self, unsigned long, DISPATCH_PRIORITY_ENFORCE = 0x1, DISPATCH_VOUCHER_REPLACE = 0x2, DISPATCH_VOUCHER_CONSUME = 0x4, @@ -1074,6 +1101,7 @@ static inline voucher_t _dispatch_adopt_priority_and_set_voucher( dispatch_thread_set_self_t flags); #if HAVE_MACH mach_port_t _dispatch_get_mach_host_port(void); +bool _dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr); #endif #if HAVE_PTHREAD_WORKQUEUE_QOS diff --git a/src/introspection.c b/src/introspection.c index f38f9e372..27a955be9 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -249,6 +249,10 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); ctxt = dsc->dsc_ctxt; func = dsc->dsc_func; + } else if (_dispatch_object_is_channel_item(dc)) { + dispatch_channel_callbacks_t callbacks = upcast(dq)._dch->dch_callbacks; + ctxt = dc->dc_ctxt; + func = (dispatch_function_t)callbacks->dcc_invoke; } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; @@ -389,7 +393,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, } if (metatype == _DISPATCH_CONTINUATION_TYPE) { _dispatch_introspection_continuation_get_info(dq, dc, &diqi); - } else if (metatype == _DISPATCH_LANE_TYPE) { + } else if (metatype == _DISPATCH_LANE_TYPE || + type == DISPATCH_CHANNEL_TYPE) { diqi.type = 
dispatch_introspection_queue_item_type_queue; diqi.queue = _dispatch_introspection_lane_get_info(dou._dl); } else if (metatype == _DISPATCH_WORKLOOP_TYPE) { diff --git a/src/io.c b/src/io.c index 66ed2a6e2..0624fffd4 100644 --- a/src/io.c +++ b/src/io.c @@ -155,7 +155,7 @@ enum { _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) -#define _dispatch_channel_debug(msg, channel, ...) \ +#define _dispatch_io_channel_debug(msg, channel, ...) \ _dispatch_io_log("channel[%p]: " msg, channel, ##__VA_ARGS__) #define _dispatch_fd_entry_debug(msg, fd_entry, ...) \ _dispatch_io_log("fd_entry[%p]: " msg, fd_entry, ##__VA_ARGS__) @@ -261,7 +261,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_channel_debug("cleanup handler invoke: err %d", + _dispatch_io_channel_debug("cleanup handler invoke: err %d", channel, err); cleanup_handler(err); }); @@ -355,7 +355,7 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; - _dispatch_channel_debug("create", channel); + _dispatch_io_channel_debug("create", channel); channel->fd_actual = fd; dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -422,7 +422,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; - _dispatch_channel_debug("create with path %s", channel, path); + _dispatch_io_channel_debug("create with path %s", channel, path); channel->fd_actual = -1; path_data->channel = channel; path_data->oflag = oflag; @@ -512,7 +512,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, return DISPATCH_BAD_INPUT; } dispatch_io_t channel = _dispatch_io_create(type); - _dispatch_channel_debug("create with channel %p", channel, in_channel); + _dispatch_io_channel_debug("create with channel %p", channel, in_channel); dispatch_suspend(channel->queue); _dispatch_retain(queue); _dispatch_retain(channel); @@ -630,7 +630,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set high water: %zu", channel, high_water); + _dispatch_io_channel_debug("set high water: %zu", channel, high_water); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -644,7 +644,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set low water: %zu", channel, low_water); + _dispatch_io_channel_debug("set low water: %zu", channel, low_water); if (channel->params.high < low_water) { channel->params.high = low_water ? low_water : 1; } @@ -659,7 +659,7 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set interval: %llu", channel, + _dispatch_io_channel_debug("set interval: %llu", channel, (unsigned long long)interval); channel->params.interval = interval < INT64_MAX ? 
interval : INT64_MAX; channel->params.interval_flags = flags; @@ -704,7 +704,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_channel_debug("stop", channel); + _dispatch_io_channel_debug("stop", channel); (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ @@ -712,7 +712,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_channel_debug("stop cleanup", channel); + _dispatch_io_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { if (fd_entry->path_data) { @@ -726,7 +726,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("stop cleanup after close", + _dispatch_io_channel_debug("stop cleanup after close", channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); @@ -762,7 +762,7 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("close", channel); + _dispatch_io_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { (void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, relaxed); @@ -1048,7 +1048,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_channel_debug("IO handler invoke: err %d", channel, + _dispatch_io_channel_debug("IO handler invoke: err %d", channel, err); handler(true, d, err); _dispatch_release(channel); @@ -1060,7 +1060,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } dispatch_operation_t op = _dispatch_object_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); - _dispatch_channel_debug("operation create: %p", channel, op); + _dispatch_io_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create_with_target("com.apple.libdispatch-io.opq", diff --git a/src/mach.c b/src/mach.c index 726368b01..877761a03 100644 --- a/src/mach.c +++ b/src/mach.c @@ -31,7 +31,7 @@ #define DM_CHECKIN_CANCELED ((dispatch_mach_msg_t)~0ul) -DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_mach_send_invoke_flags, uint32_t, DM_SEND_INVOKE_NONE = 0x0, DM_SEND_INVOKE_MAKE_DIRTY = 0x1, DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, @@ -64,6 +64,7 @@ static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, dispatch_queue_t drq); static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( + dispatch_mach_t dm, void *ctxt); static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( dispatch_mach_msg_t dmsg, dispatch_mach_t dm); @@ -101,10 +102,17 @@ _dispatch_mach_hooks_install_default(void) #pragma mark - #pragma mark dispatch_mach_t +DISPATCH_OPTIONS(dispatch_mach_create_flags, unsigned, + DMCF_NONE = 0x00000000, + DMCF_HANDLER_IS_BLOCK = 0x00000001, + DMCF_IS_XPC = 0x00000002, + 
DMCF_USE_STRICT_REPLY = 0x00000004, +); + static dispatch_mach_t _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler, bool handler_is_block, - bool is_xpc) + dispatch_mach_handler_function_t handler, + dispatch_mach_create_flags_t dmcf) { dispatch_mach_recv_refs_t dmrr; dispatch_mach_send_refs_t dmsr; @@ -113,14 +121,18 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm = _dispatch_queue_alloc(mach, DQF_MUTABLE, 1, DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dm; dm->dq_label = label; - dm->dm_is_xpc = is_xpc; + dm->dm_is_xpc = (bool)(dmcf & DMCF_IS_XPC); + dm->dm_strict_reply = (bool)(dmcf & DMCF_USE_STRICT_REPLY); dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; dispatch_assert(dmrr->du_is_direct); dmrr->du_owner_wref = _dispatch_ptr2wref(dm); dmrr->dmrr_handler_func = handler; dmrr->dmrr_handler_ctxt = context; - dmrr->dmrr_handler_is_block = handler_is_block; + dmrr->dmrr_handler_is_block = (bool)(dmcf & DMCF_HANDLER_IS_BLOCK); + if (dm->dm_strict_reply) { + dmrr->du_fflags |= MACH_MSG_STRICT_REPLY; + } dm->dm_recv_refs = dmrr; dmsr = dux_create(&_dispatch_mach_type_send, 0, @@ -144,22 +156,22 @@ dispatch_mach_create(const char *label, dispatch_queue_t q, { dispatch_block_t bb = _dispatch_Block_copy((void*)handler); return _dispatch_mach_create(label, q, bb, - (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true, - false); + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), + DMCF_HANDLER_IS_BLOCK); } dispatch_mach_t dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { - return _dispatch_mach_create(label, q, context, handler, false, false); + return _dispatch_mach_create(label, q, context, handler, DMCF_NONE); } dispatch_mach_t dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { - return _dispatch_mach_create(label, q, context, handler, false, true); + return _dispatch_mach_create(label, q, context, handler, DMCF_IS_XPC | DMCF_USE_STRICT_REPLY); } void @@ -177,6 +189,46 @@ _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free) _dispatch_lane_class_dispose(dm, allow_free); } +void +dispatch_mach_request_no_senders(dispatch_mach_t dm) +{ + dm->dm_arm_no_senders = true; + _dispatch_queue_setter_assert_inactive(dm); +} + +void +dispatch_mach_set_flags(dispatch_mach_t dm, dispatch_mach_flags_t flags) +{ + dm->dm_strict_reply = !!(flags & DMF_USE_STRICT_REPLY); + dm->dm_arm_no_senders = !!(flags & DMF_REQUEST_NO_SENDERS); + + _dispatch_queue_setter_assert_inactive(dm); +} + +static void +_dispatch_mach_arm_no_senders(dispatch_mach_t dm, bool allow_previous) +{ + mach_port_t recvp = (mach_port_t)dm->dm_recv_refs->du_ident; + mach_port_t previous = MACH_PORT_NULL; + kern_return_t kr; + + if (MACH_PORT_VALID(recvp)) { + kr = mach_port_request_notification(mach_task_self(), recvp, + MACH_NOTIFY_NO_SENDERS, 0, recvp, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + if (unlikely(previous)) { + if (!allow_previous) { + DISPATCH_CLIENT_CRASH(previous, "Mach port notification collision"); + } + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +} + void dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, mach_port_t send, dispatch_mach_msg_t checkin) @@ -197,6 +249,10 @@ 
dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive,
 		dmsr->dmsr_checkin = checkin;
 	}
 
+	if (dm->dm_arm_no_senders && !dmsr->dmsr_checkin) {
+		_dispatch_mach_arm_no_senders(dm, false);
+	}
+
 	uint32_t disconnect_cnt = os_atomic_and_orig2o(dmsr, dmsr_disconnect_cnt,
 			~DISPATCH_MACH_NEVER_CONNECTED, relaxed);
 	if (unlikely(!(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED))) {
@@ -290,7 +346,7 @@ _dispatch_mach_reply_unregister(dispatch_mach_t dm,
 	dispatch_queue_t drq = NULL;
 	if (disconnected) {
 		if (dm->dm_is_xpc && dmr->dmr_ctxt) {
-			drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt);
+			drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmr->dmr_ctxt);
 		}
 		dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr,
 				drq ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED
@@ -367,9 +423,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm,
 		mach_port_t reply_port,
 	dispatch_queue_t drq = NULL;
 	if (dm->dm_is_xpc && dmsg->do_ctxt) {
-		drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt);
+		drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt);
 	}
-	if (unlikely(!drq && _dispatch_unote_wlh(dm->dm_recv_refs))) {
+	if (dm->dm_strict_reply) {
+		dmr->du_fflags |= MACH_MSG_STRICT_REPLY;
+	}
+	if (unlikely((!drq || drq == dm->_as_dq) &&
+			_dispatch_unote_wlh(dm->dm_recv_refs))) {
 		wlh = _dispatch_unote_wlh(dm->dm_recv_refs);
 		pri = dm->dq_priority;
 	} else if (dx_hastypeflag(drq, QUEUE_ROOT)) {
@@ -599,6 +659,18 @@ _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz,
 	return dmsg;
 }
 
+DISPATCH_NOINLINE
+static void
+_dispatch_mach_no_senders_invoke(dispatch_mach_t dm)
+{
+	if (!(_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) {
+		dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs;
+		_dispatch_client_callout4(dmrr->dmrr_handler_ctxt,
+				DISPATCH_MACH_NO_SENDERS, NULL, 0, dmrr->dmrr_handler_func);
+	}
+	_dispatch_perfmon_workitem_inc();
+}
+
 void
 _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
 		mach_msg_header_t *hdr, mach_msg_size_t siz,
@@ -622,6 +694,19 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
 		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
 			free(hdr);
 		}
+	} else if (hdr->msgh_id == MACH_NOTIFY_NO_SENDERS && dm->dm_arm_no_senders) {
+		if (dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) {
+			dispatch_continuation_t dc = _dispatch_continuation_alloc();
+			(void)_dispatch_continuation_init_f(dc, dm, dm,
+					(dispatch_function_t)_dispatch_mach_no_senders_invoke,
+					DISPATCH_BLOCK_HAS_PRIORITY | DISPATCH_BLOCK_NO_VOUCHER,
+					DC_FLAG_CONSUME);
+			_dispatch_continuation_async(dm, dc, 0, dc->dc_flags);
+		}
+		mach_msg_destroy(hdr);
+		if (flags & DISPATCH_EV_MSG_NEEDS_FREE) {
+			free(hdr);
+		}
 	} else {
 		// Once the mach channel disarming is visible, cancellation will switch
 		// to immediately destroy messages. If we're preempted here, then the
@@ -636,8 +721,13 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags,
 		_dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp);
 	}
 
-	if (unlikely(_dispatch_unote_needs_delete(du))) {
+	// Note: it is OK to do a relaxed load of the dq_state_bits as we only care
+	// about bits that are in the top bits of the 64-bit dq_state.
+	// This avoids an expensive CAS on 32-bit architectures.
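As an aside on the pattern used by the lines that follow: the state predicates only inspect bits in the high word, so a 32-bit target can test them with one ordinary 32-bit load rather than a 64-bit atomic read. A minimal model, assuming a little-endian split of the 64-bit state and an invented flag position:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define ACTIVATING_BIT	(1ull << 38)	/* hypothetical: lives in the top word */

typedef struct {
	uint32_t lo;
	_Atomic uint32_t hi;	/* top 32 bits of the logical 64-bit state */
} split_state_t;

static bool
is_activating_cheap(split_state_t *s)
{
	/* one relaxed 32-bit load; no 64-bit CAS/LL-SC needed on ILP32 */
	uint64_t state = (uint64_t)atomic_load_explicit(&s->hi,
			memory_order_relaxed) << 32;
	return (state & ACTIVATING_BIT) != 0;
}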
+ if (unlikely(_dispatch_unote_needs_delete(du) || + _dq_state_is_activating((uint64_t)dm->dq_state_bits << 32))) { return dx_wakeup(dm, 0, DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } return _dispatch_release_2_tailcall(dm); @@ -664,7 +754,7 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, if (dmsg) { dispatch_queue_t drq = NULL; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); @@ -691,11 +781,7 @@ DISPATCH_ALWAYS_INLINE static void _dispatch_mach_stack_probe(void *addr, size_t size) { -#if TARGET_OS_MAC && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) && \ - (defined(__x86_64__) || defined(__arm64__)) - // there should be a __has_feature() macro test - // for this, for now we approximate it, for when the compiler - // is generating calls to ____chkstk_darwin on our behalf +#if __has_feature(stack_check) (void)addr; (void)size; #else for (mach_vm_address_t p = mach_vm_trunc_page(addr + vm_page_size); @@ -730,6 +816,9 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, notify = send; options |= MACH_RCV_SYNC_WAIT; } + if (dm->dm_strict_reply) { + options |= MACH_MSG_STRICT_REPLY; + } retry: _dispatch_debug_machport(reply_port); @@ -944,7 +1033,7 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? &dwr->dwr_refs : NULL, @@ -992,6 +1081,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { goto out; } + if (dm->dm_arm_no_senders) { + _dispatch_mach_arm_no_senders(dm, true); + } dsrr->dmsr_checkin = NULL; } } @@ -1010,7 +1102,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, msg->msgh_remote_port); dispatch_assert(_dispatch_unote_registered(dsrr)); } - if (dsrr->dmsr_notification_armed) { + if (os_atomic_load(&dsrr->dmsr_notification_armed, relaxed)) { goto out; } opts |= MACH_SEND_NOTIFY; @@ -1034,6 +1126,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, _dispatch_priority_compute_propagated( _dispatch_qos_to_pp(qos), 0); } + if (reply_port && dm->dm_strict_reply) { + opts |= MACH_MSG_STRICT_REPLY; + } } _dispatch_debug_machport(msg->msgh_remote_port); if (reply_port) _dispatch_debug_machport(reply_port); @@ -1101,7 +1196,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (unlikely(kr)) { // Send failed, so reply was never registered if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? 
&dwr->dwr_refs : NULL,
@@ -1296,7 +1391,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags,
 		qos = _dmsr_state_max_qos(new_state);
 	if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) {
 		os_atomic_thread_fence(dependency);
-		dmsr = os_atomic_force_dependency_on(dmsr, new_state);
+		dmsr = os_atomic_inject_dependency(dmsr, new_state);
 		goto again;
 	}
 
@@ -1664,13 +1759,11 @@ _dispatch_mach_checkin_options(void)
 	return options;
 }
 
-
-
 static inline mach_msg_option_t
 _dispatch_mach_send_options(void)
 {
-	mach_msg_option_t options = 0;
-	return options;
+	// rdar://problem/13740985&47300191&47605096
+	return (_dispatch_is_background_thread() ? MACH_SEND_NOIMPORTANCE : 0);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -1682,7 +1775,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm,
 	if (dm->dm_is_xpc &&
 			(options & DISPATCH_MACH_WAIT_FOR_REPLY) == 0 &&
 			_dispatch_mach_msg_get_reply_port(dmsg)) {
 		dispatch_assert(
-				_dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt));
+				_dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt));
 	}
 #else
 	(void)dm;
@@ -2149,13 +2242,15 @@ _dispatch_mach_handoff_set_wlh(dispatch_ipc_handoff_t dih, dispatch_queue_t dq)
 {
 	while (likely(dq->do_targetq)) {
 		if (unlikely(_dispatch_queue_is_mutable(dq))) {
-			DISPATCH_CLIENT_CRASH(0,
-					"Trying to handoff IPC onto mutable hierarchy");
+			_dispatch_queue_sidelock_lock(upcast(dq)._dl);
+			_dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE);
+			_dispatch_queue_sidelock_unlock(upcast(dq)._dl);
 		}
 		if (_dq_state_is_base_wlh(dq->dq_state)) {
 			os_atomic_store(&dih->dih_wlh, (uint64_t)dq, relaxed);
 			return;
 		}
+		dq = dq->do_targetq;
 	}
 
 	/* unsupported hierarchy */
@@ -2169,12 +2264,13 @@ dispatch_mach_handoff_reply_f(dispatch_queue_t dq,
 	_dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port);
 	dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih;
 	dispatch_continuation_t dc = &dih->dih_dc;
+	uintptr_t dc_flags = DC_FLAG_CONSUME;
 
 	_dispatch_mach_handoff_set_wlh(dih, dq);
 	_dispatch_retain(dq);
 	dihc->dihc_dq = dq;
-	dihc->dihc_qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, 0);
-	dc->dc_data = (void *)dc->dc_flags;
+	dihc->dihc_qos = _dispatch_continuation_init_f(dc, dq, ctxt, func,
+			0, dc_flags);
 	dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF);
 }
 
@@ -2185,10 +2281,12 @@ dispatch_mach_handoff_reply(dispatch_queue_t dq,
 	_dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port);
 	dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih;
 	dispatch_continuation_t dc = &dih->dih_dc;
+	uintptr_t dc_flags = DC_FLAG_CONSUME;
+
 	_dispatch_mach_handoff_set_wlh(dih, dq);
 	_dispatch_retain(dq);
 	dihc->dihc_dq = dq;
-	dihc->dihc_qos = _dispatch_continuation_init(dc, dq, block, 0, 0);
+	dihc->dihc_qos = _dispatch_continuation_init(dc, dq, block, 0, dc_flags);
 	dc->dc_data = (void *)dc->dc_flags;
 	dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF);
 }
@@ -2253,10 +2351,13 @@ _dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc,
 
 	_dispatch_thread_context_push(&dihc.dihc_dtc);
 
-	_dispatch_continuation_pop_forwarded(dc, dc_flags, cq, {
+	// DC_FLAG_CONSUME has been set, as we want the block and vouchers
+	// to be consumed; however, the continuation is not from the continuation
+	// cache and its lifetime is managed explicitly by the handoff mechanism.
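Background for the _no_free pop variant invoked just below: the handoff continuation is embedded inside the dispatch_ipc_handoff_s allocation rather than taken from the per-thread continuation cache, so its storage is reclaimed with the enclosing object and must never go through _dispatch_continuation_free(). A rough stand-in sketch (only dih_wlh and dih_dc are visible in this patch; the stub fields merely approximate struct dispatch_continuation_s):

#include <stdint.h>

struct continuation_stub {	/* stand-in for struct dispatch_continuation_s */
	const void *do_vtable;
	uintptr_t dc_flags;
	void *dc_ctxt;
};

struct ipc_handoff_stub {
	uint64_t dih_wlh;	/* wlh hint stored by _dispatch_mach_handoff_set_wlh() */
	struct continuation_stub dih_dc;	/* embedded: freed with the whole object */
};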
+ DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DC_FLAG_CONSUME); + _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, cq, { dispatch_invoke_with_autoreleasepool(flags, { _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_trace_item_complete(dc); }); }); @@ -2505,9 +2606,18 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dispatch_assert(!dm->ds_is_installed); dm->ds_is_installed = true; - if (!cancelled && dmrr->du_ident) { - (void)_dispatch_unote_register(dmrr, wlh, pri); - dispatch_assert(dmrr->du_is_direct); + uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, + dmsr_disconnect_cnt, relaxed); + if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + } + + if (!dm->dq_priority) { + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the FALLBACK flag + // is used so that the priority of the channel doesn't act as + // a QoS floor for incoming messages (26761457) + dm->dq_priority = pri; } if (!cancelled && dm->dm_is_xpc && @@ -2519,32 +2629,28 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dm->dm_xpc_term_refs = _dxtr; _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); } - if (!dm->dq_priority) { - // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular sources or queues, the FALLBACK flag - // is used so that the priority of the channel doesn't act as - // a QoS floor for incoming messages (26761457) - dm->dq_priority = pri; - } - uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, - dmsr_disconnect_cnt, relaxed); - if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { - DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + if (!cancelled && dmrr->du_ident) { + dispatch_assert(dmrr->du_is_direct); + // rdar://45419440 this absolutely needs to be done last + // as this can cause an event to be delivered + // and to finish the activation concurrently + (void)_dispatch_unote_register(dmrr, wlh, pri); } } void -_dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume) +_dispatch_mach_activate(dispatch_mach_t dm) { dispatch_priority_t pri; dispatch_wlh_t wlh; // call "super" - _dispatch_lane_activate(dm, allow_resume); + _dispatch_lane_activate(dm); if (!dm->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(dm, &wlh); + // rdar://45419440 this needs to be last if (pri) _dispatch_mach_install(dm, wlh, pri); } } @@ -2639,7 +2745,8 @@ _dispatch_mach_invoke2(dispatch_mach_t dm, } if (dmsr->dmsr_tail) { - if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; // The channel has pending messages to send. @@ -2726,7 +2833,8 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, goto done; } - if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? 
_dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; if (unlikely(requires_mgr)) { @@ -2898,9 +3006,15 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) DISPATCH_ALWAYS_INLINE static dispatch_queue_t -_dispatch_mach_msg_context_async_reply_queue(void *msg_context) +_dispatch_mach_msg_context_async_reply_queue(dispatch_mach_t dm, + void *msg_context) { - return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + dispatch_queue_t dq; + dq = _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + if (dq == DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF) { + dq = dm->_as_dq; + } + return dq; } static dispatch_continuation_t @@ -2939,6 +3053,37 @@ _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, #pragma mark - #pragma mark dispatch_mig_server +static inline kern_return_t +_dispatch_mig_return_code(mig_reply_error_t *msg) +{ + if (msg->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + return KERN_SUCCESS; + } + return msg->RetCode; +} + +static inline void +_dispatch_mig_consume_unsent_message(mach_msg_header_t *hdr) +{ + mach_port_t port = hdr->msgh_local_port; + if (MACH_PORT_VALID(port)) { + kern_return_t kr = KERN_SUCCESS; + switch (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits)) { + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + kr = mach_port_deallocate(mach_task_self(), port); + break; + case MACH_MSG_TYPE_MOVE_RECEIVE: + kr = mach_port_mod_refs(mach_task_self(), port, + MACH_PORT_RIGHT_RECEIVE, -1); + break; + } + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + mach_msg_destroy(hdr); +} + mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) @@ -2948,7 +3093,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; - mach_msg_return_t kr = 0; + mach_msg_return_t kr = 0, skr; uint64_t assertion_token = 0; uint32_t cnt = 1000; // do not stall out serial queues boolean_t demux_success; @@ -2987,15 +3132,13 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, switch (kr) { case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; case MACH_RCV_TIMED_OUT: // Don't return an error if a message was sent this time or // a message was successfully received previously // rdar://problems/7363620&7791738 - if(bufReply->Head.msgh_remote_port || received) { + if (bufReply->Head.msgh_remote_port || received) { kr = MACH_MSG_SUCCESS; } break; @@ -3005,7 +3148,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, case MACH_RCV_TOO_LARGE: // receive messages that are too large and log their id and size // rdar://problem/8422992 - tmp_options &= ~MACH_RCV_LARGE; + tmp_options &= ~(MACH_RCV_LARGE | MACH_SEND_MSG); size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; void *large_buf = malloc(large_size); if (large_buf) { @@ -3020,9 +3163,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "requested size %zd: id = 0x%x, size = %d", maxmsgsz, bufReply->Head.msgh_id, bufReply->Head.msgh_size); - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + mach_msg_destroy(&bufReply->Head); } if (large_buf) { free(large_buf); @@ -3069,21 +3210,21 @@ 
dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { + skr = MIG_BAD_ID; + } else { + skr = _dispatch_mig_return_code(bufReply); + } + switch (skr) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). bufRequest->Head.msgh_remote_port = 0; mach_msg_destroy(&bufRequest->Head); - } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode - // is present - if (unlikely(bufReply->RetCode)) { - if (bufReply->RetCode == MIG_NO_REPLY) { - continue; - } - - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } } if (bufReply->Head.msgh_remote_port) { @@ -3160,15 +3301,18 @@ dispatch_mach_mig_demux(void *context, desc->stub_routine(hdr, &bufReply->Head); - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present - if (unlikely(!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) && - bufReply->RetCode)) { + switch (_dispatch_mig_return_code(bufReply)) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). hdr->msgh_remote_port = 0; - if (bufReply->RetCode != MIG_NO_REPLY && - (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - mach_msg_destroy(hdr); - } + mach_msg_destroy(hdr); + break; } if (bufReply->Head.msgh_remote_port) { @@ -3184,9 +3328,7 @@ dispatch_mach_mig_demux(void *context, break; case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; default: DISPATCH_VERIFY_MIG(kr); @@ -3228,7 +3370,7 @@ _dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) target && target->dq_label ? target->dq_label : "", target, (mach_port_t)dmrr->du_ident, dmsr->dmsr_send, (mach_port_t)dmsr->du_ident, - dmsr->dmsr_notification_armed ? " (armed)" : "", + os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) ? " (armed)" : "", dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? 
" (pending)" : "", dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt, (bool)(dm->dq_atomic_flags & DSF_CANCELED)); diff --git a/src/mach_internal.h b/src/mach_internal.h index 90a59845a..9f1840eac 100644 --- a/src/mach_internal.h +++ b/src/mach_internal.h @@ -99,7 +99,7 @@ void _dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc, void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free); -void _dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume); +void _dispatch_mach_activate(dispatch_mach_t dm); void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, diff --git a/src/object.c b/src/object.c index 261e1996d..4eb49fda8 100644 --- a/src/object.c +++ b/src/object.c @@ -86,18 +86,37 @@ _os_object_retain_with_resurrect(_os_object_t obj) return obj; } -DISPATCH_NOINLINE -void -_os_object_release(_os_object_t obj) +DISPATCH_ALWAYS_INLINE +static inline bool +_os_object_release_inline(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_dec(obj); if (likely(xref_cnt >= 0)) { - return; + return false; } if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } - return _os_object_xref_dispose(obj); + return true; +} + + +DISPATCH_NOINLINE +void +_os_object_release(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_xref_dispose(obj); + } +} + +DISPATCH_NOINLINE +void +_os_object_release_without_xref_dispose(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_release_internal(obj); + } } bool @@ -183,32 +202,37 @@ void dispatch_release(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou); - _os_object_release(dou._os_obj); + if (_os_object_release_inline(dou._os_obj)) { + // bypass -_xref_dispose to avoid the dynamic dispatch + _os_object_xrefcnt_dispose_barrier(dou._os_obj); + _dispatch_xref_dispose(dou); + } } -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) { if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { _dispatch_queue_xref_dispose(dou._dq); - } - switch (dx_type(dou._do)) { - case DISPATCH_SOURCE_KEVENT_TYPE: - _dispatch_source_xref_dispose(dou._ds); - break; + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: + _dispatch_source_xref_dispose(dou._ds); + break; + case DISPATCH_CHANNEL_TYPE: + _dispatch_channel_xref_dispose(dou._dch); + break; #if HAVE_MACH - case DISPATCH_MACH_CHANNEL_TYPE: - _dispatch_mach_xref_dispose(dou._dm); - break; + case DISPATCH_MACH_CHANNEL_TYPE: + _dispatch_mach_xref_dispose(dou._dm); + break; #endif - case DISPATCH_QUEUE_RUNLOOP_TYPE: - _dispatch_runloop_queue_xref_dispose(dou._dl); - break; + case DISPATCH_QUEUE_RUNLOOP_TYPE: + _dispatch_runloop_queue_xref_dispose(dou._dl); + break; + } } return _dispatch_release_tailcall(dou._os_obj); } -#endif void _dispatch_dispose(dispatch_object_t dou) @@ -303,7 +327,7 @@ dispatch_activate(dispatch_object_t dou) return _dispatch_workloop_activate(dou._dwl); } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - return _dispatch_lane_resume(dou._dl, true); + return _dispatch_lane_resume(dou._dl, DISPATCH_ACTIVATE); } } @@ -329,7 +353,7 @@ dispatch_resume(dispatch_object_t dou) return; } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - _dispatch_lane_resume(dou._dl, false); + 
_dispatch_lane_resume(dou._dl, DISPATCH_RESUME); } } diff --git a/src/object.m b/src/object.m index 925fccc43..936795871 100644 --- a/src/object.m +++ b/src/object.m @@ -124,7 +124,7 @@ { struct _os_object_s *o = (struct _os_object_s *)obj; _os_object_refcnt_dispose_barrier(o); - [obj _dispose]; + _os_object_dealloc(obj); } #undef os_retain @@ -170,7 +170,7 @@ -(id)retain { } -(oneway void)release { - return _os_object_release(self); + return _os_object_release_without_xref_dispose(self); } -(NSUInteger)retainCount { @@ -194,10 +194,6 @@ - (void)_xref_dispose { return _os_object_release_internal(self); } -- (void)_dispose { - return _os_object_dealloc(self); -} - @end #pragma mark - @@ -281,16 +277,9 @@ - (void)_dispose { #pragma mark - #pragma mark _dispatch_object -// Force non-lazy class realization rdar://10640168 -#define DISPATCH_OBJC_LOAD() + (void)load {} - @implementation DISPATCH_CLASS(object) DISPATCH_UNAVAILABLE_INIT() -- (void)_dispose { - return _dispatch_dispose(self); // calls _os_object_dealloc() -} - - (NSString *)debugDescription { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; @@ -306,16 +295,20 @@ - (NSString *)debugDescription { return [nsstring stringWithFormat:format, object_getClassName(self), buf]; } -- (void)dealloc DISPATCH_NORETURN { - DISPATCH_INTERNAL_CRASH(0, "Calling dealloc on a dispatch object"); - [super dealloc]; // make clang happy +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wobjc-missing-super-calls" +- (void)dealloc { + return _dispatch_dispose(self); } +#pragma clang diagnostic pop @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); @@ -333,9 +326,25 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS +@implementation DISPATCH_CLASS(channel) +OS_OBJECT_NONLAZY_CLASS_LOAD +DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + _dispatch_channel_xref_dispose(self); + [super _xref_dispose]; +} + +@end + +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(source) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -345,9 +354,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(mach) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -357,9 +368,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue_runloop) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -370,8 +383,9 @@ - (void)_xref_dispose { @end #define DISPATCH_CLASS_IMPL(name) \ + OS_OBJECT_NONLAZY_CLASS \ @implementation DISPATCH_CLASS(name) \ - DISPATCH_OBJC_LOAD() \ + OS_OBJECT_NONLAZY_CLASS_LOAD \ DISPATCH_UNAVAILABLE_INIT() \ @end @@ -395,9 +409,10 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +OS_OBJECT_NONLAZY_CLASS @implementation 
OS_OBJECT_CLASS(voucher) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() -(id)retain { return (id)_voucher_retain_inline((struct voucher_s *)self); @@ -407,12 +422,9 @@ -(oneway void)release { return _voucher_release_inline((struct voucher_s *)self); } -- (void)_xref_dispose { - return _voucher_xref_dispose(self); // calls _os_object_release_internal() -} - -- (void)_dispose { - return _voucher_dispose(self); // calls _os_object_dealloc() +- (void)dealloc { + _voucher_dispose(self); + [super dealloc]; } - (NSString *)debugDescription { @@ -428,13 +440,10 @@ - (NSString *)debugDescription { @end #if VOUCHER_ENABLE_RECIPE_OBJECTS +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher_recipe) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() - -- (void)_dispose { - -} - (NSString *)debugDescription { return nil; // TODO: voucher_recipe debugDescription @@ -443,7 +452,6 @@ - (NSString *)debugDescription { @end #endif - #pragma mark - #pragma mark dispatch_last_resort_autorelease_pool diff --git a/src/object_internal.h b/src/object_internal.h index 6985decc7..e82c469e7 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -199,7 +199,7 @@ #define DISPATCH_QUEUE_VTABLE_HEADER(x); \ DISPATCH_OBJECT_VTABLE_HEADER(x); \ - void (*const dq_activate)(dispatch_queue_class_t, bool *allow_resume); \ + void (*const dq_activate)(dispatch_queue_class_t); \ void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ dispatch_wakeup_flags_t); \ void (*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ @@ -240,7 +240,7 @@ #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif -DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_wakeup_flags, uint32_t, // The caller of dx_wakeup owns two internal refcounts on the object being // woken up. Two are needed for WLH wakeups where two threads need // the object to remain valid in a non-coordinated way @@ -262,6 +262,9 @@ DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, // This wakeup may cause the source to leave its DSF_NEEDS_EVENT state DISPATCH_WAKEUP_EVENT = 0x00000010, + + // This wakeup is allowed to clear the ACTIVATING state of the object + DISPATCH_WAKEUP_CLEAR_ACTIVATING = 0x00000020, ); typedef struct dispatch_invoke_context_s { @@ -288,7 +291,7 @@ typedef struct dispatch_invoke_context_s { #define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ #endif -DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_NONE = 0x00000000, // Invoke modes @@ -335,7 +338,7 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, // @const DISPATCH_INVOKE_THREAD_BOUND // We're draining from the context of a thread-bound queue (main thread) // - // @const DISPATCH_INVOKE_WORKER_DRAIN + // @const DISPATCH_INVOKE_WORKLOOP_DRAIN // The queue at the bottom of this drain is a workloop that supports // reordering. 
// @@ -359,7 +362,7 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, #define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u ); -DISPATCH_ENUM(dispatch_object_flags, unsigned long, +DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_META_TYPE_MASK = 0x000000ff, // mask for object meta-types _DISPATCH_TYPE_CLUSTER_MASK = 0x000000f0, // mask for the cluster type _DISPATCH_SUB_TYPE_MASK = 0x0000ff00, // mask for object sub-types @@ -419,7 +422,8 @@ DISPATCH_ENUM(dispatch_object_flags, unsigned long, _DISPATCH_QUEUE_BASE_TYPEFLAG, DISPATCH_SOURCE_KEVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(1, SOURCE), - DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), + DISPATCH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), + DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(3, SOURCE), ); typedef struct _os_object_vtable_s { @@ -467,6 +471,9 @@ typedef struct _os_object_s { return [super init]; \ } +#define DISPATCH_OBJECT_USES_XREF_DISPOSE() \ + OS_OBJECT_USES_XREF_DISPOSE() + _OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); DISPATCH_CLASS_DECL_BARE(object, OBJECT); @@ -480,9 +487,7 @@ size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, void *_dispatch_object_alloc(const void *vtable, size_t size); void _dispatch_object_finalize(dispatch_object_t dou); void _dispatch_object_dealloc(dispatch_object_t dou); -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); -#endif void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT #if USE_OBJC diff --git a/src/protocol.defs b/src/protocol.defs index 7a9cf1898..6129f3f1a 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -20,6 +20,7 @@ #include #include +import ; // '64' is used to align with Mach notifications and so that we don't fight // with the notify symbols in Libsystem @@ -28,6 +29,8 @@ subsystem libdispatch_internal_protocol 64; serverprefix _dispatch_; userprefix _dispatch_send_; +ConsumeOnSendError Timeout; + skip; /* was MACH_NOTIFY_FIRST: 64 */ /* MACH_NOTIFY_PORT_DELETED: 65 */ diff --git a/src/queue.c b/src/queue.c index 63db5f5fd..68dd28aa7 100644 --- a/src/queue.c +++ b/src/queue.c @@ -36,6 +36,9 @@ static inline void _dispatch_queue_wakeup_with_override( static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, struct dispatch_object_s *dc, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t owned); +static inline bool +_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, + uint64_t dq_state); #pragma mark - #pragma mark dispatch_assert_queue @@ -123,7 +126,7 @@ void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, mach_voucher_t kv) { - _pthread_set_flags_t pflags = 0; + _pthread_set_flags_t pflags = (_pthread_set_flags_t)0; if (pp && _dispatch_set_qos_class_enabled) { pthread_priority_t old_pri = _dispatch_get_priority(); if (pp != old_pri) { @@ -937,7 +940,7 @@ _dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } return _dispatch_lane_barrier_complete(dq, 0, flags); } @@ -1183,8 +1186,8 @@ _dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, } if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // _dispatch_barrier_async_and_wait_f_slow() expects dc_other to be the - // bottom queue of the graph + // We're in case (2) of 
_dispatch_async_and_wait_f_slow() which expects
+		// dc_other to be the bottom queue of the graph
 		dsc->dc_other = dq;
 	}
 	return _dispatch_waiter_wake_wlh_anon(dsc);
@@ -1254,11 +1257,6 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu,
 		return dx_push(tq, dsc, _dq_state_max_qos(old_state));
 	}
 
-	if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) {
-		// _dispatch_async_and_wait_f_slow() expects dc_other to be the
-		// bottom queue of the graph
-		dsc->dc_other = dq;
-	}
 #if DISPATCH_INTROSPECTION
 	if (dsc->dsc_from_async) {
 		_dispatch_trace_runtime_event(async_sync_handoff, dq, 0);
@@ -1266,6 +1264,12 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu,
 		_dispatch_trace_runtime_event(sync_sync_handoff, dq, 0);
 	}
 #endif // DISPATCH_INTROSPECTION
+
+	if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) {
+		// Falling into case (2) of _dispatch_async_and_wait_f_slow, dc_other is
+		// the bottom queue
+		dsc->dc_other = dq;
+	}
 	return _dispatch_waiter_wake(dsc, wlh, old_state, new_state);
 }
 
@@ -1724,6 +1728,8 @@ _dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
 	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
 
 	if (dsc.dsc_func == NULL) {
+		// dsc_func being cleared means that the block ran on another thread,
+		// i.e. case (2) as listed in _dispatch_async_and_wait_f_slow.
 		dispatch_queue_t stop_dq = dsc.dc_other;
 		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
 	}
@@ -1981,6 +1987,34 @@ static void
 _dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags,
 		dispatch_sync_context_t dsc, dispatch_queue_t tq)
 {
+	/* dc_other is an in-out parameter.
+	 *
+	 * As an in-param, it specifies the top queue on which the blocking
+	 * primitive is called.
+	 *
+	 * As an out-param, it refers to the queue up till which we have the drain
+	 * lock. This is slightly different depending on how we come out of
+	 * _WAIT_FOR_QUEUE.
+	 *
+	 * Case 1:
+	 * If the continuation is to be invoked on another thread - for
+	 * async_and_wait, or we ran on a thread-bound main queue - then someone
+	 * already called _dispatch_async_and_wait_invoke which invoked the block
+	 * already. dc_other as an out-param here tells the enqueuer the queue up
+	 * till which it got the drain lock, so that it knows what to unlock
+	 * on the way out. This is the case where the enqueuer owns part of the
+	 * locks in the queue hierarchy (but not all).
+	 *
+	 * Case 2:
+	 * If the continuation is to be invoked on the enqueuing thread - because
+	 * we were contending with another sync or async_and_wait - then the
+	 * enqueuer returns from _WAIT_FOR_QUEUE without having invoked the block.
+	 * The enqueuer has had the locks for the rest of the queue hierarchy
+	 * handed off to it, so dc_other specifies the queue up till which it has
+	 * the locks, which in this case is the bottom queue in the hierarchy. So
+	 * it needs to unlock everything up till the bottom queue, on the way out.
+ */ + __DISPATCH_WAIT_FOR_QUEUE__(dsc, tq); if (unlikely(dsc->dsc_func == NULL)) { @@ -2008,11 +2042,19 @@ _dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, dispatch_tid tid, - uintptr_t dc_flags) +_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags) { uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { + // Remove the async_and_wait flag but drive down the slow path so that + // we do the synchronous wait. We are guaranteed that dq is the base + // queue. + // + // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so + // set dc_other to dq + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return false; } if (likely(dc_flags & DC_FLAG_BARRIER)) { @@ -2032,7 +2074,8 @@ _dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, _dispatch_trace_item_push(top_dq, dsc); for (;;) { - if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, tid, dc_flags))){ + if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, dsc, tid, + dc_flags))) { return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); } @@ -2528,19 +2571,6 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } -DISPATCH_ALWAYS_INLINE -static void -_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) -{ - uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; -#ifndef __LP64__ - dq_state >>= 32; -#endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch queue/source property setter called after activation"); -} - DISPATCH_ALWAYS_INLINE static void _dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) @@ -2835,9 +2865,17 @@ void _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) { dispatch_lane_t dq = dqu._dl; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + if (unlikely(dq->dq_items_tail)) { + DISPATCH_CLIENT_CRASH(dq->dq_items_tail, + "Release of a queue while items are enqueued"); + } + dq->dq_items_head = (void *)0x200; + dq->dq_items_tail = (void *)0x200; + uint64_t orig_dq_state, dq_state; + dq_state = orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); if (dx_hastypeflag(dq, QUEUE_ROOT)) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } @@ -2846,23 +2884,15 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; if (unlikely(dq_state != initial_state)) { if (_dq_state_drain_locked(dq_state)) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a locked queue"); } #ifndef __LP64__ - dq_state >>= 32; + orig_dq_state >>= 32; #endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a queue with corrupt state"); } - - if (unlikely(dq->dq_items_tail)) { - DISPATCH_CLIENT_CRASH(dq->dq_items_tail, - "Release of a queue while items are enqueued"); - } - dq->dq_items_head = (void *)0x200; - dq->dq_items_tail = (void *)0x200; - _dispatch_queue_dispose(dqu, allow_free); } @@ -2881,7 +2911,7 @@ 
_dispatch_queue_xref_dispose(dispatch_queue_t dq) if (unlikely(_dq_state_is_suspended(dq_state))) { long state = (long)dq_state; if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); - if (unlikely(_dq_state_is_inactive(dq_state))) { + if (unlikely(dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { // Arguments for and against this assert are within 6705399 DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); } @@ -2982,26 +3012,23 @@ _dispatch_lane_resume_slow(dispatch_lane_t dq) retry: _dispatch_queue_sidelock_unlock(dq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } DISPATCH_NOINLINE static void _dispatch_lane_resume_activate(dispatch_lane_t dq) { - bool allow_resume = true; - // Step 2: run the activation finalizer if (dx_vtable(dq)->dq_activate) { - dx_vtable(dq)->dq_activate(dq, &allow_resume); - } - // Step 3: consume the suspend count - if (allow_resume) { - return _dispatch_lane_resume(dq, false); + dx_vtable(dq)->dq_activate(dq); } + + _dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE); } +DISPATCH_NOINLINE void -_dispatch_lane_resume(dispatch_lane_t dq, bool activate) +_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) { // covers all suspend and inactive bits, including side suspend bit const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; @@ -3016,57 +3043,86 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); uint64_t old_state, new_state; + // // Activation is a bit tricky as it needs to finalize before the wakeup. // - // If after doing its updates to the suspend count and/or inactive bit, - // the last suspension related bit that would remain is the - // NEEDS_ACTIVATION one, then this function: + // The inactive bits have 4 states: + // - 11: INACTIVE + // - 10: ACTIVATED, but not activating yet + // - 01: ACTIVATING right now + // - 00: fully active + // + // ACTIVATED is only used when the queue is otherwise also suspended. + // In that case the last resume will take over the activation. // - // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into - // a suspend count) - // 2. runs the activation finalizer - // 3. consumes the suspend count set in (1), and finishes the resume flow + // The ACTIVATING state is tricky because it may be cleared by sources + // firing, to avoid priority inversion problems such as rdar://45419440 + // where as soon as the kevent is installed, the source may fire + // before its activating state was cleared. // - // Concurrently, some property setters such as setting dispatch source - // handlers or _dispatch_lane_set_target_queue try to do in-place changes - // before activation. These protect their action by taking a suspend count. - // Step (1) above cannot happen if such a setter has locked the object.
- if (activate) { + if (op == DISPATCH_ACTIVATE) { // relaxed atomic because this doesn't publish anything, this is only // about picking the thread that gets to finalize the activation os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if ((old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; - } else if (_dq_state_is_inactive(old_state)) { - // { sc:>0 i:1 na:1 } -> { i:0 na:1 } - // simple activation because sc is not 0 - // resume will deal with na:1 later - new_state = old_state - DISPATCH_QUEUE_INACTIVE; - } else { - // object already active, this is a no-op, just exit + if (!_dq_state_is_inactive(old_state)) { + // object already active or activated os_atomic_rmw_loop_give_up(return); } + if (unlikely(_dq_state_suspend_cnt(old_state))) { + // { sc != 0, i = INACTIVE } -> i = ACTIVATED + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATED; + } else { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATING; + } + }); + } else if (op == DISPATCH_ACTIVATION_DONE) { + // release barrier needed to publish the effect of dq_activate() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) { + os_atomic_rmw_loop_give_up({ + // object activation was already concurrently done + // due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING + // wakeup call. + // + // We still need to consume the internal refcounts because + // the wakeup doesn't take care of these. + return _dispatch_release_2_tailcall(dq); + }); + } + + new_state = old_state - DISPATCH_QUEUE_ACTIVATING; + if (!_dq_state_is_runnable(new_state)) { + // Out of width or still suspended. 
+ // For the former, force _dispatch_lane_non_barrier_complete + // to reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(new_state)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else { + // clear overrides and force a wakeup + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } }); + if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { + DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state"); + } } else { // release barrier needed to publish the effect of // - dispatch_set_target_queue() // - dispatch_set_*_handler() - // - dq_activate() os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - if ((old_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL - + DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; - } else if (is_source && (old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; + new_state = old_state; + if (is_source && (old_state & suspend_bits) == + DISPATCH_QUEUE_INACTIVE) { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state -= DISPATCH_QUEUE_INACTIVE; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (unlikely(os_sub_overflow(old_state, DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) { // underflow means over-resume or a suspend count transfer @@ -3080,6 +3136,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) // // below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL // + } else if (_dq_state_is_activated(new_state)) { + // { sc = 1, i = ACTIVATED } -> i = ACTIVATING + new_state -= DISPATCH_QUEUE_ACTIVATED; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (!_dq_state_is_runnable(new_state)) { // Out of width or still suspended. 
// For the former, force _dispatch_lane_non_barrier_complete @@ -3107,20 +3167,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) }); } - if ((old_state ^ new_state) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + if (_dq_state_is_activating(new_state)) { return _dispatch_lane_resume_activate(dq); } - if (activate) { - // if we're still in an activate codepath here we should have - // { sc:>0 na:1 }, if not we've got a corrupt state - if (unlikely(!_dq_state_is_suspended(new_state))) { - DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); - } - return; - } - if (_dq_state_is_suspended(new_state)) { return; } @@ -3130,7 +3180,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } // Balancing the retain_2 done in suspend() for rdar://8181908 dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; @@ -3257,6 +3307,11 @@ _dispatch_lane_legacy_set_target_queue(void *ctxt) // see _dispatch_queue_wakeup() _dispatch_queue_sidelock_lock(dq); #endif + if (unlikely(!_dispatch_queue_is_mutable(dq))) { + /* serialize with _dispatch_mach_handoff_set_wlh */ + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); + } dq->do_targetq = tq; #if HAVE_PTHREAD_WORKQUEUE_QOS // see _dispatch_queue_wakeup() @@ -3278,7 +3333,7 @@ _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq) if (_dispatch_lane_try_inactive_suspend(dq)) { _dispatch_object_set_target_queue_inline(dq, tq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } #if !DISPATCH_ALLOW_NON_LEAF_RETARGET @@ -3345,8 +3400,10 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) } if (_dq_state_is_inactive(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); - } else if (_dq_state_needs_activation(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); + } else if (_dq_state_is_activated(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activated"); + } else if (_dq_state_is_activating(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activating"); } if (_dq_state_is_enqueued(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); @@ -3399,7 +3456,7 @@ static struct { uint64_t volatile time_total; uint64_t volatile count_total; uint64_t volatile thread_total; -} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS] DISPATCH_ATOMIC64_ALIGN; DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; void @@ -3767,8 +3824,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, } void -_dispatch_lane_activate(dispatch_lane_class_t dq, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_lane_activate(dispatch_lane_class_t dq) { dispatch_queue_t tq = dq._dl->do_targetq; dispatch_priority_t pri = dq._dl->dq_priority; @@ -4010,6 +4066,19 @@ dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; } +#if DISPATCH_IOHID_SPI +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, + dispatch_pthread_root_queue_observer_hooks_t 
observer_hooks) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dwl->dwl_attr->dwla_observers = *observer_hooks; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; +} +#endif + static void _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, pthread_attr_t *attr) @@ -4036,7 +4105,7 @@ static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue .do_ctxt = NULL, .dq_label = "com.apple.root.workloop-custom", .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | DISPATCH_PRIORITY_SATURATED_OVERRIDE, .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, .dgq_thread_pool_size = 1, @@ -4137,10 +4206,13 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) void _dispatch_workloop_activate(dispatch_workloop_t dwl) { - uint64_t dq_state = os_atomic_and_orig2o(dwl, dq_state, - ~DISPATCH_QUEUE_INACTIVE, relaxed); + // This transitions either: + // - from INACTIVE to ACTIVATING + // - or from ACTIVE to ACTIVE + uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state, + ~DISPATCH_QUEUE_ACTIVATED, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) { + if (likely(_dq_state_is_inactive(old_state))) { if (dwl->dwl_attr) { // Activation of a workloop with attributes forces us to create // the workloop up front and register the attributes with the @@ -4152,10 +4224,8 @@ _dispatch_workloop_activate(dispatch_workloop_t dwl) _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); } dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_NEEDS_ACTIVATION, - relaxed); - _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); - return; + os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); + return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); } } @@ -4197,9 +4267,15 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, uint64_t *owned) { + dispatch_workloop_attr_t dwl_attr = dwl->dwl_attr; dispatch_thread_frame_s dtf; struct dispatch_object_s *dc = NULL, *next_dc; + if (dwl_attr && + (dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS)) { + _dispatch_set_pthread_root_queue_observer_hooks( + &dwl_attr->dwla_observers); + } _dispatch_thread_frame_push(&dtf, dwl); for (;;) { @@ -4236,10 +4312,12 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, *owned = (*owned & DISPATCH_QUEUE_ENQUEUED) + DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return NULL; out_with_barrier_waiter: _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return dwl->do_targetq; } @@ -4249,7 +4327,7 @@ _dispatch_workloop_invoke(dispatch_workloop_t dwl, { flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN; - _dispatch_queue_class_invoke(dwl, dic, flags, 0,_dispatch_workloop_invoke2); + _dispatch_queue_class_invoke(dwl, dic, flags, 0, _dispatch_workloop_invoke2); } DISPATCH_ALWAYS_INLINE @@ -4527,7 +4605,7 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, uint64_t old_state, new_state; os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { - new_state = _dq_state_merge_qos(old_state, qos); + new_state = 
_dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; if (unlikely(_dq_state_drain_locked(old_state))) { // not runnable, so we should just handle overrides @@ -4540,6 +4618,12 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, } }); + if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && + _dispatch_async_and_wait_should_always_async(dwl, new_state)) { + dsc->dc_other = dwl; + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + } + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { @@ -4837,7 +4921,19 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); - if (likely(!_dq_state_is_suspended(old_state) && + if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) { + // When an event is being delivered to a source because its + // unote was being registered before the ACTIVATING state + // had a chance to be cleared, we don't want to fail the wakeup + // which could lead to a priority inversion. + // + // Instead, these wakeups are allowed to finish the pending + // activation. + if (_dq_state_is_activating(old_state)) { + new_state &= ~DISPATCH_QUEUE_ACTIVATING; + } + } + if (likely(!_dq_state_is_suspended(new_state) && !_dq_state_is_enqueued(old_state) && (!_dq_state_drain_locked(old_state) || (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR && @@ -4882,8 +4978,10 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, // uint64_t old_state, new_state; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if (!_dq_state_drain_locked(old_state) || - !_dq_state_is_enqueued(old_state)) { + // Avoid spurious override if the item was drained before we could + // apply an override + if (!_dq_state_drain_locked(old_state) && + !_dq_state_is_enqueued(old_state)) { os_atomic_rmw_loop_give_up(goto done); } new_state = _dq_state_merge_qos(old_state, qos); @@ -4947,6 +5045,10 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, if (unlikely(_dispatch_queue_push_item(dq, dsc))) { if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { + // If this returns true, we know that we are pushing onto the base + // queue + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -5065,6 +5167,379 @@ _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, _dispatch_lane_push(dq, dou, qos); } +#pragma mark - +#pragma mark dispatch_channel_t + +void +_dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free) +{ + dch->dch_callbacks = NULL; + _dispatch_lane_class_dispose(dch, allow_free); +} + +void +_dispatch_channel_xref_dispose(dispatch_channel_t dch) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch->_as_dq); + if (callbacks->dcc_acknowledge_cancel && !(dqf & DSF_CANCELED)) { + DISPATCH_CLIENT_CRASH(dch, "Release of a channel that has not been " + "cancelled, but has a cancel acknowledgement callback"); + } + dx_wakeup(dch, 0, DISPATCH_WAKEUP_MAKE_DIRTY); +} + +typedef struct dispatch_channel_invoke_ctxt_s { + dispatch_channel_t dcic_dch; + dispatch_thread_frame_s dcic_dtf; + dispatch_invoke_context_t dcic_dic; + dispatch_invoke_flags_t dcic_flags; + dispatch_queue_wakeup_target_t dcic_tq; + struct dispatch_object_s 
*dcic_next_dc; + bool dcic_called_drain; +} dispatch_channel_invoke_ctxt_s; + +static bool +_dispatch_channel_invoke_cancel_check(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t ctxt, + dispatch_channel_callbacks_t callbacks) +{ + bool rc = true; + if (!dch->dm_cancel_handler_called) { + if (_dispatch_queue_atomic_flags(dch) & DSF_CANCELED) { + dispatch_invoke_with_autoreleasepool(ctxt->dcic_flags, { + rc = callbacks->dcc_acknowledge_cancel(dch, dch->do_ctxt); + }); + if (rc) { + dch->dm_cancel_handler_called = true; + _dispatch_release_no_dispose(dch); + } else { + ctxt->dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + } + return rc; +} + +static bool +_dispatch_channel_invoke_checks(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_callbacks_t callbacks) +{ + if (!_dispatch_channel_invoke_cancel_check(dch, dcic, callbacks)) { + return false; + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (likely(dcic->dcic_flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + } + if (unlikely(_dispatch_queue_drain_should_narrow(dcic->dcic_dic))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + uint64_t dq_state = os_atomic_load(&dch->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_channel_invoke2(dispatch_channel_t dch, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned DISPATCH_UNUSED) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_channel_invoke_ctxt_s dcic = { + .dcic_dch = dch, + .dcic_dic = dic, + .dcic_flags = flags & + ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN, + .dcic_tq = DISPATCH_QUEUE_WAKEUP_NONE, + }; + + _dispatch_thread_frame_push(&dcic.dcic_dtf, dch); + + if (!_dispatch_channel_invoke_cancel_check(dch, &dcic, callbacks)) { + goto out; + } + + do { + struct dispatch_object_s *dc = dcic.dcic_next_dc; + + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + + if (unlikely(_dispatch_object_is_sync_waiter(dc))) { + DISPATCH_CLIENT_CRASH(0, "sync waiter found on channel"); + } + + if (_dispatch_object_is_channel_item(dc)) { + dcic.dcic_next_dc = dc; + dcic.dcic_called_drain = false; + dispatch_invoke_with_autoreleasepool(dcic.dcic_flags, { + if (callbacks->dcc_invoke(dch, &dcic, dch->do_ctxt)) { + if (unlikely(!dcic.dcic_called_drain)) { + DISPATCH_CLIENT_CRASH(0, "Channel didn't call " + "dispatch_channel_drain"); + } + } else { + dcic.dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + }); + } else { + dcic.dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + _dispatch_continuation_pop_inline(dc, dic, flags, dch); + if (!_dispatch_channel_invoke_checks(dch, &dcic, callbacks)) { + break; + } + } + } while (dcic.dcic_tq == DISPATCH_QUEUE_WAKEUP_NONE); + +out: + _dispatch_thread_frame_pop(&dcic.dcic_dtf); + return dcic.dcic_tq; +} + +void +_dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dch, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_channel_invoke2); +} + +void 
+dispatch_channel_foreach_work_item_peek_f( + dispatch_channel_invoke_ctxt_t dcic, + void *ctxt, dispatch_channel_enumerator_handler_t f) +{ + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called peek after drain"); + } + + dispatch_channel_t dch = dcic->dcic_dch; + struct dispatch_object_s *dc = dcic->dcic_next_dc; + + for (;;) { + dispatch_continuation_t dci = (dispatch_continuation_t)dc; + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + if (!f(ctxt, dci->dc_ctxt)) { + break; + } + if (dc == dch->dq_items_tail) { + break; + } + dc = os_mpsc_get_next(dc, do_next); + } +} + +void +dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t dcic, + void *_Nullable ctxt, dispatch_channel_drain_handler_t f) +{ + dispatch_channel_t dch = dcic->dcic_dch; + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + struct dispatch_object_s *dc; + uintptr_t dcf = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + void *unpop_item = NULL; + bool stop_invoke = false; + + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called drain twice in the same invoke"); + } + dcic->dcic_called_drain = true; + + do { + dc = dcic->dcic_next_dc; + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + + dcic->dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + + _dispatch_continuation_pop_forwarded(upcast(dc)._dc, dcf, dch, { + dispatch_invoke_with_autoreleasepool(dcic->dcic_flags, { + stop_invoke = !f(ctxt, upcast(dc)._dc->dc_ctxt, &unpop_item); + }); + }); + if (unlikely(stop_invoke)) { + break; + } + } while (_dispatch_channel_invoke_checks(dch, dcic, callbacks)); + + if (unlikely(unpop_item)) { + dispatch_continuation_t dci = _dispatch_continuation_alloc(); + _dispatch_continuation_init_f(dci, dch, unpop_item, NULL, 0, dcf); + os_mpsc_undo_pop_head(os_mpsc(dch, dq_items), upcast(dci)._do, + dcic->dcic_next_dc, do_next); + dcic->dcic_next_dc = upcast(dci)._do; + } +} + +#ifdef __BLOCKS__ +void +dispatch_channel_foreach_work_item_peek( + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_enumerator_block_t block) +{ + dispatch_channel_enumerator_handler_t f; + f = (dispatch_channel_enumerator_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_foreach_work_item_peek_f(dcic, block, f); +} + +void +dispatch_channel_drain(dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_drain_block_t block) +{ + dispatch_channel_drain_handler_t f; + f = (dispatch_channel_drain_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_drain_f(dcic, block, f); +} +#endif // __BLOCKS__ + +DISPATCH_NOINLINE +void +_dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_t dq = dch->_as_dq; + + if (unlikely(!callbacks->dcc_probe(dch, dch->do_ctxt))) { + target = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } else if (_dispatch_queue_class_probe(dch)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (_dispatch_queue_atomic_flags(dq) & DSF_CANCELED) { + if (!dch->dm_cancel_handler_called) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + + return _dispatch_queue_wakeup(dch, qos, flags, target); +} + +size_t +_dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz) +{ + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch); + size_t offset = 0; + + offset 
+= dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + _dispatch_object_class_name(dch), dch); + offset += _dispatch_object_debug_attr(dch, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dch->_as_dq, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s%s%s", + (dqf & DSF_CANCELED) ? "cancelled, " : "", + (dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "", + (dqf & DSF_DELETED) ? "deleted, " : ""); + + return offset; +} + +dispatch_channel_t +dispatch_channel_create(const char *label, dispatch_queue_t tq, + void *ctxt, dispatch_channel_callbacks_t callbacks) +{ + dispatch_channel_t dch; + dispatch_queue_flags_t dqf = DSF_STRICT; + + if (callbacks->dcc_version < 1) { + DISPATCH_CLIENT_CRASH(callbacks->dcc_version, + "Unsupported callbacks version"); + } + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + if (unlikely(!tq)) { + tq = _dispatch_get_default_queue(true); + } else { + _dispatch_retain((dispatch_queue_t _Nonnull)tq); + } + + dch = _dispatch_queue_alloc(channel, dqf, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dch; + dch->dq_label = label; + dch->do_targetq = tq; + dch->dch_callbacks = callbacks; + dch->do_ctxt = ctxt; + if (!callbacks->dcc_acknowledge_cancel) { + dch->dm_cancel_handler_called = true; + dch->do_ref_cnt--; + } + return dch; +} + +DISPATCH_NOINLINE +static void +_dispatch_channel_enqueue_slow(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_channel_enqueue(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_channel_enqueue_slow(dch, ctxt); + } + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +#ifndef __APPLE__ +#if __BLOCKS__ +typeof(dispatch_channel_async) dispatch_channel_async + __attribute__((__alias__("dispatch_async"))); +#endif + +typeof(dispatch_channel_async_f) dispatch_channel_async_f + __attribute__((__alias__("dispatch_async_f"))); +#endif + +void +dispatch_channel_wakeup(dispatch_channel_t dch, qos_class_t qos_class) +{ + dispatch_qos_t oqos = _dispatch_qos_from_qos_class(qos_class); + dx_wakeup(dch, oqos, DISPATCH_WAKEUP_MAKE_DIRTY); +} + #pragma mark - #pragma mark dispatch_mgr_queue @@ -5090,15 +5565,15 @@ DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred); static int _dispatch_mgr_sched_qos2prio(qos_class_t qos) { + if (qos == QOS_CLASS_MAINTENANCE) return 4; switch (qos) { - case QOS_CLASS_MAINTENANCE: return 4; case QOS_CLASS_BACKGROUND: return 4; case QOS_CLASS_UTILITY: return 20; case QOS_CLASS_DEFAULT: return 31; case QOS_CLASS_USER_INITIATED: return 37; case QOS_CLASS_USER_INTERACTIVE: return 47; + default: return 0; } - return 0; } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5339,12 +5814,15 @@ _dispatch_mgr_queue_drain(void) _dispatch_perfmon_end(perfmon_thread_manager); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" #if
DISPATCH_USE_KEVENT_WORKQUEUE if (!_dispatch_kevent_workqueue_enabled) #endif { _dispatch_force_cache_cleanup(); } +#pragma clang diagnostic pop } void @@ -5595,11 +6073,14 @@ DISPATCH_NOINLINE static void _dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) { - if (!events || !nevents) { + if (!dispatch_assume(events && nevents)) { + return; + } + if (*nevents == 0 || *events == NULL) { // events for worker thread request have already been delivered earlier + // or were cancelled concurrently before the point of no return return; } - if (!dispatch_assume(*nevents && *events)) return; _dispatch_adopt_wlh_anon(); _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); _dispatch_reset_wlh(); } @@ -5611,14 +6092,17 @@ static void _dispatch_workloop_worker_thread(uint64_t *workloop_id, dispatch_kevent_t *events, int *nevents) { - if (!workloop_id || !dispatch_assume(*workloop_id != 0)) { + if (!dispatch_assume(workloop_id && events && nevents)) { + return; + } + if (!dispatch_assume(*workloop_id != 0)) { return _dispatch_kevent_worker_thread(events, nevents); } - if (!events || !nevents) { + if (*nevents == 0 || *events == NULL) { // events for worker thread request have already been delivered earlier + // or were cancelled concurrently before the point of no return return; } - if (!dispatch_assume(*nevents && *events)) return; dispatch_wlh_t wlh = (dispatch_wlh_t)*workloop_id; _dispatch_adopt_wlh(wlh); _dispatch_wlh_worker_thread(wlh, *events, nevents); @@ -5715,7 +6199,7 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) "%p", dq); return; } - } while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count, + } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count, t_count - remaining, &t_count, acquire)); #if !defined(_WIN32) @@ -6955,7 +7439,7 @@ static void _dispatch_sigsuspend(void) { static const sigset_t mask; - + pthread_sigmask(SIG_SETMASK, &mask, NULL); for (;;) { sigsuspend(&mask); } @@ -7038,9 +7522,18 @@ _dispatch_queue_cleanup2(void) // See dispatch_main for call to _dispatch_sig_thread on linux.
#ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { - _dispatch_barrier_async_detached_f(_dispatch_get_default_queue(true), - NULL, _dispatch_sig_thread); - sleep(1); // workaround 6778970 + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_t tid; + int r = pthread_create(&tid, &attr, (void*)_dispatch_sig_thread, NULL); + if (unlikely(r)) { + DISPATCH_CLIENT_CRASH(r, "Unable to create signal thread"); + } + pthread_attr_destroy(&attr); + // this used to be here as a workaround for 6778970 + // but removing it had bincompat fallouts :'( + sleep(1); } #endif @@ -7119,28 +7612,63 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) "QoS Maintenance support required"); } +#if DISPATCH_USE_KEVENT_SETUP + struct pthread_workqueue_config cfg = { + .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, + .flags = 0, + .workq_cb = 0, + .kevent_cb = 0, + .workloop_cb = 0, + .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, +#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2 + .queue_label_offs = dispatch_queue_offsets.dqo_label, +#endif + }; +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" if (unlikely(!_dispatch_kevent_workqueue_enabled)) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init(_dispatch_worker_thread2, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #if DISPATCH_USE_KEVENT_WORKLOOP } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #endif // DISPATCH_USE_KEVENT_WORKLOOP #if DISPATCH_USE_KEVENT_WORKQUEUE } else if (wq_supported & WORKQ_FEATURE_KEVENT) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP #endif } else { DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); } +#pragma clang diagnostic pop if (r != 0) { DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, @@ -7413,7 +7941,7 @@ DISPATCH_NOINLINE void _dispatch_fork_becomes_unsafe_slow(void) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); @@ -7425,7 +7953,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit) { if (prohibit) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + 
uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); diff --git a/src/queue_internal.h b/src/queue_internal.h index 46864911e..713677301 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -35,7 +35,7 @@ #pragma mark - #pragma mark dispatch_queue_flags, dq_state -DISPATCH_ENUM(dispatch_queue_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, DQF_NONE = 0x00000000, DQF_AUTORELEASE_ALWAYS = 0x00010000, DQF_AUTORELEASE_NEVER = 0x00020000, @@ -121,20 +121,16 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, */ #define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull /* - * i: inactive bit (bit 56) + * i: inactive state (bit 56-55) * This bit means that the object is inactive (see dispatch_activate) */ -#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull +#define DISPATCH_QUEUE_INACTIVE 0x0180000000000000ull +#define DISPATCH_QUEUE_ACTIVATED 0x0100000000000000ull +#define DISPATCH_QUEUE_ACTIVATING 0x0080000000000000ull /* - * na: needs activation (bit 55) - * This bit is set if the object is created inactive. It tells - * dispatch_queue_wakeup to perform various tasks at first wakeup. - * - * This bit is cleared as part of the first wakeup. Having that bit prevents - * the object from being woken up (because _dq_state_should_wakeup will say - * no), except in the dispatch_activate/dispatch_resume codepath. + * This mask covers the inactive bits state */ -#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0080000000000000ull +#define DISPATCH_QUEUE_INACTIVE_BITS_MASK 0x0180000000000000ull /* * This mask covers the suspend count (sc), side suspend count bit (ssc), * inactive (i) and needs activation (na) bits @@ -461,11 +457,12 @@ typedef struct dispatch_queue_specific_head_s { TAILQ_HEAD(, dispatch_queue_specific_s) dqsh_entries; } *dispatch_queue_specific_head_t; -#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x1u -#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x2u -#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x4u -#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x8u -#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x10u +#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x0001u +#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x0002u +#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x0004u +#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x0008u +#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x0010u +#define DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS 0x0020u typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; typedef struct dispatch_workloop_attr_s { uint32_t dwla_flags; @@ -476,6 +473,7 @@ typedef struct dispatch_workloop_attr_s { uint8_t percent; uint32_t refillms; } dwla_cpupercent; + dispatch_pthread_root_queue_observer_hooks_s dwla_observers; } dispatch_workloop_attr_s; /* @@ -501,6 +499,7 @@ typedef struct dispatch_workloop_attr_s { * '--> dispatch_lane_class_t * +--> struct dispatch_lane_s * | +--> struct dispatch_source_s + * | +--> struct dispatch_channel_s * | '--> struct dispatch_mach_s * +--> struct dispatch_queue_static_s * '--> struct dispatch_queue_global_s @@ -598,6 +597,7 @@ typedef struct dispatch_workloop_attr_s { struct dispatch_source_refs_s *ds_refs; \ struct dispatch_timer_source_refs_s *ds_timer_refs; \ struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + struct dispatch_channel_callbacks_s const *dch_callbacks; \ }; \ int volatile dq_sref_cnt @@ -671,6 +671,10 @@ bool 
_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( dispatch_queue_t queue); +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t workloop, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks); #endif // __APPLE__ #if DISPATCH_USE_PTHREAD_POOL @@ -777,12 +781,18 @@ void _dispatch_queue_invoke_finish(dispatch_queue_t dq, dispatch_priority_t _dispatch_queue_compute_priority_and_wlh( dispatch_queue_class_t dq, dispatch_wlh_t *wlh_out); +DISPATCH_ENUM(dispatch_resume_op, int, + DISPATCH_RESUME, + DISPATCH_ACTIVATE, + DISPATCH_ACTIVATION_DONE, +); +void _dispatch_lane_resume(dispatch_lane_class_t dq, dispatch_resume_op_t how); + void _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq); void _dispatch_lane_class_dispose(dispatch_queue_class_t dq, bool *allow_free); void _dispatch_lane_dispose(dispatch_lane_class_t dq, bool *allow_free); void _dispatch_lane_suspend(dispatch_lane_class_t dq); -void _dispatch_lane_resume(dispatch_lane_class_t dq, bool activate); -void _dispatch_lane_activate(dispatch_lane_class_t dq, bool *allow_resume); +void _dispatch_lane_activate(dispatch_lane_class_t dq); void _dispatch_lane_invoke(dispatch_lane_class_t dq, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_lane_push(dispatch_lane_class_t dq, dispatch_object_t dou, @@ -1040,6 +1050,8 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); // continuation is an internal implementation detail that should not be // introspected #define DC_FLAG_NO_INTROSPECTION 0x200ul +// The item is a channel item, not a continuation +#define DC_FLAG_CHANNEL_ITEM 0x400ul typedef struct dispatch_continuation_s { DISPATCH_CONTINUATION_HEADER(continuation); diff --git a/src/semaphore.c b/src/semaphore.c index 30cde9278..af597ee04 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -72,7 +72,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM - offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%x, ", dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, @@ -121,7 +121,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { return _DSEMA4_TIMEOUT(); } @@ -158,7 +158,7 @@ _dispatch_group_create_with_count(uint32_t n) dg->do_targetq = _dispatch_get_default_queue(false); if (n) { os_atomic_store2o(dg, dg_bits, - -n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); + (uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // } return dg; diff --git a/src/shims.h b/src/shims.h index ea5e09812..6cb159b62 100644 --- a/src/shims.h +++ b/src/shims.h @@ -213,9 +213,7 @@ void __builtin_trap(void); #endif -#ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" -#endif #define DISPATCH_ATOMIC64_ALIGN __attribute__((aligned(8))) #include "shims/atomic_sfb.h" diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 0bb27d3de..88cbb3408 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -35,19 +35,55 @@ #if defined(__cplusplus) && (defined(__FreeBSD__) || 
defined(_WIN32)) #define _Bool bool #endif -#include - -#define memory_order_ordered memory_order_seq_cst -#define memory_order_dependency memory_order_acquire -#define os_atomic(type) type _Atomic +#ifndef os_atomic +#define os_atomic(type) type _Atomic volatile +#endif +#ifndef _os_atomic_c11_atomic #define _os_atomic_c11_atomic(p) \ ((__typeof__(*(p)) _Atomic *)(p)) +#endif // This removes the _Atomic and volatile qualifiers on the type of *p +#ifndef _os_atomic_basetypeof #define _os_atomic_basetypeof(p) \ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) +#endif + +#if __has_include() +#include + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + +#if defined(__arm__) || defined(__arm64__) +#define memory_order_ordered memory_order_relaxed +#define memory_order_ordered_smp memory_order_relaxed +#define _os_atomic_mo_ordered memory_order_relaxed +#define _os_atomic_mo_ordered_smp memory_order_relaxed +#else +#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered_smp memory_order_seq_cst +#define _os_atomic_mo_ordered memory_order_seq_cst +#define _os_atomic_mo_ordered_smp memory_order_seq_cst +#endif + +#define _os_rel_barrier_ordered memory_order_release +#define _os_acq_barrier_ordered memory_order_acquire + +#else // __has_include() +#include + +#define memory_order_ordered memory_order_seq_cst +#define memory_order_dependency memory_order_acquire #define os_atomic_load(p, m) \ atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) @@ -96,14 +132,41 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) -#define os_atomic_force_dependency_on(p, e) (p) +typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; + +#define OS_ATOMIC_DEPENDENCY_NONE ((os_atomic_dependency_t){ 0UL }) +#define os_atomic_make_dependency(v) ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) +#define os_atomic_inject_dependency(p, e) \ + ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) #define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) -#define os_atomic_load_with_dependency_on2o(p, f, e) \ - os_atomic_load_with_dependency_on(&(p)->f, e) + os_atomic_load(os_atomic_inject_dependency(p, e), dependency) #define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) +#define os_atomic_inc(p, m) \ + os_atomic_add((p), 1, m) +#define os_atomic_inc_orig(p, m) \ + os_atomic_add_orig((p), 1, m) +#define os_atomic_dec(p, m) \ + os_atomic_sub((p), 1, m) +#define os_atomic_dec_orig(p, m) \ + os_atomic_sub_orig((p), 1, m) + +#define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ + bool _result = false; \ + __typeof__(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + +#endif // !__has_include() + #define os_atomic_load2o(p, f, m) \ os_atomic_load(&(p)->f, m) #define os_atomic_store2o(p, f, v, m) \ @@ -114,8 +177,6 @@ os_atomic_cmpxchg(&(p)->f, (e), (v), m) #define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) #define os_atomic_add2o(p, f, v, m) \ os_atomic_add(&(p)->f, (v), m) #define os_atomic_add_orig2o(p, f, v, m) \ @@ -137,38 +198,22 @@ #define os_atomic_xor_orig2o(p, f, v, m) \ os_atomic_xor_orig(&(p)->f, (v), m) -#define os_atomic_inc(p, m) \ - os_atomic_add((p), 1, m) -#define os_atomic_inc_orig(p, m) \ - os_atomic_add_orig((p), 1, m) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + #define os_atomic_inc2o(p, f, m) \ os_atomic_add2o(p, f, 1, m) #define os_atomic_inc_orig2o(p, f, m) \ os_atomic_add_orig2o(p, f, 1, m) -#define os_atomic_dec(p, m) \ - os_atomic_sub((p), 1, m) -#define os_atomic_dec_orig(p, m) \ - os_atomic_sub_orig((p), 1, m) #define os_atomic_dec2o(p, f, m) \ os_atomic_sub2o(p, f, 1, m) #define os_atomic_dec_orig2o(p, f, m) \ os_atomic_sub_orig2o(p, f, 1, m) -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - __typeof__(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (unlikely(!_result)); \ - _result; \ - }) #define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) + #define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/lock.c b/src/shims/lock.c index ea701f4eb..f0e493796 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -569,9 +569,7 @@ _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, } rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags); if (rc == ENOTEMPTY) { - next = value_self | DLOCK_WAITERS_BIT; - } else { - next = value_self; + next |= DLOCK_WAITERS_BIT; } } } diff --git a/src/shims/lock.h b/src/shims/lock.h index 0fd956f5a..f32ca5057 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -29,7 +29,7 @@ #pragma mark - platform macros -DISPATCH_ENUM(dispatch_lock_options, uint32_t, +DISPATCH_OPTIONS(dispatch_lock_options, uint32_t, DLOCK_LOCK_NONE = 0x00000000, DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); @@ -252,7 +252,7 @@ int _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, void _dispatch_wake_by_address(uint32_t volatile *address); #pragma mark - thread event -/** +/*! 
* @typedef dispatch_thread_event_t * * @abstract @@ -301,7 +301,7 @@ static inline void _dispatch_thread_event_signal(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { + if (os_atomic_add_orig(&dte->dte_value, 1u, release) == 0) { // 0 -> 1 transition doesn't need a signal // force a wake even when the value is corrupt, // waiters do the validation @@ -319,7 +319,7 @@ static inline void _dispatch_thread_event_wait(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_dec(&dte->dte_value, acquire) == 0) { + if (os_atomic_sub(&dte->dte_value, 1u, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return // for any other value, take the slow path which checks it's not corrupt return; diff --git a/src/shims/target.h b/src/shims/target.h index 8e996aa73..a59dd3c3b 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -49,7 +49,7 @@ # endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 -# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 # error "iOS hosts older than iOS 9.0 aren't supported anymore" # endif #endif diff --git a/src/source.c b/src/source.c index abdbd6bcf..96c0eca43 100644 --- a/src/source.c +++ b/src/source.c @@ -90,7 +90,7 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); if (unlikely((dqf & DSF_STRICT) && !(dqf & DSF_CANCELED) && _dispatch_source_get_cancel_handler(ds->ds_refs))) { - DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " + DISPATCH_CLIENT_CRASH(dqf, "Release of a source that has not been " "cancelled, but has a mandatory cancel handler"); } dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); @@ -327,7 +327,7 @@ _dispatch_source_set_handler(dispatch_source_t ds, void *func, if (_dispatch_lane_try_inactive_suspend(ds)) { _dispatch_source_handler_replace(ds, kind, dc); - return _dispatch_lane_resume(ds, false); + return _dispatch_lane_resume(ds, DISPATCH_RESUME); } dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); @@ -511,7 +511,7 @@ _dispatch_source_timer_data(dispatch_timer_source_refs_t dr, uint64_t prev) // We hence need dependency ordering to pair with the release barrier // done by _dispatch_timers_run2() when setting the DISARMED_MARKER bit. 
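	// The os_atomic_inject_dependency() shim used below (this patch's
	// replacement for os_atomic_force_dependency_on) mixes the loaded
	// value into the pointer, so the loads through dr that follow are
	// ordered after the load of `data` via an address dependency where
	// the target supports one; the portable fallback in
	// src/shims/atomic.h simply maps memory_order_dependency to acquire.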
os_atomic_thread_fence(dependency); - dr = os_atomic_force_dependency_on(dr, data); + dr = os_atomic_inject_dependency(dr, data); if (dr->dt_timer.target < INT64_MAX) { uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); @@ -637,7 +637,7 @@ _dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, } void -_dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) +_dispatch_source_activate(dispatch_source_t ds) { dispatch_continuation_t dc; dispatch_source_refs_t dr = ds->ds_refs; @@ -667,7 +667,7 @@ _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) } // call "super" - _dispatch_lane_activate(ds, allow_resume); + _dispatch_lane_activate(ds); if ((dr->du_is_direct || dr->du_is_timer) && !ds->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(ds, &wlh); @@ -686,6 +686,7 @@ _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) _dispatch_unote_state_set(dr, wlh, 0); } #endif + // rdar://45419440 this needs to be last _dispatch_source_install(ds, wlh, pri); } } @@ -808,10 +809,8 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, avoid_starvation = dq->do_targetq || !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } - if (avoid_starvation && - os_atomic_load2o(dr, ds_pending_data, relaxed)) { - retq = ds->do_targetq; - } + + ds->ds_latched = true; } else { // there is no point trying to be eager, the next thing to do is // to deliver the event @@ -863,21 +862,61 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // from the source handler return ds->do_targetq; } - if (avoid_starvation && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { - // keep the old behavior to force re-enqueue to our target queue - // for the rearm. + if (dr->du_is_direct && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { // - // if the handler didn't run, or this is a pending delete - // or our target queue is a global queue, then starvation is - // not a concern and we can rearm right away. - return ds->do_targetq; - } - _dispatch_unote_resume(dr); - if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { - // try to redrive the drain from under the lock for sources - // targeting an overcommit root queue to avoid parking - // when the next event has already fired - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + // for legacy, direct event delivery, + // _dispatch_source_install above could cause a worker thread to + // deliver an event, and disarm the knote before we're through. + // + // This can lead to a double fire of the event handler for the same + // event with the following ordering: + // + //------------------------------------------------------------------ + // Thread1 Thread2 + // + // _dispatch_source_invoke() + // _dispatch_source_install() + // _dispatch_kevent_worker_thread() + // _dispatch_source_merge_evt() + // + // _dispatch_unote_resume() + // _dispatch_kevent_worker_thread() + // < re-enqueue due DIRTY > + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // _dispatch_unote_resume() + // _dispatch_source_merge_evt() + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // + //------------------------------------------------------------------ + // + // To avoid this situation, we should never resume a direct source + // for which we haven't fired an event. + // + // Note: this isn't a concern for kqworkloops as event delivery is + // serial with draining it by design. 
+ // + if (ds->ds_latched) { + ds->ds_latched = false; + _dispatch_unote_resume(dr); + } + if (avoid_starvation) { + // To avoid starvation of a source firing immediately when we + // rearm it, force a round-trip through the end of the target + // queue no matter what. + return ds->do_targetq; + } + } else { + _dispatch_unote_resume(dr); + if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { + // try to redrive the drain from under the lock for sources + // targeting an overcommit root queue to avoid parking + // when the next event has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } } } @@ -1132,6 +1171,7 @@ _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, du._dr); _dispatch_object_debug(ds, "%s", __func__); dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } diff --git a/src/source_internal.h b/src/source_internal.h index f38c2e9d4..d953629eb 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -35,15 +35,20 @@ _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object) DISPATCH_CLASS_DECL_BARE(source, QUEUE); +DISPATCH_CLASS_DECL(channel, QUEUE); + #define DISPATCH_SOURCE_CLASS_HEADER(x) \ DISPATCH_LANE_CLASS_HEADER(x); \ uint16_t \ /* set under the drain lock */ \ ds_is_installed:1, \ + ds_latched:1, \ dm_connect_handler_called:1, \ dm_cancel_handler_called:1, \ dm_is_xpc:1, \ - __ds_flags_pad : 12; \ + dm_arm_no_senders:1, \ + dm_strict_reply:1, \ + __ds_flags_pad : 9; \ uint16_t __dq_flags_separation[0]; \ uint16_t \ /* set under the send queue lock */ \ @@ -57,9 +62,15 @@ struct dispatch_source_s { dispatch_assert_valid_lane_type(dispatch_source_s); dispatch_static_assert(sizeof(struct dispatch_source_s) <= 128); +struct dispatch_channel_s { + DISPATCH_SOURCE_CLASS_HEADER(channel); +} DISPATCH_ATOMIC64_ALIGN; +dispatch_assert_valid_lane_type(dispatch_channel_s); +dispatch_static_assert(sizeof(struct dispatch_channel_s) <= 128); + void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); -void _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume); +void _dispatch_source_activate(dispatch_source_t ds); void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, @@ -67,6 +78,15 @@ void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); DISPATCH_COLD -size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +size_t _dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz); + +void _dispatch_channel_xref_dispose(dispatch_channel_t dch); +void _dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free); +void _dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +DISPATCH_COLD +size_t _dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/voucher.c b/src/voucher.c index fa1b8e130..1ba87032f 100644 --- a/src/voucher.c +++ b/src/voucher.c 
@@ -24,8 +24,6 @@ #define PERSONA_ID_NONE ((uid_t)-1) #endif -#if !DISPATCH_VARIANT_DYLD_STUB - #if VOUCHER_USE_MACH_VOUCHER #if !HAVE_PTHREAD_WORKQUEUE_QOS #error Unsupported configuration, workqueue QoS support is required @@ -796,7 +794,9 @@ _voucher_dispose(voucher_t voucher) voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; #endif +#if !USE_OBJC return _os_object_dealloc((_os_object_t)voucher); +#endif // !USE_OBJC } void @@ -913,13 +913,10 @@ mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher) mach_voucher_t bkv = MACH_VOUCHER_NULL; kern_return_t kr = KERN_NOT_SUPPORTED; #if VOUCHER_USE_PERSONA - mach_voucher_t kv = _voucher_get_task_mach_voucher(); - const mach_voucher_attr_recipe_data_t bank_send_recipe[] = { [0] = { .key = MACH_VOUCHER_ATTR_KEY_BANK, - .command = MACH_VOUCHER_ATTR_COPY, - .previous_voucher = kv, + .command = MACH_VOUCHER_ATTR_BANK_CREATE, }, [1] = { .key = MACH_VOUCHER_ATTR_KEY_BANK, @@ -2024,17 +2021,3 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) } #endif // VOUCHER_USE_MACH_VOUCHER - -#else // DISPATCH_VARIANT_DYLD_STUB - -firehose_activity_id_t -voucher_get_activity_id_4dyld(void) -{ -#if VOUCHER_USE_MACH_VOUCHER - return _voucher_get_activity_id(_voucher_get(), NULL); -#else - return 0; -#endif -} - -#endif // DISPATCH_VARIANT_DYLD_STUB diff --git a/src/voucher_internal.h b/src/voucher_internal.h index ec8874346..37d0935ac 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -309,7 +309,7 @@ _voucher_release_inline(struct voucher_s *voucher) if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } - return _os_object_xref_dispose((_os_object_t)voucher); + return _voucher_xref_dispose((voucher_t)voucher); } #if DISPATCH_PURE_C diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig deleted file mode 100644 index 763bafe1e..000000000 --- a/xcodeconfig/libdispatch-dyld-stub.xcconfig +++ /dev/null @@ -1,28 +0,0 @@ -// -// Copyright (c) 2016 Apple Inc. All rights reserved. -// -// @APPLE_APACHE_LICENSE_HEADER_START@ -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// @APPLE_APACHE_LICENSE_HEADER_END@ -// - -PRODUCT_NAME = libdispatch_dyld_stub -INSTALL_PATH = /usr/local/lib/dyld_stub -BUILD_VARIANTS = normal -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) -OTHER_LDFLAGS = -VERSIONING_SYSTEM = -EXCLUDED_SOURCE_FILE_NAMES = * -INCLUDED_SOURCE_FILE_NAMES = voucher.c // minimal with DISPATCH_VARIANT_DYLD_STUB diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index d8a5113a2..24cbc6b2b 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -23,5 +23,9 @@ __dispatch_queue_attrs __dispatch_queue_attr_concurrent __dispatch_source_type_memorypressure __dispatch_source_type_memorystatus _dispatch_assert_queue$V2 _dispatch_assert_queue _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not +_dispatch_async _dispatch_channel_async +_dispatch_async_f _dispatch_channel_async_f _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target +_dispatch_source_cancel _dispatch_channel_cancel _dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF +_dispatch_source_testcancel _dispatch_channel_testcancel diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean index c6ba14c4b..25a5711a2 100644 --- a/xcodeconfig/libdispatch.clean +++ b/xcodeconfig/libdispatch.clean @@ -18,6 +18,7 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # +__MergedGlobals __dispatch_bug.last_seen __dispatch_bug_deprecated.last_seen __dispatch_bug_kevent_client.last_seen diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index d8d1a0d6e..b10789292 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -48,6 +48,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_channel +__OS_dispatch_channel_vtable _OBJC_CLASS_$_OS_dispatch_mach __OS_dispatch_mach_vtable _OBJC_CLASS_$_OS_dispatch_mach_msg @@ -80,6 +82,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_channel _OBJC_METACLASS_$_OS_dispatch_mach _OBJC_METACLASS_$_OS_dispatch_mach_msg _OBJC_METACLASS_$_OS_dispatch_io diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index b586837d5..8ea917e20 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -48,6 +48,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_channel +__OS_dispatch_channel_vtable _OBJC_CLASS_$_OS_dispatch_mach __OS_dispatch_mach_vtable _OBJC_CLASS_$_OS_dispatch_mach_msg @@ -80,6 +82,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_channel _OBJC_METACLASS_$_OS_dispatch_mach _OBJC_METACLASS_$_OS_dispatch_mach_msg _OBJC_METACLASS_$_OS_dispatch_io diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index f473b8ffb..2e97d81f6 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -25,17 +25,31 @@ SDKROOT = macosx.internal SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator PRODUCT_NAME = 
libdispatch EXECUTABLE_PREFIX = -INSTALL_PATH = /usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os + +SDK_INSTALL_VARIANT = $(SDK_INSTALL_VARIANT_$(DRIVERKIT)) +SDK_INSTALL_VARIANT_1 = driverkit +SDK_INSTALL_VARIANT_ = default +SDK_INSTALL_ROOT = $(SDK_INSTALL_ROOT_$(SDK_INSTALL_VARIANT)) +SDK_INSTALL_ROOT_driverkit = $(DRIVERKITROOT) +SDK_INSTALL_HEADERS_ROOT = $(SDK_INSTALL_HEADERS_ROOT_$(SDK_INSTALL_VARIANT)) +SDK_INSTALL_HEADERS_ROOT_driverkit = $(SDK_INSTALL_ROOT)/$(SDK_RUNTIME_HEADERS_PREFIX) +SDK_RUNTIME_HEADERS_PREFIX = Runtime + +INSTALL_PATH = $(SDK_INSTALL_ROOT)/usr/lib/system +PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/dispatch +OS_PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/os HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src -LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system $(SDKROOT)/usr/local/lib +LIBRARY_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/lib/system $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/local/lib +SYSTEM_HEADER_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include +SYSTEM_FRAMEWORK_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks + INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO USE_HEADERMAP = NO BUILD_VARIANTS = normal debug profile + ONLY_ACTIVE_ARCH = NO CLANG_LINK_OBJC_RUNTIME = NO GCC_C_LANGUAGE_STANDARD = gnu11 @@ -77,11 +91,67 @@ CLANG_WARN_UNGUARDED_AVAILABILITY = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s GCC_NO_COMMON_BLOCKS = YES -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 +GCC_PREPROCESSOR_DEFINITIONS[sdk=driverkit*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 -WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS) -NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla -Wno-unguarded-availability-new -OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS) 
+ +WARNING_CFLAGS = + +// warnings we want +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wall +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wextra +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wmost +WARNING_CFLAGS = $(WARNING_CFLAGS) -Warray-bounds-pointer-arithmetic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Watomic-properties +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcomma +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wconditional-uninitialized +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcovered-switch-default +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdate-time +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdeprecated +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdouble-promotion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wduplicate-enum +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wexpansion-to-defined +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wfloat-equal +WARNING_CFLAGS = $(WARNING_CFLAGS) -Widiomatic-parentheses +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wignored-qualifiers +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wnullable-to-nonnull-conversion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wobjc-interface-ivars +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wover-aligned +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpacked +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpointer-arith +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wselector +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wstatic-in-inline +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wsuper-class-method-mismatch +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wswitch +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wtautological-compare +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wunused + +// silenced warnings +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unknown-warning-option +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-pedantic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-bad-function-cast +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++-compat +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat-pedantic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-align +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-qual +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-disabled-macro-expansion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-documentation-unknown-command +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-format-nonliteral +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-missing-variable-declarations +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-old-style-cast +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-padded +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-reserved-id-macro +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-shift-sign-overflow +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-undef +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unreachable-code-aggressive +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unused-macros +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-used-but-marked-unused +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-vla +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unguarded-availability-new +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-switch-enum // -Wswitch is enough, this forces explicit listing of all cases mandatory + +OTHER_CFLAGS = -fverbose-asm $(PLATFORM_CFLAGS) OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1 @@ -89,15 +159,25 @@ OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_D GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) SIM_SUFFIX[sdk=*simulator*] = _sim -DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel 
-lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind +DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks +UNWIND_LDFLAGS = -lunwind +UNWIND_LDFLAGS[sdk=driverkit*] = OBJC_LDFLAGS = -Wl,-upward-lobjc +OBJC_LDFLAGS[sdk=driverkit*] = LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin LIBDARWIN_LDFLAGS[sdk=*simulator*] = +LIBDARWIN_LDFLAGS[sdk=driverkit*] = ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order +ORDER_LDFLAGS[sdk=driverkit*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) -OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(CR_LDFLAGS) $(UNWIND_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) +OTHER_MIGFLAGS = -novouchers -I$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/include -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/local/include + +OBJC_SOURCE_FILE_NAMES = *.m +EXCLUDED_SOURCE_FILE_NAMES = $(EXCLUDED_SOURCE_FILE_NAMES_$(SDK_INSTALL_VARIANT)) +EXCLUDED_SOURCE_FILE_NAMES_driverkit = $(EXCLUDED_SOURCE_FILE_NAMES_default) $(OBJC_SOURCE_FILE_NAMES) + COPY_HEADERS_RUN_UNIFDEF = YES COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__ diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig index e6d83a3aa..b21315812 100644 --- a/xcodeconfig/libfirehose_kernel.xcconfig +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -21,7 +21,7 @@ SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/kernel/ -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY=0 OTHER_MIGFLAGS = -novouchers OTHER_LDFLAGS = OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed diff --git a/xcodescripts/check-order.sh b/xcodescripts/check-order.sh index 60cb9ebff..3801df0ee 100644 --- a/xcodescripts/check-order.sh +++ b/xcodescripts/check-order.sh @@ -23,12 +23,16 @@ test "$ACTION" = install || exit 0 list_objc_syms () { - nm -arch $1 -nU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep _OBJC | cut -d' ' -f3 + nm -arch $1 -jnU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep -E '^_OBJC_(CLASS|METACLASS)_\$' } list_mutable_data_syms () { - nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib |grep __DATA|egrep -v '(__const|__crash_info)'|sed 's/^.* //' + nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib | awk ' + /__DATA.* _OBJC_(CLASS|METACLASS)_\$/{ print $NF; next } + /__const|__crash_info| _OBJC| __OBJC/{ next } + /__DATA/{ print $NF } + ' } list_objc_order () diff --git a/xcodescripts/mig-headers.sh 
b/xcodescripts/mig-headers.sh index bd477c027..e2aff4c59 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -19,6 +19,7 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # + export MIGCC="$(xcrun -find cc)" export MIGCOM="$(xcrun -find migcom)" export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}" From f49eafb05b0e431c542e4c2fb24006e89c1c5ec0 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 2 Oct 2018 15:26:31 -0700 Subject: [PATCH 002/249] build: honour `BUILD_SHARED_LIBS` This is needed for proper shared linkage to the BlocksRuntime (e.g. when building SourceKit). We now build BlocksRuntime shared or static as requested. Signed-off-by: Kim Topley --- CMakeLists.txt | 24 ++++++++++++++++-------- dispatch/generic/module.modulemap | 1 + 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 89e985951..628cf9585 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -87,6 +87,9 @@ endif() option(ENABLE_DTRACE "enable dtrace support" "") +# NOTE(abdulras) this is the CMake supported way to control whether we generate +# shared or static libraries. This impacts the behaviour of `add_library` in +# what type of library it generates. option(BUILD_SHARED_LIBS "build shared libraries" ON) option(ENABLE_TESTING "build libdispatch tests" ON) @@ -131,21 +134,22 @@ endif() option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) -find_package(BlocksRuntime QUIET) -if(NOT BlocksRuntime_FOUND) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) set(BlocksRuntime_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/src/BlocksRuntime) + # NOTE(compnerd) use the `BUILD_SHARED_LIBS` variable to determine what type + # of library to build. If it is true, we will generate shared libraries, + # otherwise we will generate static libraries. 
add_library(BlocksRuntime - STATIC - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) set_target_properties(BlocksRuntime PROPERTIES POSITION_INDEPENDENT_CODE TRUE) if(HAVE_OBJC AND CMAKE_DL_LIBS) - set_target_properties(BlocksRuntime - PROPERTIES - INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS}) + target_link_libraries(BlocksRuntime + PUBLIC + ${CMAKE_DL_LIBS}) endif() add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) @@ -160,6 +164,10 @@ if(NOT BlocksRuntime_FOUND) DESTINATION "${INSTALL_BLOCK_HEADERS_DIR}") endif() + install(TARGETS + BlocksRuntime + DESTINATION + ${INSTALL_TARGET_DIR}) endif() check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) diff --git a/dispatch/generic/module.modulemap b/dispatch/generic/module.modulemap index 8c3e7d016..f7fdaae76 100644 --- a/dispatch/generic/module.modulemap +++ b/dispatch/generic/module.modulemap @@ -2,6 +2,7 @@ module Dispatch { requires blocks export * link "dispatch" + link "BlocksRuntime" } module DispatchIntrospection [system] [extern_c] { From ac83a4ade810ac2ac917f7f62164cfcb355de705 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 4 Oct 2018 15:31:52 -0700 Subject: [PATCH 003/249] Merge pull request #396 from compnerd/build-type-blocks build: honour `BUILD_SHARED_LIBS` Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index b4483135a..536f1f5bc 100644 --- a/PATCHES +++ b/PATCHES @@ -434,3 +434,4 @@ github commits starting with 29bdc2f from [3975b58] APPLIED rdar://44568645 [81dc900] APPLIED rdar://44568645 [6162a1d] APPLIED rdar://44568645 +[c55ff6f] APPLIED rdar://54572081 From 83b5a20402972c0a868d9ccdaf358562b8732fd8 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 3 Oct 2018 10:20:36 -0700 Subject: [PATCH 004/249] build: enable usage on Windows Adjust the build for Windows to permit the isa pointer to successfully link. They will be off by a level of indirection and need to be patched up at runtime. 
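To picture the failure mode (an illustrative C sketch with assumed names, not code from this patch): with the .def CONSTANT exports added here, a client image that statically initializes a block's isa field records the address of the import-table slot for the class symbol rather than the class data itself, so a one-time fix-up at runtime has to strip that extra level of indirection.

    /* Hypothetical sketch: Block_layout is reduced to its isa field and
     * fixup_isa() stands in for the runtime patch-up this commit mentions. */
    extern void *_NSConcreteGlobalBlock[32]; /* may resolve to the IAT slot */

    struct Block_layout {
        void *isa; /* flags, invoke and descriptor omitted for brevity */
    };

    static struct Block_layout blk = { _NSConcreteGlobalBlock };

    static void fixup_isa(struct Block_layout *b) {
    #if defined(_WIN32)
        b->isa = *(void **)b->isa; /* strip the extra indirection */
    #endif
    }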
Signed-off-by: Kim Topley --- CMakeLists.txt | 10 ++++++++++ src/BlocksRuntime/Block.h | 23 +++++++++++++++++++++-- src/BlocksRuntime/BlocksRuntime.def | 4 ++++ src/BlocksRuntime/data.c | 20 +++++++++++++------- 4 files changed, 48 insertions(+), 9 deletions(-) create mode 100644 src/BlocksRuntime/BlocksRuntime.def diff --git a/CMakeLists.txt b/CMakeLists.txt index 628cf9585..7bb9dce5a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,6 +143,16 @@ if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) add_library(BlocksRuntime ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(BlocksRuntime + PRIVATE + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/BlocksRuntime.def) + if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(BlocksRuntime + PRIVATE + BlocksRuntime_STATIC) + endif() + endif() set_target_properties(BlocksRuntime PROPERTIES POSITION_INDEPENDENT_CODE TRUE) diff --git a/src/BlocksRuntime/Block.h b/src/BlocksRuntime/Block.h index d0898ff49..32f27f4f9 100644 --- a/src/BlocksRuntime/Block.h +++ b/src/BlocksRuntime/Block.h @@ -11,11 +11,25 @@ #ifndef _Block_H_ #define _Block_H_ +#if defined(_WIN32) +# if defined(BlocksRuntime_STATIC) +# define BLOCK_ABI +# else +# if defined(BlocksRuntime_EXPORTS) +# define BLOCK_ABI __declspec(dllexport) +# else +# define BLOCK_ABI __declspec(dllimport) +# endif +# endif +#else +# define BLOCK_ABI __attribute__((__visibility__("default"))) +#endif + #if !defined(BLOCK_EXPORT) # if defined(__cplusplus) -# define BLOCK_EXPORT extern "C" __attribute__((visibility("default"))) +# define BLOCK_EXPORT extern "C" BLOCK_ABI # else -# define BLOCK_EXPORT extern __attribute__((visibility("default"))) +# define BLOCK_EXPORT extern BLOCK_ABI # endif #endif @@ -38,8 +52,13 @@ BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int); BLOCK_EXPORT void _Block_object_dispose(const void *, const int); // Used by the compiler. Do not use these variables yourself. +#if defined(_WIN32) +extern void * _NSConcreteGlobalBlock[32]; +extern void * _NSConcreteStackBlock[32]; +#else BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; BLOCK_EXPORT void * _NSConcreteStackBlock[32]; +#endif #if __cplusplus } diff --git a/src/BlocksRuntime/BlocksRuntime.def b/src/BlocksRuntime/BlocksRuntime.def new file mode 100644 index 000000000..a3b1aabeb --- /dev/null +++ b/src/BlocksRuntime/BlocksRuntime.def @@ -0,0 +1,4 @@ +LIBRARY BlocksRuntime +EXPORTS + _NSConcreteGlobalBlock CONSTANT + _NSConcreteStackBlock CONSTANT diff --git a/src/BlocksRuntime/data.c b/src/BlocksRuntime/data.c index 03de71b41..fe4745b04 100644 --- a/src/BlocksRuntime/data.c +++ b/src/BlocksRuntime/data.c @@ -14,11 +14,17 @@ We allocate space and export a symbol to be used as the Class for the on-stack a We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. 
**********************/ -#define BLOCK_EXPORT __attribute__((visibility("default"))) +#include "Block.h" -BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteAutoBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] = { 0 }; +#if defined(_WIN32) +void * _NSConcreteStackBlock[32] = { 0 }; +void * _NSConcreteGlobalBlock[32] = { 0 }; +#else +BLOCK_ABI void * _NSConcreteStackBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteGlobalBlock[32] = { 0 }; +#endif + +BLOCK_ABI void * _NSConcreteMallocBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteAutoBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteFinalizingBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteWeakBlockVariable[32] = { 0 }; From cf9fd0569b2922bcab32bd2f59eb1a68ceb85c46 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 5 Oct 2018 10:22:10 -0700 Subject: [PATCH 005/249] Merge pull request #362 from compnerd/shared Windows BlocksRuntime Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 536f1f5bc..71ec0185f 100644 --- a/PATCHES +++ b/PATCHES @@ -435,3 +435,4 @@ github commits starting with 29bdc2f from [81dc900] APPLIED rdar://44568645 [6162a1d] APPLIED rdar://44568645 [c55ff6f] APPLIED rdar://54572081 +[c4a7149] APPLIED rdar://54572081 From 86935c88db21e9e9913b57a83ca862d02737e230 Mon Sep 17 00:00:00 2001 From: Doug Gregor Date: Tue, 9 Oct 2018 16:20:28 -0700 Subject: [PATCH 006/249] [CMake] Make sure to link swiftrt.o when building the Swift Dispatch overlay. Because we are linking libdispatch through clang rather than swift, we need to explicitly add swiftrt.o, which is needed by ELF and COFF to register metadata sections. Fixes rdar://problem/44941707. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 80bbd54b1..9782fed83 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -115,7 +115,8 @@ if(ENABLE_SWIFT) target_sources(dispatch PRIVATE swift/DispatchStubs.cc - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) + ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o + ${SWIFT_RUNTIME_LIBDIR}/swiftrt.o) if(CMAKE_BUILD_TYPE MATCHES Debug) target_link_libraries(dispatch PRIVATE From a79e5c1a79b4ce63905a1106f146d4d6fabdca1b Mon Sep 17 00:00:00 2001 From: Doug Gregor Date: Tue, 9 Oct 2018 17:55:27 -0700 Subject: [PATCH 007/249] Merge pull request #398 from DougGregor/link-swiftrt [CMake] Make sure to link swiftrt.o when building the Swift Dispatch overlay. 
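For context, the registration mechanism swiftrt.o supplies has a plain-C analogue (an illustration with invented section and symbol names, not the Swift runtime's actual metadata layout): records are emitted into a named section, the linker synthesizes __start_/__stop_ bounds for it, and a startup hook walks the section to register every record, which is why an image linked without that object file never performs the registration.

    #include <stdio.h>

    struct metadata_record { const char *name; };

    /* Each translation unit drops a record into the named section
     * ("swift_meta_demo" is invented for this sketch). */
    #define REGISTER(sym) \
        static struct metadata_record sym \
            __attribute__((used, section("swift_meta_demo"))) = { #sym }

    REGISTER(example_type);

    /* ELF linkers synthesize these bounds for sections whose names are
     * valid C identifiers. */
    extern struct metadata_record __start_swift_meta_demo[];
    extern struct metadata_record __stop_swift_meta_demo[];

    /* The role swiftrt.o plays, in miniature. */
    __attribute__((constructor))
    static void register_metadata(void) {
        for (struct metadata_record *r = __start_swift_meta_demo;
                r != __stop_swift_meta_demo; ++r)
            printf("registering %s\n", r->name);
    }

    int main(void) { return 0; }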
Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 71ec0185f..da0bc34e5 100644 --- a/PATCHES +++ b/PATCHES @@ -436,3 +436,4 @@ github commits starting with 29bdc2f from [6162a1d] APPLIED rdar://44568645 [c55ff6f] APPLIED rdar://54572081 [c4a7149] APPLIED rdar://54572081 +[edce1fe] APPLIED rdar://54572081 From 8e746aac8be6cdbc9c86ed13c827964ad99129c5 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 14:09:43 -0700 Subject: [PATCH 008/249] swift: cast to the appropriate type This fixes an error from the swift compiler: error: cannot convert value of type 'UInt32' to expected argument type 'UInt' Signed-off-by: Kim Topley --- src/swift/Block.swift | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/swift/Block.swift b/src/swift/Block.swift index e90396bb1..0afbb265c 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -40,14 +40,14 @@ public class DispatchWorkItem { internal var _block: _DispatchBlock public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { - _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(UInt32(flags.rawValue)), + _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(UInt(flags.rawValue)), qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { - _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(UInt32(flags.rawValue)), noescapeBlock) + _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(UInt(flags.rawValue)), noescapeBlock) } public func perform() { From 9fbd493780ac1e7bb3737ecb3651274037a9c9b9 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 17:09:58 -0700 Subject: [PATCH 009/249] Merge pull request #399 from compnerd/casting swift: cast to the appropriate type Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index da0bc34e5..5e9730fff 100644 --- a/PATCHES +++ b/PATCHES @@ -437,3 +437,4 @@ github commits starting with 29bdc2f from [c55ff6f] APPLIED rdar://54572081 [c4a7149] APPLIED rdar://54572081 [edce1fe] APPLIED rdar://54572081 +[ac525a4] APPLIED rdar://54572081 From 1ce501ced13851401407195ed03832af309c3850 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 17:10:07 -0700 Subject: [PATCH 010/249] Merge pull request #400 from compnerd/cleanup queue: clean up a covered switch Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 5e9730fff..8fdb0a474 100644 --- a/PATCHES +++ b/PATCHES @@ -438,3 +438,4 @@ github commits starting with 29bdc2f from [c4a7149] APPLIED rdar://54572081 [edce1fe] APPLIED rdar://54572081 [ac525a4] APPLIED rdar://54572081 +[0710b29] APPLIED rdar://54572081 From f2e0ebf7a4490a8e6d6c89035a080401076f9498 Mon Sep 17 00:00:00 2001 From: Adam Thayer Date: Sun, 14 Oct 2018 14:36:22 -0700 Subject: [PATCH 011/249] Linux i686 Build Support This unblocks building on 32-bit Linux using the i686 triple. 
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index bae1f9f57..2a3f4d61a 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -101,6 +101,8 @@ function(get_swift_host_arch result_var_name) set("${result_var_name}" "itanium" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86") set("${result_var_name}" "i686" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "i686") + set("${result_var_name}" "i686" PARENT_SCOPE) else() message(FATAL_ERROR "Unrecognized architecture on host system: ${CMAKE_SYSTEM_PROCESSOR}") endif() From cad27f85a10d776228d10ab47ed78d6938e53128 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 16 Oct 2018 09:36:53 -0700 Subject: [PATCH 012/249] Merge pull request #402 from Kaiede/i686Support Linux i686 Build Support Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 8fdb0a474..b785cef06 100644 --- a/PATCHES +++ b/PATCHES @@ -439,3 +439,4 @@ github commits starting with 29bdc2f from [edce1fe] APPLIED rdar://54572081 [ac525a4] APPLIED rdar://54572081 [0710b29] APPLIED rdar://54572081 +[e99de71] APPLIED rdar://54572081 From 2edda3ce8aeb70045c62dd332a5ea7b9d0a6dc5e Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Mon, 15 Oct 2018 16:37:42 -0700 Subject: [PATCH 013/249] Fix the signature of _dispatch_install_thread_detach_callback() This function is declared as accepting a `dispatch_function_t` callback, which is a function pointer with a `void *` argument. However, the implementation and Swift overlay declare the callback without arguments, causing a conflict which Clang warns about. Change the function signature to accept the correct type. Signed-off-by: Kim Topley --- private/queue_private.h | 2 +- src/queue.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/private/queue_private.h b/private/queue_private.h index 65ef7e255..2a3abe32c 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -438,7 +438,7 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, * "detached" before the thread exits or the application will crash. 
*/ DISPATCH_EXPORT -void _dispatch_install_thread_detach_callback(dispatch_function_t cb); +void _dispatch_install_thread_detach_callback(void (*cb)(void)); #endif __END_DECLS diff --git a/src/queue.c b/src/queue.c index 68dd28aa7..e3ea832fb 100644 --- a/src/queue.c +++ b/src/queue.c @@ -7809,7 +7809,7 @@ gettid(void) static void (*_dispatch_thread_detach_callback)(void); void -_dispatch_install_thread_detach_callback(dispatch_function_t cb) +_dispatch_install_thread_detach_callback(void (*cb)(void)) { if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); From 8cefcf3e61b4aa83896fcb0ac1aec48c5ef5e35b Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Wed, 17 Oct 2018 06:35:52 +0200 Subject: [PATCH 014/249] Merge pull request #403 from adierking/detach Fix the signature of _dispatch_install_thread_detach_callback() Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index b785cef06..5b762510b 100644 --- a/PATCHES +++ b/PATCHES @@ -440,3 +440,4 @@ github commits starting with 29bdc2f from [ac525a4] APPLIED rdar://54572081 [0710b29] APPLIED rdar://54572081 [e99de71] APPLIED rdar://54572081 +[6d83ad5] APPLIED rdar://54572081 From 42e69317c5105144f75df57604246ce51a94e01d Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 4 Oct 2018 10:47:33 -0700 Subject: [PATCH 015/249] build: use the exported target instead of reconstructing paths Use the exported target from swift rather than recreate the paths locally. This almost works to replace the use of the paths. Unfortunately, swiftrt is not currently exported. Signed-off-by: Kim Topley --- CMakeLists.txt | 45 +++++++++++++++++---------------------------- src/CMakeLists.txt | 7 ++++++- 2 files changed, 23 insertions(+), 29 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7bb9dce5a..ec1ca0e9c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,38 +44,32 @@ dispatch_common_warnings() option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) +# NOTE(abdulras) this is the CMake supported way to control whether we generate +# shared or static libraries. This impacts the behaviour of `add_library` in +# what type of library it generates. 
+option(BUILD_SHARED_LIBS "build shared libraries" ON) + option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) if(ENABLE_SWIFT) if(NOT CMAKE_SWIFT_COMPILER) message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift") endif() - get_filename_component(SWIFT_TOOLCHAIN ${CMAKE_SWIFT_COMPILER} DIRECTORY) - get_filename_component(SWIFT_TOOLCHAIN ${SWIFT_TOOLCHAIN} DIRECTORY) - - string(TOLOWER ${CMAKE_SYSTEM_NAME} SWIFT_OS) - get_swift_host_arch(SWIFT_HOST_ARCH) + find_package(Swift REQUIRED CONFIG) - set(SWIFT_RUNTIME_LIBDIR ${SWIFT_TOOLCHAIN}/${SWIFT_LIBDIR}/swift/${SWIFT_OS}/${SWIFT_HOST_ARCH}) - - add_library(swiftCore - SHARED IMPORTED GLOBAL) - set_target_properties(swiftCore - PROPERTIES - IMPORTED_LOCATION - ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftCore${CMAKE_SHARED_LIBRARY_SUFFIX}) + string(TOLOWER ${CMAKE_SYSTEM_NAME} swift_os) + get_swift_host_arch(swift_arch) - add_library(swiftSwiftOnoneSupport - SHARED IMPORTED GLOBAL) - set_target_properties(swiftSwiftOnoneSupport - PROPERTIES - IMPORTED_LOCATION - ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftSwiftOnoneSupport${CMAKE_SHARED_LIBRARY_SUFFIX}) + if(BUILD_SHARED_LIBS) + set(swift_dir swift) + else() + set(swift_dir swift_static) + endif() - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/swift/${SWIFT_OS}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/swift/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") - set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/swift/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/swift/os" CACHE PATH "Path where the os/ headers will be installed") + set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/${swift_dir}/${swift_os}" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") + set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") + set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/os" CACHE PATH "Path where the os/ headers will be installed") endif() if(NOT ENABLE_SWIFT) @@ -87,11 +81,6 @@ endif() option(ENABLE_DTRACE "enable dtrace support" "") -# NOTE(abdulras) this is the CMake supported way to control whether we generate -# shared or static libraries. This impacts the behaviour of `add_library` in -# what type of library it generates. 
-option(BUILD_SHARED_LIBS "build shared libraries" ON) - option(ENABLE_TESTING "build libdispatch tests" ON) option(USE_LLD_LINKER "use the lld linker" FALSE) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9782fed83..efe3aed2f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -112,11 +112,16 @@ if(ENABLE_SWIFT) ${swift_optimization_flags} DEPENDS ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap) + + get_filename_component(swift_toolchain ${CMAKE_SWIFT_COMPILER} DIRECTORY) + get_filename_component(swift_toolchain ${swift_toolchain} DIRECTORY) + set(swift_runtime_libdir ${swift_toolchain}/lib/${swift_dir}/${swift_os}/${swift_arch}) + target_sources(dispatch PRIVATE swift/DispatchStubs.cc ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o - ${SWIFT_RUNTIME_LIBDIR}/swiftrt.o) + ${swift_runtime_libdir}/swiftrt.o) if(CMAKE_BUILD_TYPE MATCHES Debug) target_link_libraries(dispatch PRIVATE From 8470f2413ff58d90abb8c799b65095509ba91adc Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 18 Oct 2018 06:46:25 -0700 Subject: [PATCH 016/249] Merge pull request #397 from compnerd/static-stdlib build: support static builds for Swift Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 5b762510b..6a97b7e26 100644 --- a/PATCHES +++ b/PATCHES @@ -441,3 +441,4 @@ github commits starting with 29bdc2f from [0710b29] APPLIED rdar://54572081 [e99de71] APPLIED rdar://54572081 [6d83ad5] APPLIED rdar://54572081 +[3ed78b5] APPLIED rdar://54572081 From 174bb9074622cf5f4b098220930e066488e49f2f Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 18 Oct 2018 06:47:35 -0700 Subject: [PATCH 017/249] Merge pull request #406 from adierking/printflike dispatch_c99: remove unnecessary __printflike() definition Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 6a97b7e26..9b90dcc00 100644 --- a/PATCHES +++ b/PATCHES @@ -442,3 +442,4 @@ github commits starting with 29bdc2f from [e99de71] APPLIED rdar://54572081 [6d83ad5] APPLIED rdar://54572081 [3ed78b5] APPLIED rdar://54572081 +[f6376cb] APPLIED rdar://54572081 From d8e05b4a56e11e8d28bfb1397fd80a822c71eeee Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 27 Oct 2018 13:14:06 -0700 Subject: [PATCH 018/249] build: always copy the modulemaps Change from a symlink to a copy. This is more portable and fixes the distribution aspect: when installation occurs, the symbolic link is not followed, so what gets installed is a symbolic link rather than the file it points to. If we copy instead, the actual contents are installed. The files are small, so the cost is relatively low.
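The behaviour is easy to reproduce outside of CMake; a small C illustration (not project code) of why installing the directory entry itself ships a relative link instead of the file:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void) {
        /* what the old create_symlink build step effectively produced */
        if (symlink("darwin/module.modulemap", "module.modulemap") != 0)
            return 1;

        struct stat st;
        if (lstat("module.modulemap", &st) == 0 && S_ISLNK(st.st_mode))
            puts("the entry is a symlink; copying it preserves the link, "
                 "not the contents it points at");
        unlink("module.modulemap");
        return 0;
    }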
Signed-off-by: Kim Topley --- CMakeLists.txt | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ec1ca0e9c..c7ab83af8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -309,27 +309,19 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") -elseif(CMAKE_SYSTEM_NAME STREQUAL Windows) - add_custom_command(OUTPUT - "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" - "${PROJECT_SOURCE_DIR}/private/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/private/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") else() add_custom_command(OUTPUT "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") endif() -add_custom_target(module-map-symlinks +add_custom_target(module-maps ALL DEPENDS "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") From 6d24e9c0b855100b3c56ce91c1d31c45118af2dd Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Mon, 29 Oct 2018 00:06:57 -0700 Subject: [PATCH 019/249] Merge pull request #412 from compnerd/copy build: always copy the modulemaps Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 9b90dcc00..ff9d08541 100644 --- a/PATCHES +++ b/PATCHES @@ -443,3 +443,4 @@ github commits starting with 29bdc2f from [6d83ad5] APPLIED rdar://54572081 [3ed78b5] APPLIED rdar://54572081 [f6376cb] APPLIED rdar://54572081 +[9acbab3] APPLIED rdar://54572081 From aac28252f55350d3960ca1211dfd27c2a507256b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 21:18:24 -0700 Subject: [PATCH 020/249] cmake: update SwiftSupport from XCTest Update the swift support CMake rules from XCTest. This adds support for additional features like partial module compilation which improves incremental builds. It allows for executable and library builds. 
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 136 ++++++++++++++++++++++--------- 1 file changed, 97 insertions(+), 39 deletions(-) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 2a3f4d61a..478f24118 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -1,49 +1,55 @@ include(CMakeParseArguments) -function(add_swift_library library) - set(options) +function(add_swift_target target) + set(options LIBRARY) set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) - set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS;DEPENDS) + set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;SOURCES;SWIFT_FLAGS) - cmake_parse_arguments(ASL "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) + cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) set(flags ${CMAKE_SWIFT_FLAGS}) + set(link_flags) - list(APPEND flags -emit-library) - - if(ASL_TARGET) - list(APPEND FLAGS -target;${ASL_TARGET}) + if(AST_TARGET) + list(APPEND flags -target;${AST_TARGET}) endif() - if(ASL_MODULE_NAME) - list(APPEND flags -module-name;${ASL_MODULE_NAME}) - endif() - if(ASL_MODULE_LINK_NAME) - list(APPEND flags -module-link-name;${ASL_MODULE_LINK_NAME}) + if(AST_MODULE_NAME) + list(APPEND flags -module-name;${AST_MODULE_NAME}) + else() + list(APPEND flags -module-name;${target}) endif() - if(ASL_MODULE_PATH) - list(APPEND flags -emit-module-path;${ASL_MODULE_PATH}) + if(AST_MODULE_LINK_NAME) + list(APPEND flags -module-link-name;${AST_MODULE_LINK_NAME}) endif() - if(ASL_MODULE_CACHE_PATH) - list(APPEND flags -module-cache-path;${ASL_MODULE_CACHE_PATH}) + if(AST_MODULE_CACHE_PATH) + list(APPEND flags -module-cache-path;${AST_MODULE_CACHE_PATH}) endif() - if(ASL_SWIFT_FLAGS) - foreach(flag ${ASL_SWIFT_FLAGS}) + if(AST_SWIFT_FLAGS) + foreach(flag ${AST_SWIFT_FLAGS}) list(APPEND flags ${flag}) endforeach() endif() - if(ASL_CFLAGS) - foreach(flag ${ASL_CFLAGS}) + if(AST_CFLAGS) + foreach(flag ${AST_CFLAGS}) list(APPEND flags -Xcc;${flag}) endforeach() endif() - - # FIXME: We shouldn't /have/ to build things in a single process. 
- # - list(APPEND flags -force-single-frontend-invocation) + if(AST_LINK_FLAGS) + foreach(flag ${AST_LINK_FLAGS}) + list(APPEND link_flags ${flag}) + endforeach() + endif() + if(NOT AST_OUTPUT) + if(AST_LIBRARY) + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) + else() + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) + endif() + endif() set(sources) - foreach(source ${ASL_SOURCES}) + foreach(source ${AST_SOURCES}) get_filename_component(location ${source} PATH) if(IS_ABSOLUTE ${location}) list(APPEND sources ${source}) @@ -52,25 +58,77 @@ function(add_swift_library library) endforeach() endforeach() - get_filename_component(module_directory ${ASL_MODULE_PATH} DIRECTORY) + set(objs) + set(mods) + set(docs) + set(i 0) + foreach(source ${sources}) + get_filename_component(name ${source} NAME) + + set(obj ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}${CMAKE_C_OUTPUT_EXTENSION}) + set(mod ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftmodule) + set(doc ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftdoc) + + set(all_sources ${sources}) + list(INSERT all_sources ${i} -primary-file) + + add_custom_command(OUTPUT + ${obj} + ${mod} + ${doc} + DEPENDS + ${source} + COMMAND + ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) + + list(APPEND objs ${obj}) + list(APPEND mods ${mod}) + list(APPEND docs ${doc}) + + math(EXPR i "${i}+1") + endforeach() + + if(AST_LIBRARY) + get_filename_component(module_directory ${AST_MODULE_PATH} DIRECTORY) + + set(module ${AST_MODULE_PATH}) + set(documentation ${module_directory}/${AST_MODULE_NAME}.swiftdoc) + + add_custom_command(OUTPUT + ${module} + ${documentation} + DEPENDS + ${mods} + ${docs} + COMMAND + ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) + endif() + if(AST_LIBRARY) + set(emit_library -emit-library) + endif() add_custom_command(OUTPUT - ${ASL_OUTPUT} - ${ASL_MODULE_PATH} - ${module_directory}/${ASL_MODULE_NAME}.swiftdoc + ${AST_OUTPUT} DEPENDS - ${ASL_SOURCES} - ${CMAKE_SWIFT_COMPILER} - ${ASL_DEPENDS} + ${objs} COMMAND - ${CMAKE_COMMAND} -E make_directory ${module_directory} + ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs} COMMAND - ${CMAKE_SWIFT_COMPILER} ${flags} -c ${sources} -o ${ASL_OUTPUT}) - add_custom_target(${library} + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + add_custom_target(${target} + ALL DEPENDS - ${ASL_OUTPUT} - ${ASL_MODULE_PATH} - ${module_directory}/${ASL_MODULE_NAME}.swiftdoc) + ${AST_OUTPUT} + ${module} + ${documentation}) +endfunction() + +function(add_swift_library library) + add_swift_target(${library} LIBRARY ${ARGN}) +endfunction() + +function(add_swift_executable executable) + add_swift_target(${executable} ${ARGN}) endfunction() # Returns the current achitecture name in a variable From 9012286739827b3d8754975c95414b15722f49a5 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 21:27:22 -0700 Subject: [PATCH 021/249] cmake: support static and shared swift libraries Enhance add_swift_target to support building static libraries. We would always generate shared libraries previously.
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 68 ++++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 478f24118..92f6f7a02 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -2,7 +2,7 @@ include(CMakeParseArguments) function(add_swift_target target) - set(options LIBRARY) + set(options LIBRARY;SHARED;STATIC) set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;SOURCES;SWIFT_FLAGS) @@ -40,9 +40,28 @@ function(add_swift_target target) list(APPEND link_flags ${flag}) endforeach() endif() + if(AST_LIBRARY) + if(AST_STATIC AND AST_SHARED) + message(SEND_ERROR "add_swift_target asked to create library as STATIC and SHARED") + elseif(AST_STATIC OR NOT BUILD_SHARED_LIBS) + set(library_kind STATIC) + elseif(AST_SHARED OR BUILD_SHARED_LIBS) + set(library_kind SHARED) + endif() + else() + if(AST_STATIC OR AST_SHARED) + message(SEND_ERROR "add_swift_target asked to create executable as STATIC or SHARED") + endif() + endif() if(NOT AST_OUTPUT) if(AST_LIBRARY) - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) + if(AST_SHARED OR BUILD_SHARED_LIBS) + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) + else() + # NOTE(compnerd) this is a hack for the computation of the + # basename/dirname below for the static path. + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}) + endif() else() set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) endif() @@ -107,20 +126,37 @@ function(add_swift_target target) if(AST_LIBRARY) set(emit_library -emit-library) endif() - add_custom_command(OUTPUT - ${AST_OUTPUT} - DEPENDS - ${objs} - COMMAND - ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs} - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) - add_custom_target(${target} - ALL - DEPENDS - ${AST_OUTPUT} - ${module} - ${documentation}) + if(library_kind STREQUAL SHARED) + add_custom_command(OUTPUT + ${AST_OUTPUT} + DEPENDS + ${objs} + COMMAND + ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs} + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + add_custom_target(${target} + ALL + DEPENDS + ${AST_OUTPUT} + ${module} + ${documentation}) + else() + add_library(${target}-static STATIC ${objs}) + get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) + get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) + set_target_properties(${target}-static + PROPERTIES + LINKER_LANGUAGE C + OUTPUT_DIRECTORY ${ast_output_dn} + OUTPUT_NAME ${ast_output_bn}) + add_custom_target(${target} + ALL + DEPENDS + ${target}-static + ${module} + ${documentation}) + endif() endfunction() function(add_swift_library library) From 9bc407f2f8158007dbda13f1a039a092a8a0e086 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 22:28:56 -0700 Subject: [PATCH 022/249] build: split out the SDK overlay This splits out the SDK overlay component and libdispatch runtime itself. Doing so enables the re-use of libdispatch with and without swift and makes the behaviour similar across Darwin and other platforms. 
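The ObjC-interop wrinkle this split has to absorb is spelled out in the NOTE added in the diff below; its essence is a stub of roughly the following shape (a simplified C sketch of the DispatchStubs.cc shim, which in reality is also guarded on USE_OBJC):

    /* With ObjC interop disabled there is no ObjC runtime, so the
     * compiler-emitted callout is satisfied with a plain Swift retain,
     * balanced later by a swift_release in the caller. */
    extern void swift_retain(void *obj);

    void *objc_retainAutoreleasedReturnValue(void *obj) {
        if (obj) {
            swift_retain(obj);
            return obj;
        }
        return 0;
    }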
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 83 ++++++++++++++++++++++++++++++---------------- 1 file changed, 54 insertions(+), 29 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index efe3aed2f..31b5ddad5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -72,25 +72,60 @@ target_sources(dispatch PRIVATE block.cpp) if(HAVE_OBJC) + # TODO(compnerd) split DispatchStubs.cc into a separate component for the ObjC + # registration and a separate component for the swift compiler's emission of a + # call to the ObjC autorelease elision entry point. target_sources(dispatch PRIVATE data.m - object.m) + object.m + swift/DispatchStubs.cc) endif() if(ENABLE_SWIFT) set(swift_optimization_flags) if(NOT CMAKE_BUILD_TYPE MATCHES Debug) set(swift_optimization_flags -O) endif() + + # NOTE(compnerd) Today regardless of whether or not ObjC interop is enabled, + # swift will use an autoreleased return value convention for certain CF + # functions (including some that are used/related to dispatch). This means + # that the swift compiler in callers to such functions will call the function, + # and then pass the result of the function to + # objc_retainAutoreleasedReturnValue. In a context where we have ObjC interop + # disabled, we do not have access to the objc runtime so an implementation of + # objc_retainAutoreleasedReturnValue is not available. To work around this, we + # provide a shim for objc_retainAutoreleasedReturnValue in DispatchStubs.cc + # that just calls retain on the object. Once we fix the swift compiler to + # switch to a different model for handling these arguments with objc-interop + # disabled these shims can be eliminated. + add_library(DispatchStubs + STATIC + swift/DispatchStubs.cc) + target_include_directories(DispatchStubs + PRIVATE + ${PROJECT_SOURCE_DIR}) + set_target_properties(DispatchStubs + PROPERTIES + POSITION_INDEPENDENT_CODE YES) + add_swift_library(swiftDispatch + CFLAGS + -fblocks + -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap + DEPENDS + ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap + DispatchStubs + LINK_FLAGS + -lDispatchStubs + -L $ + -ldispatch MODULE_NAME Dispatch MODULE_LINK_NAME - dispatch + swiftDispatch MODULE_PATH ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - OUTPUT - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o SOURCES swift/Block.swift swift/Data.swift @@ -101,32 +136,12 @@ if(ENABLE_SWIFT) swift/Source.swift swift/Time.swift swift/Wrapper.swift - TARGET - ${CMAKE_C_COMPILER_TARGET} - CFLAGS - -fblocks - -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap SWIFT_FLAGS -I ${PROJECT_SOURCE_DIR} -I/usr/include ${swift_optimization_flags} - DEPENDS - ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap) - - get_filename_component(swift_toolchain ${CMAKE_SWIFT_COMPILER} DIRECTORY) - get_filename_component(swift_toolchain ${swift_toolchain} DIRECTORY) - set(swift_runtime_libdir ${swift_toolchain}/lib/${swift_dir}/${swift_os}/${swift_arch}) - - target_sources(dispatch - PRIVATE - swift/DispatchStubs.cc - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o - ${swift_runtime_libdir}/swiftrt.o) - if(CMAKE_BUILD_TYPE MATCHES Debug) - target_link_libraries(dispatch - PRIVATE - swiftSwiftOnoneSupport) - endif() + TARGET + ${CMAKE_C_COMPILER_TARGET}) endif() if(ENABLE_DTRACE) dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d @@ -231,8 +246,6 @@ add_custom_command(TARGET dispatch POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy $ .libs COMMENT "Copying libdispatch to .libs") 
-get_swift_host_arch(SWIFT_HOST_ARCH) - install(TARGETS dispatch DESTINATION @@ -242,6 +255,18 @@ if(ENABLE_SWIFT) ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc DESTINATION - "${INSTALL_TARGET_DIR}/${SWIFT_HOST_ARCH}") + ${INSTALL_TARGET_DIR}/${swift_arch}) + + if(BUILD_SHARED_LIBS) + set(library_kind SHARED) + else() + set(library_kind STATIC) + endif() + set(swiftDispatch_OUTPUT_FILE + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_${library_kind}_LIBRARY_PREFIX}swiftDispatch${CMAKE_${library_kind}_LIBRARY_SUFFIX}) + install(FILES + ${swiftDispatch_OUTPUT_FILE} + DESTINATION + ${INSTALL_TARGET_DIR}) endif() From 242e725fe26f504d25dcb21c986d0cc5d066bfe1 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 23 Oct 2018 14:53:45 -0700 Subject: [PATCH 023/249] build: correct dependency tracking Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 92f6f7a02..537e60cb8 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -97,6 +97,7 @@ function(add_swift_target target) ${doc} DEPENDS ${source} + ${AST_DEPENDS} COMMAND ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) @@ -119,6 +120,7 @@ function(add_swift_target target) DEPENDS ${mods} ${docs} + ${AST_DEPENDS} COMMAND ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) endif() @@ -131,6 +133,7 @@ function(add_swift_target target) ${AST_OUTPUT} DEPENDS ${objs} + ${AST_DEPENDS} COMMAND ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs} COMMAND From 370b38f81898438861e855587cb6666d3c2e98a0 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 07:30:45 -0800 Subject: [PATCH 024/249] Merge pull request #401 from compnerd/split-sdk-overlay Split Swift SDK Overlay Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ff9d08541..b8e137af1 100644 --- a/PATCHES +++ b/PATCHES @@ -444,3 +444,4 @@ github commits starting with 29bdc2f from [3ed78b5] APPLIED rdar://54572081 [f6376cb] APPLIED rdar://54572081 [9acbab3] APPLIED rdar://54572081 +[ca08b5f] APPLIED rdar://54572081 From 7bb4a40cdcdb6ea5e78611a55b54415d88022acf Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 18 Oct 2018 11:39:21 -0700 Subject: [PATCH 025/249] build: remove "/usr/include" for swift Remove the explicit /usr/include search path for the swift library build. This allows us to build for Windows as well. 
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 31b5ddad5..b6593618d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -138,7 +138,6 @@ if(ENABLE_SWIFT) swift/Wrapper.swift SWIFT_FLAGS -I ${PROJECT_SOURCE_DIR} - -I/usr/include ${swift_optimization_flags} TARGET ${CMAKE_C_COMPILER_TARGET}) From 516be482d3309072f4a74559c8bf266f8a1068b0 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 07:31:27 -0800 Subject: [PATCH 026/249] Merge pull request #410 from compnerd/include build: remove "/usr/include" for swift Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index b8e137af1..1f1b3a98f 100644 --- a/PATCHES +++ b/PATCHES @@ -445,3 +445,4 @@ github commits starting with 29bdc2f from [f6376cb] APPLIED rdar://54572081 [9acbab3] APPLIED rdar://54572081 [ca08b5f] APPLIED rdar://54572081 +[775f9f2] APPLIED rdar://54572081 From 769480893b942f29d6af4fa38600d349d9cffc96 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 29 Oct 2018 15:13:42 -0700 Subject: [PATCH 027/249] SDK: guard BSD specific paths against windows Treat windows like Linux and android and remove the BSD interfaces on these targets. This allows us to mostly build the SDK overlay for Windows. Signed-off-by: Kim Topley --- src/swift/Source.swift | 10 +++++----- src/swift/Wrapper.swift | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index fa0b3624e..8d9fcba35 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -113,7 +113,7 @@ extension DispatchSource { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public struct ProcessEvent : OptionSet, RawRepresentable { public let rawValue: UInt public init(rawValue: UInt) { self.rawValue = rawValue } @@ -171,7 +171,7 @@ extension DispatchSource { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { let source = dispatch_source_create(_swift_dispatch_source_type_PROC(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceProcess @@ -208,7 +208,7 @@ extension DispatchSource { return DispatchSource(source: source) as DispatchSourceUserDataReplace } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { let source = dispatch_source_create(_swift_dispatch_source_type_VNODE(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceFileSystemObject @@ -261,7 +261,7 @@ extension DispatchSourceMemoryPressure { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSourceProcess { public var handle: pid_t { return pid_t(dispatch_source_get_handle(self as! DispatchSource)) @@ -617,7 +617,7 @@ extension DispatchSourceTimer { } } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSourceFileSystemObject { public var handle: Int32 { return Int32(dispatch_source_get_handle((self as! 
DispatchSource).__wrapped)) diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index 649043d95..678631b03 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -181,7 +181,7 @@ extension DispatchSource : DispatchSourceMachSend, } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSource : DispatchSourceProcess, DispatchSourceFileSystemObject { } @@ -272,7 +272,7 @@ public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public protocol DispatchSourceProcess : DispatchSourceProtocol { var handle: pid_t { get } @@ -302,7 +302,7 @@ public protocol DispatchSourceTimer : DispatchSourceProtocol { func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval) } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol { var handle: Int32 { get } From e57060687b49489c1e5d61259506d2eb96c2e80a Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 07:33:16 -0800 Subject: [PATCH 028/249] Merge pull request #413 from compnerd/bsdism SDK: guard BSD specific paths against windows Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 1f1b3a98f..6443af19b 100644 --- a/PATCHES +++ b/PATCHES @@ -446,3 +446,4 @@ github commits starting with 29bdc2f from [9acbab3] APPLIED rdar://54572081 [ca08b5f] APPLIED rdar://54572081 [775f9f2] APPLIED rdar://54572081 +[db37bbc] APPLIED rdar://54572081 From f2bc62b1e800f4582d37e292a05ca08a7e12b9f5 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 1 Nov 2018 20:18:54 -0700 Subject: [PATCH 029/249] DispatchStubs: make more Windows friendly Provide an alias for the import symbol as the ObjC runtime is normally in a separate DLL and the compiler will annotate the function as being DLLImport. This allows us to resolve the symbols when building the SDK overlay. Signed-off-by: Kim Topley --- src/swift/DispatchStubs.cc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index aef5505bd..594f66648 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -67,7 +67,11 @@ extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); // eventually call swift_release to balance the retain below. This is a // workaround until the compiler no longer emits this callout on non-ObjC // platforms. 
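/* Illustration, not part of the hunk below (the function name `f` is
 * hypothetical): on Windows, a call through a __declspec(dllimport)
 * declaration lowers to an indirect call via an `__imp_` pointer that the
 * import library normally supplies. When the callee actually lives in the
 * same image, defining that pointer by hand satisfies the reference -- the
 * same shape as the __imp_objc_retainAutoreleasedReturnValue alias
 * introduced below. */
#if defined(_WIN32)
void *f(void *obj) { return obj; }   /* the local definition */
void *(*__imp_f)(void *) = &f;       /* resolves dllimport-style call sites */
#endif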
-extern "C" void swift_retain(void *); +extern "C" +#if defined(_WIN32) +__declspec(dllimport) +#endif +void swift_retain(void *); DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { @@ -78,4 +82,9 @@ extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { else return NULL; } +#if defined(_WIN32) +extern "C" void *(*__imp_objc_retainAutoreleasedReturnValue)(void *) = + &objc_retainAutoreleasedReturnValue; +#endif + #endif // !USE_OBJC From 0ba34b4e2f4adfd613d39bbacc23015f62f87f5d Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 07:34:32 -0800 Subject: [PATCH 030/249] Merge pull request #415 from compnerd/stubs DispatchStubs: make more Windows friendly Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 6443af19b..7a1cd95dd 100644 --- a/PATCHES +++ b/PATCHES @@ -447,3 +447,4 @@ github commits starting with 29bdc2f from [ca08b5f] APPLIED rdar://54572081 [775f9f2] APPLIED rdar://54572081 [db37bbc] APPLIED rdar://54572081 +[9852dcb] APPLIED rdar://54572081 From bdb36482258243ef2494df9a5d2709dc85b4c694 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 20 Oct 2018 21:30:20 -0700 Subject: [PATCH 031/249] dispatch: make public interfaces LLP64 friendly This attempts to replace the public uses of `unsigned long` with `uintptr_t` and `long` with `intptr_t`. The use of `long` and `unsigned long` here was as a type suitable for matching pointer width. However, on LLP64 targets, this does not hold. Replace them with `intptr_t` which gives a proper pointer sized type. This is done only for the argument types and not for named types to ensure that the ABI is not modified for C++ (where a template may be specialized on the named type). Signed-off-by: Kim Topley --- dispatch/block.h | 4 ++-- dispatch/group.h | 2 +- dispatch/object.h | 4 ++-- dispatch/queue.h | 2 +- dispatch/semaphore.h | 6 +++--- dispatch/source.h | 10 +++++----- src/event/event.c | 16 ++++++++-------- src/event/event_internal.h | 8 ++++---- src/init.c | 4 ++-- src/queue.c | 6 +++--- src/semaphore.c | 14 +++++++------- src/semaphore_internal.h | 4 ++-- src/shims/priority.h | 2 +- src/source.c | 12 ++++++------ 14 files changed, 47 insertions(+), 47 deletions(-) diff --git a/dispatch/block.h b/dispatch/block.h index 4d6f5b548..6aa3c8f2d 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -323,7 +323,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW -long +intptr_t dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); /*! @@ -416,7 +416,7 @@ dispatch_block_cancel(dispatch_block_t block); API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_block_testcancel(dispatch_block_t block); __END_DECLS diff --git a/dispatch/group.h b/dispatch/group.h index 8d74ada2e..6b30b26c6 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -160,7 +160,7 @@ dispatch_group_async_f(dispatch_group_t group, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); /*! 
diff --git a/dispatch/object.h b/dispatch/object.h index 024a3c2a8..8211fbd49 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -458,7 +458,7 @@ dispatch_set_qos_class_floor(dispatch_object_t object, */ DISPATCH_UNAVAILABLE DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW -long +intptr_t dispatch_wait(void *object, dispatch_time_t timeout); #if __has_extension(c_generic_selections) #define dispatch_wait(object, timeout) \ @@ -556,7 +556,7 @@ dispatch_cancel(void *object); DISPATCH_UNAVAILABLE DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_testcancel(void *object); #if __has_extension(c_generic_selections) #define dispatch_testcancel(object) \ diff --git a/dispatch/queue.h b/dispatch/queue.h index ddace0659..dc5aae79a 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -681,7 +681,7 @@ typedef long dispatch_queue_priority_t; API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t -dispatch_get_global_queue(long identifier, unsigned long flags); +dispatch_get_global_queue(intptr_t identifier, uintptr_t flags); /*! * @typedef dispatch_queue_attr_t diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index f5394b45d..a6f9394f9 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -61,7 +61,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_semaphore_t -dispatch_semaphore_create(long value); +dispatch_semaphore_create(intptr_t value); /*! * @function dispatch_semaphore_wait @@ -85,7 +85,7 @@ dispatch_semaphore_create(long value); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); /*! @@ -107,7 +107,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema); __END_DECLS diff --git a/dispatch/source.h b/dispatch/source.h index 40453fa3e..5ce826022 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -389,7 +389,7 @@ DISPATCH_NOTHROW dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, - unsigned long mask, + uintptr_t mask, dispatch_queue_t _Nullable queue); /*! @@ -537,7 +537,7 @@ dispatch_source_cancel(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_source_testcancel(dispatch_source_t source); /*! @@ -601,7 +601,7 @@ dispatch_source_get_handle(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -unsigned long +uintptr_t dispatch_source_get_mask(dispatch_source_t source); /*! @@ -640,7 +640,7 @@ dispatch_source_get_mask(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -unsigned long +uintptr_t dispatch_source_get_data(dispatch_source_t source); /*! 
@@ -662,7 +662,7 @@ dispatch_source_get_data(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_source_merge_data(dispatch_source_t source, unsigned long value); +dispatch_source_merge_data(dispatch_source_t source, uintptr_t value); /*! * @function dispatch_source_set_timer diff --git a/src/event/event.c b/src/event/event.c index 937ca6ca2..8ea5d17dd 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -30,7 +30,7 @@ static void _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt); DISPATCH_NOINLINE static dispatch_unote_t _dispatch_unote_create(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { dispatch_unote_linkage_t dul; dispatch_unote_class_t du; @@ -63,7 +63,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { if (!handle) { return DISPATCH_UNOTE_NULL; @@ -74,7 +74,7 @@ _dispatch_unote_create_with_handle(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { #if !TARGET_OS_MAC // if (handle > INT_MAX) { @@ -87,7 +87,7 @@ _dispatch_unote_create_with_fd(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_without_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { if (handle) { return DISPATCH_UNOTE_NULL; @@ -206,7 +206,7 @@ _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) static dispatch_unote_t _dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask) + uintptr_t mask) { if (handle || mask) { return DISPATCH_UNOTE_NULL; @@ -294,7 +294,7 @@ const dispatch_source_type_s _dispatch_source_type_write = { static dispatch_unote_t _dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask) + uintptr_t mask) { if (handle >= NSIG) { return DISPATCH_UNOTE_NULL; @@ -927,13 +927,13 @@ _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt) static dispatch_unote_t _dispatch_source_timer_create(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { dispatch_timer_source_refs_t dt; // normalize flags if (mask & DISPATCH_TIMER_STRICT) { - mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND; + mask &= ~(uintptr_t)DISPATCH_TIMER_BACKGROUND; } if (mask & ~dst->dst_mask) { return DISPATCH_UNOTE_NULL; diff --git a/src/event/event_internal.h b/src/event/event_internal.h index d59b303c4..8a3ae22b4 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -355,7 +355,7 @@ typedef struct dispatch_source_type_s { uint32_t dst_size; dispatch_unote_t (*dst_create)(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t mask); #if DISPATCH_EVENT_BACKEND_KEVENT bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn); #endif @@ -614,11 +614,11 @@ _dispatch_timer_unote_compute_missed(dispatch_timer_source_refs_t dt, extern struct dispatch_timer_heap_s _dispatch_timers_heap[DISPATCH_TIMER_COUNT]; dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t 
mask); dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t mask); dispatch_unote_t _dispatch_unote_create_without_handle( - dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); + dispatch_source_type_t dst, uintptr_t handle, uintptr_t mask); void _dispatch_unote_dispose(dispatch_unote_t du); /* diff --git a/src/init.c b/src/init.c index abaf55d26..887ac874c 100644 --- a/src/init.c +++ b/src/init.c @@ -361,12 +361,12 @@ unsigned long volatile _dispatch_queue_serial_numbers = dispatch_queue_global_t -dispatch_get_global_queue(long priority, unsigned long flags) +dispatch_get_global_queue(intptr_t priority, uintptr_t flags) { dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT); - if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { + if (flags & ~(uintptr_t)DISPATCH_QUEUE_OVERCOMMIT) { return DISPATCH_BAD_INPUT; } dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); diff --git a/src/queue.c b/src/queue.c index e3ea832fb..e12b70455 100644 --- a/src/queue.c +++ b/src/queue.c @@ -561,7 +561,7 @@ dispatch_block_cancel(dispatch_block_t db) (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); } -long +intptr_t dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); @@ -572,7 +572,7 @@ dispatch_block_testcancel(dispatch_block_t db) return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); } -long +intptr_t dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); @@ -618,7 +618,7 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) "run more than once and waited for"); } - long ret = dispatch_group_wait(dbpd->dbpd_group, timeout); + intptr_t ret = dispatch_group_wait(dbpd->dbpd_group, timeout); if (boost_th) { _dispatch_thread_override_end(boost_th, dbpd); diff --git a/src/semaphore.c b/src/semaphore.c index af597ee04..8b5be3b73 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -27,7 +27,7 @@ long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); #pragma mark dispatch_semaphore_t dispatch_semaphore_t -dispatch_semaphore_create(long value) +dispatch_semaphore_create(intptr_t value) { dispatch_semaphore_t dsema; @@ -81,7 +81,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) } DISPATCH_NOINLINE -long +intptr_t _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); @@ -89,7 +89,7 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) return 1; } -long +intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema) { long value = os_atomic_inc2o(dsema, dsema_value, release); @@ -104,7 +104,7 @@ dispatch_semaphore_signal(dispatch_semaphore_t dsema) } DISPATCH_NOINLINE -static long +static intptr_t _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { @@ -135,7 +135,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, return 0; } -long +intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { long value = os_atomic_dec2o(dsema, dsema_value, acquire); @@ -206,7 +206,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) } DISPATCH_NOINLINE -static long +static intptr_t _dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, dispatch_time_t timeout) { @@ -221,7 +221,7 @@ 
_dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, } } -long +intptr_t dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { uint64_t old_state, new_state; diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 850792df5..b9b6c7bf2 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -32,8 +32,8 @@ struct dispatch_queue_s; DISPATCH_CLASS_DECL(semaphore, OBJECT); struct dispatch_semaphore_s { DISPATCH_OBJECT_HEADER(semaphore); - long volatile dsema_value; - long dsema_orig; + intptr_t volatile dsema_value; + intptr_t dsema_orig; _dispatch_sema4_t dsema_sema; }; diff --git a/src/shims/priority.h b/src/shims/priority.h index 56ea5ce09..df26f848e 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -165,7 +165,7 @@ _dispatch_qos_to_qos_class(dispatch_qos_t qos) DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t -_dispatch_qos_from_queue_priority(long priority) +_dispatch_qos_from_queue_priority(intptr_t priority) { switch (priority) { case DISPATCH_QUEUE_PRIORITY_BACKGROUND: return DISPATCH_QOS_BACKGROUND; diff --git a/src/source.c b/src/source.c index 96c0eca43..daa637a89 100644 --- a/src/source.c +++ b/src/source.c @@ -40,7 +40,7 @@ _dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) dispatch_source_t dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask, dispatch_queue_t dq) + uintptr_t mask, dispatch_queue_t dq) { dispatch_source_refs_t dr; dispatch_source_t ds; @@ -96,13 +96,13 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } -long +intptr_t dispatch_source_testcancel(dispatch_source_t ds) { return (bool)(ds->dq_atomic_flags & DSF_CANCELED); } -unsigned long +uintptr_t dispatch_source_get_mask(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; @@ -144,7 +144,7 @@ dispatch_source_get_handle(dispatch_source_t ds) return dr->du_ident; } -unsigned long +uintptr_t dispatch_source_get_data(dispatch_source_t ds) { #if DISPATCH_USE_MEMORYSTATUS @@ -159,7 +159,7 @@ dispatch_source_get_data(dispatch_source_t ds) #endif #endif // DISPATCH_USE_MEMORYSTATUS uint64_t value = os_atomic_load2o(dr, ds_data, relaxed); - return (unsigned long)(dr->du_has_extended_status ? + return (uintptr_t)(dr->du_has_extended_status ? 
DISPATCH_SOURCE_GET_DATA(value) : value); } @@ -197,7 +197,7 @@ dispatch_source_get_extended_data(dispatch_source_t ds, } void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +dispatch_source_merge_data(dispatch_source_t ds, uintptr_t val) { dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); dispatch_source_refs_t dr = ds->ds_refs; From 852f2d336605f7d8500c4e2f47175453da4482dc Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 09:58:27 -0800 Subject: [PATCH 032/249] Merge pull request #409 from compnerd/llp64 dispatch: make public interfaces LLP64 friendly Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 7a1cd95dd..f52426a15 100644 --- a/PATCHES +++ b/PATCHES @@ -448,3 +448,4 @@ github commits starting with 29bdc2f from [775f9f2] APPLIED rdar://54572081 [db37bbc] APPLIED rdar://54572081 [9852dcb] APPLIED rdar://54572081 +[9ec95bf] APPLIED rdar://54572081 From d0f6c293c25935e1461cabbe0e9be05875b04445 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 29 Oct 2018 15:25:16 -0700 Subject: [PATCH 033/249] WIP/DNM: LLP64 support for SDK Overlay If we cannot break the ABI for C++ users of libdispatch, this would allow us to build the SDK overlay for LLP64 targets. Signed-off-by: Kim Topley --- src/swift/Block.swift | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/swift/Block.swift b/src/swift/Block.swift index 0afbb265c..f1c2f08df 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -40,14 +40,24 @@ public class DispatchWorkItem { internal var _block: _DispatchBlock public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { - _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(UInt(flags.rawValue)), +#if os(Windows) && arch(x86_64) + let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) +#else + let flags = dispatch_block_flags_t(UInt(flags.rawValue)) +#endif + _block = dispatch_block_create_with_qos_class(flags, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. 
internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { - _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(UInt(flags.rawValue)), noescapeBlock) +#if os(Windows) && arch(x86_64) + let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) +#else + let flags = dispatch_block_flags_t(UInt(flags.rawValue)) +#endif + _block = _swift_dispatch_block_create_noescape(flags, noescapeBlock) } public func perform() { From e6b27dba08ca5f8b44eb085439a9548fba93eb61 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 10:23:29 -0800 Subject: [PATCH 034/249] Merge pull request #414 from compnerd/llp64-overlay WIP/DNM: LLP64 support for SDK Overlay Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index f52426a15..fda486384 100644 --- a/PATCHES +++ b/PATCHES @@ -449,3 +449,4 @@ github commits starting with 29bdc2f from [db37bbc] APPLIED rdar://54572081 [9852dcb] APPLIED rdar://54572081 [9ec95bf] APPLIED rdar://54572081 +[bd2367c] APPLIED rdar://54572081 From ac858786603c69003f667da2759d0fd77ab796a8 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 16 Oct 2018 15:03:06 -0700 Subject: [PATCH 035/249] Replace valloc() usages in portable code valloc() is obsolete and 64-bit Android does not provide it at all. When portability is required, use posix_memalign() instead. Signed-off-by: Kim Topley --- src/io.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/io.c b/src/io.c index 0624fffd4..4cffdebf6 100644 --- a/src/io.c +++ b/src/io.c @@ -2312,7 +2312,10 @@ _dispatch_operation_perform(dispatch_operation_t op) } op->buf = _aligned_malloc(op->buf_siz, siInfo.dwPageSize); #else - op->buf = valloc(op->buf_siz); + err = posix_memalign(&op->buf, (size_t)PAGE_SIZE, op->buf_siz); + if (err != 0) { + goto error; + } #endif _dispatch_op_debug("buffer allocated", op); } else if (op->direction == DOP_DIR_WRITE) { From a129ab2e6f95207735f14c60ff85f80e74b44cef Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 10:24:26 -0800 Subject: [PATCH 036/249] Merge pull request #405 from adierking/valloc Replace valloc() usages in portable code Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index fda486384..e631a1db1 100644 --- a/PATCHES +++ b/PATCHES @@ -450,3 +450,4 @@ github commits starting with 29bdc2f from [9852dcb] APPLIED rdar://54572081 [9ec95bf] APPLIED rdar://54572081 [bd2367c] APPLIED rdar://54572081 +[a736ea7] APPLIED rdar://54572081 From 34878d291a01fd57907920ca5cdacc27ef6579d8 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Mon, 15 Oct 2018 18:06:29 -0700 Subject: [PATCH 037/249] io: use posix_fadvise() instead of readahead() readahead() is a Linux-specific system call, and many C libraries (e.g. Android's Bionic) do not declare it by default. A more-portable alternative is to use posix_fadvise() with `POSIX_FADV_WILLNEED`. This is functionally equivalent to readahead() on Linux. Additionally, on FreeBSD, Dispatch is currently using fcntl() with `F_RDAHEAD`. This is not exactly equivalent to Darwin's `F_RDADVISE`. Using posix_fadvise() should work better here too. 
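For reference, a minimal C sketch of the portable prefetch hint this patch switches to (the helper name and the fd/offset/count parameters are placeholders for the operation state io.c tracks; error handling is illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/types.h>

    static void prefetch_hint(int fd, off_t offset, off_t count)
    {
        /* Ask the kernel to read [offset, offset + count) into the page
         * cache. posix_fadvise() returns the error directly rather than
         * through errno, and the advice is best-effort. */
        switch (posix_fadvise(fd, offset, count, POSIX_FADV_WILLNEED)) {
        case 0:      break;
        case EINVAL: break; /* unsupported advice or file type */
        case ESPIPE: break; /* fd refers to a pipe or FIFO */
        default:     break; /* other errors are equally non-fatal */
        }
    }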
Signed-off-by: Kim Topley --- CMakeLists.txt | 1 + cmake/config.h.in | 3 +++ src/io.c | 29 ++++++++++++++--------------- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c7ab83af8..d5b1b6301 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -188,6 +188,7 @@ check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME) check_function_exists(mach_approximate_time HAVE_MACH_APPROXIMATE_TIME) check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT) check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) +check_function_exists(posix_fadvise HAVE_POSIX_FADVISE) check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) check_function_exists(strlcpy HAVE_STRLCPY) diff --git a/cmake/config.h.in b/cmake/config.h.in index 0709c254c..986e420f9 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -136,6 +136,9 @@ /* Define if you have the Objective-C runtime */ #cmakedefine HAVE_OBJC +/* Define to 1 if you have the `posix_fadvise' function. */ +#cmakedefine HAVE_POSIX_FADVISE + /* Define to 1 if you have the `pthread_key_init_np' function. */ #cmakedefine HAVE_PTHREAD_KEY_INIT_NP diff --git a/src/io.c b/src/io.c index 4cffdebf6..24be10e8e 100644 --- a/src/io.c +++ b/src/io.c @@ -20,11 +20,6 @@ #include "internal.h" -#if defined(__FreeBSD__) -#include -#define F_RDADVISE F_RDAHEAD -#endif - #ifndef DISPATCH_IO_DEBUG #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif @@ -2227,9 +2222,8 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) (void)chunk_size; #else if (_dispatch_io_get_error(op, NULL, true)) return; -#if defined(__linux__) || defined(__FreeBSD__) - // linux does not support fcntl (F_RDAVISE) - // define necessary datastructure and use readahead +#if !defined(F_RDADVISE) + // Compatibility struct whose values may be passed to posix_fadvise() struct radvisory { off_t ra_offset; int ra_count; @@ -2254,13 +2248,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; -#if defined(__linux__) - _dispatch_io_syscall_switch(err, - readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count), - case EINVAL: break; // fd does refer to a non-supported filetype - default: (void)dispatch_assume_zero(err); break; - ); -#else +#if defined(F_RDADVISE) _dispatch_io_syscall_switch(err, fcntl(op->fd_entry->fd, F_RDADVISE, &advise), case EFBIG: break; // advised past the end of the file rdar://10415691 @@ -2268,6 +2256,17 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) // TODO: set disk status on error default: (void)dispatch_assume_zero(err); break; ); +#elif defined(HAVE_POSIX_FADVISE) + err = posix_fadvise(op->fd_entry->fd, advise.ra_offset, + (off_t)advise.ra_count, POSIX_FADV_WILLNEED); + switch (err) { + case 0: break; + case EINVAL: break; // unsupported advice or file type + case ESPIPE: break; // fd refers to a pipe or FIFO + default: (void)dispatch_assume_zero(err); break; + } +#else +#error "_dispatch_operation_advise not implemented on this platform" #endif #endif } From 9c481b342f490c8d07e33ce2af4191d9556bbd22 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 5 Nov 2018 16:01:42 -0800 Subject: [PATCH 038/249] Merge pull request #404 from adierking/readahead io: use posix_fadvise() instead of readahead() Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git 
a/PATCHES b/PATCHES index e631a1db1..9efa559b4 100644 --- a/PATCHES +++ b/PATCHES @@ -451,3 +451,4 @@ github commits starting with 29bdc2f from [9ec95bf] APPLIED rdar://54572081 [bd2367c] APPLIED rdar://54572081 [a736ea7] APPLIED rdar://54572081 +[3e4ea66] APPLIED rdar://54572081 From 73e145f1c948fc6e323733164d84e4cdd30113c2 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 5 Nov 2018 09:20:38 -0800 Subject: [PATCH 039/249] build: clean up the transition compatibility work This removes the compatibility locations that we were populating now that the consumers should have been updated. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b6593618d..e9fb24087 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -236,15 +236,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) endif() dispatch_set_linker(dispatch) -# Temporary staging; the various swift projects that depend on libdispatch -# all expect libdispatch.so to be in src/.libs/libdispatch.so -# So for now, make a copy so we don't have to do a coordinated commit across -# all the swift projects to change this assumption. -add_custom_command(TARGET dispatch POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory .libs - COMMAND ${CMAKE_COMMAND} -E copy $ .libs - COMMENT "Copying libdispatch to .libs") - install(TARGETS dispatch DESTINATION From 46fa585192f2f5dbd95bbd4507a419d2a00ab1d7 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 6 Nov 2018 06:57:07 -0800 Subject: [PATCH 040/249] Merge pull request #411 from compnerd/cleanup build: clean up the transition compatibility work Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 9efa559b4..014ee4acf 100644 --- a/PATCHES +++ b/PATCHES @@ -452,3 +452,4 @@ github commits starting with 29bdc2f from [bd2367c] APPLIED rdar://54572081 [a736ea7] APPLIED rdar://54572081 [3e4ea66] APPLIED rdar://54572081 +[c85c0d8] APPLIED rdar://54572081 From d264761b9b9874c353d1765f710e03015dea89ae Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 3 Nov 2018 14:52:22 -0700 Subject: [PATCH 041/249] build: honour the USE_*_LINKER flags for swift Ensure that we honour the linker setting when building the swift SDK overlay. This is particularly important for android. 
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e9fb24087..21f284387 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -109,6 +109,12 @@ if(ENABLE_SWIFT) PROPERTIES POSITION_INDEPENDENT_CODE YES) + if(USE_LLD_LINKER) + set(use_ld_flag -use-ld=lld) + elseif(USE_GOLD_LINKER) + set(use_ld_flag -use-ld=gold) + endif() + add_swift_library(swiftDispatch CFLAGS -fblocks @@ -117,6 +123,7 @@ if(ENABLE_SWIFT) ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap DispatchStubs LINK_FLAGS + ${use_ld_flag} -lDispatchStubs -L $ -ldispatch From 21d90b1978f75f82e7dea1e7106627bdbaeee934 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 6 Nov 2018 07:02:21 -0800 Subject: [PATCH 042/249] Merge pull request #421 from compnerd/linker build: honour the USE_*_LINKER flags for swift Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 014ee4acf..998301654 100644 --- a/PATCHES +++ b/PATCHES @@ -453,3 +453,4 @@ github commits starting with 29bdc2f from [a736ea7] APPLIED rdar://54572081 [3e4ea66] APPLIED rdar://54572081 [c85c0d8] APPLIED rdar://54572081 +[7187ea2] APPLIED rdar://54572081 From 29892e46711a7694ea3cb488ed6020ec65313d01 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 3 Nov 2018 14:53:20 -0700 Subject: [PATCH 043/249] build: add an explicit link to BlocksRuntime The SDK overlay requires the BlocksRuntime but did not explicitly link to it. This works on most platforms, however, Windows requires that all symbols are fully resolved (effectively `-z defs` on Linux). Add the missing dependency. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 21f284387..12105999e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -125,6 +125,8 @@ if(ENABLE_SWIFT) LINK_FLAGS ${use_ld_flag} -lDispatchStubs + -L $ + -lBlocksRuntime -L $ -ldispatch MODULE_NAME From 194cc4f63e45353b0f3594231bf5c8f162506639 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 6 Nov 2018 07:02:58 -0800 Subject: [PATCH 044/249] Merge pull request #422 from compnerd/blocks build: add an explicit link to BlocksRuntime Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 998301654..e8f9b4fde 100644 --- a/PATCHES +++ b/PATCHES @@ -454,3 +454,4 @@ github commits starting with 29bdc2f from [3e4ea66] APPLIED rdar://54572081 [c85c0d8] APPLIED rdar://54572081 [7187ea2] APPLIED rdar://54572081 +[30eeb14] APPLIED rdar://54572081 From c50daccb7a35d488cb0f2a0ae1ea294d8fa063ac Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Wed, 17 Oct 2018 14:04:52 -0700 Subject: [PATCH 045/249] tests: fall back to fork() if posix_spawnp() is not available Android only started supporting posix_spawnp() in API level 28 (Android 9), so it is not going to be available on most devices. If the system doesn't provide it, then fall back to using fork() and execve(). 
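A hedged sketch of the fallback being described (HAVE_POSIX_SPAWNP comes from the config check added below; the helper name and error handling are illustrative only):

    #include <sys/types.h>
    #include <unistd.h>
    #if HAVE_POSIX_SPAWNP
    #include <spawn.h>
    #endif

    extern char **environ;

    static pid_t spawn_child(const char *path, char *const argv[])
    {
    #if HAVE_POSIX_SPAWNP
        pid_t pid;
        if (posix_spawnp(&pid, path, NULL, NULL, argv, environ) != 0)
            return -1;
        return pid;
    #else
        pid_t pid = fork();
        if (pid == 0) {                 /* child */
            execve(path, argv, environ);
            _exit(127);                 /* execve returns only on failure */
        }
        return pid;                     /* -1 if fork() itself failed */
    #endif
    }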
Signed-off-by: Kim Topley --- CMakeLists.txt | 1 + cmake/config.h.in | 3 +++ 2 files changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index d5b1b6301..1a4c96cd9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -189,6 +189,7 @@ check_function_exists(mach_approximate_time HAVE_MACH_APPROXIMATE_TIME) check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT) check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) check_function_exists(posix_fadvise HAVE_POSIX_FADVISE) +check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) check_function_exists(strlcpy HAVE_STRLCPY) diff --git a/cmake/config.h.in b/cmake/config.h.in index 986e420f9..a076208e5 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -139,6 +139,9 @@ /* Define to 1 if you have the `posix_fadvise' function. */ #cmakedefine HAVE_POSIX_FADVISE +/* Define to 1 if you have the `posix_spawnp' function. */ +#cmakedefine HAVE_POSIX_SPAWNP + /* Define to 1 if you have the `pthread_key_init_np' function. */ #cmakedefine HAVE_PTHREAD_KEY_INIT_NP From 9f0dab00a3fdc371e88eebfa1ea5c0496ca19653 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 6 Nov 2018 14:49:36 -0800 Subject: [PATCH 046/249] Merge pull request #408 from adierking/fork tests: fall back to fork() if posix_spawnp() is not available Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index e8f9b4fde..8c94d3726 100644 --- a/PATCHES +++ b/PATCHES @@ -455,3 +455,4 @@ github commits starting with 29bdc2f from [c85c0d8] APPLIED rdar://54572081 [7187ea2] APPLIED rdar://54572081 [30eeb14] APPLIED rdar://54572081 +[6a5c6d8] APPLIED rdar://54572081 From 7151c0d9568e5a993c54a290b8422eb4ca4ffd4b Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 6 Nov 2018 14:50:11 -0800 Subject: [PATCH 047/249] Merge pull request #407 from adierking/testfile tests: read from a temporary file instead of vi on non-Apple platforms Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 8c94d3726..d8957dacb 100644 --- a/PATCHES +++ b/PATCHES @@ -456,3 +456,4 @@ github commits starting with 29bdc2f from [7187ea2] APPLIED rdar://54572081 [30eeb14] APPLIED rdar://54572081 [6a5c6d8] APPLIED rdar://54572081 +[64a12c6] APPLIED rdar://54572081 From 330fc49b0c0d96f38855cf3235e6338fb7592e75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Rodr=C3=ADguez=20Troiti=C3=B1o?= Date: Wed, 7 Nov 2018 12:04:29 -0800 Subject: [PATCH 048/249] Add string.h for memcpy in Data.swift. In Data.swift the C function memcpy is used as part of _copyBytesHelper. The function is defined in string.h, but that header is not referenced directly by any of the headers in dispatch/ or by DispatchOverlayShims.h in the compiler, which are the only two headers imported by the swift file. For some reason, this is working on Darwin and Ubuntu, probably because some included header includes string.h or defines memcpy, avoiding the problem. For those platforms that this already works, adding an extra include should not affect them. 
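The C-level rule at issue, in miniature (an illustrative snippet, not part of the patch):

    /* string.h declares memcpy(); without it, C99 compilers may reject the
     * call as an implicit declaration, and which other headers happen to
     * drag string.h in varies by platform -- exactly the fragility observed
     * between Darwin/Ubuntu and other targets. */
    #include <string.h>

    void copy_bytes(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
    }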
Signed-off-by: Kim Topley --- dispatch/dispatch.h | 1 + 1 file changed, 1 insertion(+) diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index cbc39ede6..836d62e09 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -37,6 +37,7 @@ #include #include #include +#include #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif From 09e301340dc931b873410e9d06a7957c8a6591f6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 9 Nov 2018 13:59:20 -0800 Subject: [PATCH 049/249] Merge pull request #426 from drodriguez/include-string-for-memcpy Add string.h for memcpy in Data.swift. Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index d8957dacb..de891ed05 100644 --- a/PATCHES +++ b/PATCHES @@ -457,3 +457,4 @@ github commits starting with 29bdc2f from [30eeb14] APPLIED rdar://54572081 [6a5c6d8] APPLIED rdar://54572081 [64a12c6] APPLIED rdar://54572081 +[09ec354] APPLIED rdar://54572081 From ecb4a756de7cb179df4d1da6f43eeb10cb0614f8 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 15 Nov 2018 15:39:24 -0800 Subject: [PATCH 050/249] build: rename flags to cmake_flags (NFC) Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 537e60cb8..7d8c1ea43 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -8,31 +8,31 @@ function(add_swift_target target) cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) - set(flags ${CMAKE_SWIFT_FLAGS}) + set(compile_flags ${CMAKE_SWIFT_FLAGS}) set(link_flags) if(AST_TARGET) - list(APPEND flags -target;${AST_TARGET}) + list(APPEND compile_flags -target;${AST_TARGET}) endif() if(AST_MODULE_NAME) - list(APPEND flags -module-name;${AST_MODULE_NAME}) + list(APPEND compile_flags -module-name;${AST_MODULE_NAME}) else() - list(APPEND flags -module-name;${target}) + list(APPEND compile_flags -module-name;${target}) endif() if(AST_MODULE_LINK_NAME) - list(APPEND flags -module-link-name;${AST_MODULE_LINK_NAME}) + list(APPEND compile_flags -module-link-name;${AST_MODULE_LINK_NAME}) endif() if(AST_MODULE_CACHE_PATH) - list(APPEND flags -module-cache-path;${AST_MODULE_CACHE_PATH}) + list(APPEND compile_flags -module-cache-path;${AST_MODULE_CACHE_PATH}) endif() if(AST_SWIFT_FLAGS) foreach(flag ${AST_SWIFT_FLAGS}) - list(APPEND flags ${flag}) + list(APPEND compile_flags ${flag}) endforeach() endif() if(AST_CFLAGS) foreach(flag ${AST_CFLAGS}) - list(APPEND flags -Xcc;${flag}) + list(APPEND compile_flags -Xcc;${flag}) endforeach() endif() if(AST_LINK_FLAGS) @@ -99,7 +99,7 @@ function(add_swift_target target) ${source} ${AST_DEPENDS} COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) + ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) list(APPEND objs ${obj}) list(APPEND mods ${mod}) @@ -122,7 +122,7 @@ function(add_swift_target target) ${docs} ${AST_DEPENDS} COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) + ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) 
endif() if(AST_LIBRARY) From 946b2875b9be8779b453eac7ac06d5c855ee56f8 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 15 Nov 2018 15:40:53 -0800 Subject: [PATCH 051/249] build: enable cross-linking Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 7d8c1ea43..bda9e6fb2 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -13,6 +13,7 @@ function(add_swift_target target) if(AST_TARGET) list(APPEND compile_flags -target;${AST_TARGET}) + list(APPEND link_flags -target;${AST_TARGET}) endif() if(AST_MODULE_NAME) list(APPEND compile_flags -module-name;${AST_MODULE_NAME}) @@ -25,6 +26,9 @@ function(add_swift_target target) if(AST_MODULE_CACHE_PATH) list(APPEND compile_flags -module-cache-path;${AST_MODULE_CACHE_PATH}) endif() + if(CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) + list(APPEND compile_flags -g) + endif() if(AST_SWIFT_FLAGS) foreach(flag ${AST_SWIFT_FLAGS}) list(APPEND compile_flags ${flag}) From 59d9ea65ff858919b6f59aea0ae3968800cfa131 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 16 Nov 2018 07:45:49 -0800 Subject: [PATCH 052/249] Merge pull request #427 from compnerd/cross-link enable cross-linking/synchronise with Foundation Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index de891ed05..4649b2751 100644 --- a/PATCHES +++ b/PATCHES @@ -458,3 +458,4 @@ github commits starting with 29bdc2f from [6a5c6d8] APPLIED rdar://54572081 [64a12c6] APPLIED rdar://54572081 [09ec354] APPLIED rdar://54572081 +[5bcd598] APPLIED rdar://54572081 From a65aa25916831ff2bc0cb4c30ae5c52ec73fbd2b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 11 Oct 2018 21:18:24 -0700 Subject: [PATCH 053/249] cmake: update SwiftSupport from XCTest Update the swift support CMake rules from XCTest. This adds support for additional features like partial module compilation which improves incremental builds. It allows for executable and library builds. 
Signed-off-by: Kim Topley
--- cmake/modules/SwiftSupport.cmake | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake
index bda9e6fb2..62189fa22 100644
--- a/cmake/modules/SwiftSupport.cmake
+++ b/cmake/modules/SwiftSupport.cmake
@@ -4,7 +4,7 @@ include(CMakeParseArguments) function(add_swift_target target) set(options LIBRARY;SHARED;STATIC) set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) - set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;SOURCES;SWIFT_FLAGS) + set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;RESOURCES;SOURCES;SWIFT_FLAGS) cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN})
@@ -132,16 +132,14 @@ function(add_swift_target target) if(AST_LIBRARY) set(emit_library -emit-library) endif() - if(library_kind STREQUAL SHARED) + if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED) add_custom_command(OUTPUT ${AST_OUTPUT} DEPENDS ${objs} ${AST_DEPENDS} COMMAND - ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs} - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs}) add_custom_target(${target} ALL DEPENDS
@@ -164,6 +162,26 @@ function(add_swift_target target) ${module} ${documentation}) endif() + + if(AST_RESOURCES) + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target} + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${target} + COMMAND + ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_RESOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources) + else() + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + endif() endfunction()
From 6c9bf7cb31c9b4ba2b17da45b3d44269e3e6c3eb Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Thu, 11 Oct 2018 21:27:22 -0700
Subject: [PATCH 054/249] cmake: support static and shared swift libraries
Enhance add_swift_target to support building static libraries. We would always generate shared libraries previously.
Signed-off-by: Kim Topley
--- cmake/modules/SwiftSupport.cmake | 1 + 1 file changed, 1 insertion(+)
diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake
index 62189fa22..b55f161e3 100644
--- a/cmake/modules/SwiftSupport.cmake
+++ b/cmake/modules/SwiftSupport.cmake
@@ -148,6 +148,7 @@ function(add_swift_target target) ${documentation}) else() add_library(${target}-static STATIC ${objs}) + add_dependencies(${target}-static ${AST_DEPENDS}) get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) set_target_properties(${target}-static
From 6d9b70e64bee0363e6a4c1d447797d1b7d5cf421 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Sat, 17 Nov 2018 08:03:46 -0800
Subject: [PATCH 055/249] build: install import libraries for Windows
This is needed to enable Windows to actually link against swiftDispatch to build code. When building against a distribution image rather than the build tree, this comes to light.
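Sketched from the consumer's side (file name and link invocation are hypothetical; the dispatch calls are the library's real C API), this is why the import library has to ship: a .lib carries only linker thunks, so without it a Windows client compiles but never links against the DLL.

    /* client.c -- link against the installed import library, e.g.
     * dispatch.lib; the matching dispatch.dll is loaded at run time. */
    #include <dispatch/dispatch.h>
    #include <stdio.h>

    static void work(void *ctx)
    {
        puts((const char *)ctx);
    }

    int main(void)
    {
        dispatch_queue_t q =
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
        dispatch_async_f(q, (void *)"hello from the DLL", work);
        dispatch_main(); /* never returns */
    }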
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 14 ++++++++++++++ src/CMakeLists.txt | 8 ++++++++ 2 files changed, 22 insertions(+) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index b55f161e3..dff4ebe23 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -70,6 +70,11 @@ function(add_swift_target target) set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) endif() endif() + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + if(AST_SHARED OR BUILD_SHARED_LIBS) + set(IMPORT_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_IMPORT_LIBRARY_PREFIX}${target}${CMAKE_IMPORT_LIBRARY_SUFFIX}) + endif() + endif() set(sources) foreach(source ${AST_SOURCES}) @@ -182,6 +187,15 @@ function(add_swift_target target) POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + if(AST_SHARED OR BUILD_SHARED_LIBS) + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E copy ${IMPORT_LIBRARY} ${CMAKE_CURRENT_BINARY_DIR}) + endif() + endif() endif() endfunction() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 12105999e..dec1dd436 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -267,5 +267,13 @@ if(ENABLE_SWIFT) ${swiftDispatch_OUTPUT_FILE} DESTINATION ${INSTALL_TARGET_DIR}) + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + if(BUILD_SHARED_LIBS) + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} + DESTINATION + ${INSTALL_TARGET_DIR}) + endif() + endif() endif() From 5f5eebce24e8611b5160a12bea1cb0f82552d39b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 17 Nov 2018 08:04:39 -0800 Subject: [PATCH 056/249] build: install os header for Windows We did not distribute generic_win_base.h which prevents the CDispatch module from being built when building against a distribution image. Signed-off-by: Kim Topley --- os/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/os/CMakeLists.txt b/os/CMakeLists.txt index 2c4d32e66..282af25f7 100644 --- a/os/CMakeLists.txt +++ b/os/CMakeLists.txt @@ -5,6 +5,7 @@ install(FILES object.h generic_unix_base.h + generic_win_base.h DESTINATION "${INSTALL_OS_HEADERS_DIR}") From 023f50dcc06fbf3313cce0a43720eea628840667 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 17 Nov 2018 08:05:52 -0800 Subject: [PATCH 057/249] build: spell `-fno-exceptions` for MSVC We did not disable exceptions when building with cl, do so like we do with the GCC style driver. 
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index dec1dd436..d90aeae80 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -174,7 +174,9 @@ if(WIN32) PRIVATE _CRT_NONSTDC_NO_WARNINGS) endif() -if(NOT "${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + target_compile_options(dispatch PRIVATE /EHs-c-) +else() target_compile_options(dispatch PRIVATE -fno-exceptions) endif() if(DISPATCH_ENABLE_ASSERTS) From 09528b00583d92b7168cdef43f4c29958b6415e8 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 17 Nov 2018 08:07:10 -0800 Subject: [PATCH 058/249] build: install libraries into the right location Windows splits the installation into the library directory (for the import library) and the runtime component (dll) which is installed into bin. This makes the Windows build happier. Signed-off-by: Kim Topley --- CMakeLists.txt | 5 +++-- src/CMakeLists.txt | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1a4c96cd9..10d4b7de9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -165,8 +165,9 @@ if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) endif() install(TARGETS BlocksRuntime - DESTINATION - ${INSTALL_TARGET_DIR}) + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) endif() check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d90aeae80..d41f4d305 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -249,8 +249,10 @@ dispatch_set_linker(dispatch) install(TARGETS dispatch - DESTINATION - "${INSTALL_TARGET_DIR}") + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) + if(ENABLE_SWIFT) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule From 7c19bda70dc2ffdc17104445fd50b91810afcd27 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 26 Nov 2018 08:49:23 -0800 Subject: [PATCH 059/249] Merge pull request #429 from compnerd/image build improvements Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 4649b2751..7ba51db2a 100644 --- a/PATCHES +++ b/PATCHES @@ -459,3 +459,4 @@ github commits starting with 29bdc2f from [64a12c6] APPLIED rdar://54572081 [09ec354] APPLIED rdar://54572081 [5bcd598] APPLIED rdar://54572081 +[7874a92] APPLIED rdar://54572081 From 0ca99b106dd01d019a0dc9e1fa01c39d7d71a94c Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 24 Nov 2018 12:58:27 -0800 Subject: [PATCH 060/249] dispatch: include `time.h` on windows time.h is needed on Windows to ensure that we have a definition of `timespec`. This is needed when building the swift SDK overlay. It seems that when cross-compiling, we were getting lucky with the `timespec` definition being provided by some other header being included. This repairs the build of libdispatch's swift SDK overlay on Windows targeting Windows. 
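A minimal illustration of the dependency (the wrapper function is hypothetical; dispatch_walltime() is the real API that consumes a struct timespec):

    #include <dispatch/dispatch.h>
    #include <time.h> /* on Windows this supplies struct timespec; without
                       * the explicit include, visibility of the type depends
                       * on transitive includes -- the "getting lucky" the
                       * patch message describes */

    dispatch_time_t half_second_after(time_t sec)
    {
        struct timespec ts = { .tv_sec = sec, .tv_nsec = 500000000 };
        return dispatch_walltime(&ts, 0);
    }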
Signed-off-by: Kim Topley --- dispatch/dispatch.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 836d62e09..0ed604fce 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -42,6 +42,9 @@ #include #endif #include +#if defined(_WIN32) +#include +#endif #if (defined(__linux__) || defined(__FreeBSD__)) && defined(__has_feature) #if __has_feature(modules) From bac98b0e58ddaa4ad10c9eabfe46ec44ea017830 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 26 Nov 2018 08:51:04 -0800 Subject: [PATCH 061/249] Merge pull request #430 from compnerd/it-is-time dispatch: include `time.h` on windows Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 7ba51db2a..2e471c8cf 100644 --- a/PATCHES +++ b/PATCHES @@ -460,3 +460,4 @@ github commits starting with 29bdc2f from [09ec354] APPLIED rdar://54572081 [5bcd598] APPLIED rdar://54572081 [7874a92] APPLIED rdar://54572081 +[619775e] APPLIED rdar://54572081 From ed2329c2bfaf1643c33bd1a937c04b2cf0602e6c Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 23 Nov 2018 12:00:05 -0800 Subject: [PATCH 062/249] libdispatch: clean up some Win64 warnings This cleans up the few warnings that remain when building libdispatch. It now builds clean of warnings with clang 8. Signed-off-by: Kim Topley --- src/init.c | 2 +- src/internal.h | 1 + src/io.c | 8 ++++---- src/queue.c | 4 +++- src/semaphore.c | 4 ++-- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/init.c b/src/init.c index 887ac874c..10e24dd38 100644 --- a/src/init.c +++ b/src/init.c @@ -1087,7 +1087,7 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) char path[MAX_PATH + 1] = {0}; DWORD dwLength = GetTempPathA(MAX_PATH, path); dispatch_assert(dwLength <= MAX_PATH + 1); - snprintf(&path[dwLength], MAX_PATH - dwLength, "libdispatch.%d.log", + snprintf(&path[dwLength], MAX_PATH - dwLength, "libdispatch.%lu.log", GetCurrentProcessId()); dispatch_logfile = _open(path, O_WRONLY | O_APPEND | O_CREAT, 0666); #else diff --git a/src/internal.h b/src/internal.h index 17ed1e628..9f3d6118c 100644 --- a/src/internal.h +++ b/src/internal.h @@ -289,6 +289,7 @@ upcast(dispatch_object_t dou) #include #include #endif +#include #ifdef __BLOCKS__ #if __has_include() diff --git a/src/io.c b/src/io.c index 24be10e8e..4631f32af 100644 --- a/src/io.c +++ b/src/io.c @@ -2561,11 +2561,11 @@ static size_t _dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) { dispatch_queue_t target = channel->do_targetq; - return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%x, %sfd_entry = %p, " + return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%" PRIxPTR ", %sfd_entry = %p, " "queue = %p, target = %s[%p], barrier_queue = %p, barrier_group = " "%p, err = 0x%x, low = 0x%zx, high = 0x%zx, interval%s = %llu ", channel->params.type == DISPATCH_IO_STREAM ? "stream" : "random", - channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? + (intptr_t)channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? "stopped, " : channel->atomic_flags & DIO_CLOSED ? "closed, " : "", channel->fd_entry, channel->queue, target && target->dq_label ? target->dq_label : "", target, channel->barrier_queue, @@ -2595,13 +2595,13 @@ _dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, { dispatch_queue_t target = op->do_targetq; dispatch_queue_t oqtarget = op->op_q ? 
op->op_q->do_targetq : NULL; - return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%x, fd_entry = %p, " + return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%" PRIxPTR ", fd_entry = %p, " "channel = %p, queue = %p -> %s[%p], target = %s[%p], " "offset = %lld, length = %zu, done = %zu, undelivered = %zu, " "flags = %u, err = 0x%x, low = 0x%zx, high = 0x%zx, " "interval%s = %llu ", op->params.type == DISPATCH_IO_STREAM ? "stream" : "random", op->direction == DOP_DIR_READ ? "read" : - "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, + "write", (intptr_t)(op->fd_entry ? op->fd_entry->fd : -1), op->fd_entry, op->channel, op->op_q, oqtarget && oqtarget->dq_label ? oqtarget->dq_label : "", oqtarget, target && target->dq_label ? target->dq_label : "", target, (long long)op->offset, op->length,
diff --git a/src/queue.c b/src/queue.c
index e12b70455..338824c2f 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -7452,7 +7452,9 @@ _dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) { // never returns, so burn bridges behind us _dispatch_clear_stack(0); -#if !defined(_WIN32) +#if defined(_WIN32) + for (;;) SuspendThread(GetCurrentThread()); +#else _dispatch_sigsuspend(); #endif }
diff --git a/src/semaphore.c b/src/semaphore.c
index 8b5be3b73..aba285189 100644
--- a/src/semaphore.c
+++ b/src/semaphore.c
@@ -76,7 +76,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, - "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); + "value = %" PRId64 ", orig = %" PRId64 " }", dsema->dsema_value, dsema->dsema_orig); return offset; }
@@ -198,7 +198,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dg), dg); offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, - "count = %d, gen = %d, waiters = %d, notifs = %d }", + "count = %"PRIu32", gen = %"PRIu32", waiters = %d, notifs = %d }", _dg_state_value(dg_state), _dg_state_gen(dg_state), (bool)(dg_state & DISPATCH_GROUP_HAS_WAITERS), (bool)(dg_state & DISPATCH_GROUP_HAS_NOTIFS));
From c1a96e9c9cccf974ba302ad10fb831ba97714106 Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Tue, 27 Nov 2018 11:08:16 -0800
Subject: [PATCH 063/249] Merge pull request #432 from compnerd/warnings
libdispatch: clean up some Win64 warnings
Signed-off-by: Kim Topley
--- PATCHES | 1 + 1 file changed, 1 insertion(+)
diff --git a/PATCHES b/PATCHES
index 2e471c8cf..09384a5bb 100644
--- a/PATCHES
+++ b/PATCHES
@@ -461,3 +461,4 @@ github commits starting with 29bdc2f from [5bcd598] APPLIED rdar://54572081 [7874a92] APPLIED rdar://54572081 [619775e] APPLIED rdar://54572081 +[e3ae79b] APPLIED rdar://54572081
From 1f2105b61fed252023dd7cbd79a481ae8886ae3d Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 27 Nov 2018 11:04:05 -0800
Subject: [PATCH 064/249] build: make the static build work well
This is needed to make the shared+static build for build-script work. The primary issue is the dependency on the module-maps doesn't work due to the lack of add_custom_command, so the target dependency cannot be hooked up. Additionally, undo the hack for the AST_NAME as we need that to be proper for the copy to succeed. Finally, make sure that we set the location for the archive output correctly.
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 7 ++++--- src/CMakeLists.txt | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index dff4ebe23..60f8b45a3 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -62,9 +62,7 @@ function(add_swift_target target) if(AST_SHARED OR BUILD_SHARED_LIBS) set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) else() - # NOTE(compnerd) this is a hack for the computation of the - # basename/dirname below for the static path. - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}) + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_STATIC_LIBRARY_PREFIX}${target}${CMAKE_STATIC_LIBRARY_SUFFIX}) endif() else() set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) @@ -155,10 +153,13 @@ function(add_swift_target target) add_library(${target}-static STATIC ${objs}) add_dependencies(${target}-static ${AST_DEPENDS}) get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) + string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn}) + string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn}) get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) set_target_properties(${target}-static PROPERTIES LINKER_LANGUAGE C + ARCHIVE_OUTPUT_DIRECTORY ${ast_output_dn} OUTPUT_DIRECTORY ${ast_output_dn} OUTPUT_NAME ${ast_output_bn}) add_custom_target(${target} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index d41f4d305..06396ca2a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -120,7 +120,7 @@ if(ENABLE_SWIFT) -fblocks -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap DEPENDS - ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap + module-maps DispatchStubs LINK_FLAGS ${use_ld_flag} From f3b0e48579f1509e875244adc8bdb9a338aeb88e Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 27 Nov 2018 12:36:32 -0800 Subject: [PATCH 065/249] Merge pull request #433 from compnerd/static-build build: make the static build work well Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 09384a5bb..b1d22b497 100644 --- a/PATCHES +++ b/PATCHES @@ -462,3 +462,4 @@ github commits starting with 29bdc2f from [7874a92] APPLIED rdar://54572081 [619775e] APPLIED rdar://54572081 [e3ae79b] APPLIED rdar://54572081 +[fb368f6] APPLIED rdar://54572081 From 0e44d51019bb6425af86962fbcfe4056ca53400d Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 1 Dec 2018 11:10:22 -0800 Subject: [PATCH 066/249] build: build libdispatch as position independent When building libdispatch as a static library, ensure that it is built as a PIC library. This is implicit when building a shared library. If the static library is not built as PIC, it cannot be linked into position-independent code on Unix targets. This partially resolves SR-9384!
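As context, a minimal self-contained sketch of the failure mode being avoided; the example_* target and file names are hypothetical, not part of this patch:

    # Objects compiled for a static archive are not position independent by
    # default on Unix, so linking the archive into a shared library fails
    # with relocation errors unless PIC is requested explicitly.
    add_library(example_static STATIC example.c)
    set_target_properties(example_static
        PROPERTIES
        POSITION_INDEPENDENT_CODE YES)
    add_library(example_shared SHARED shim.c)
    # Without the property above, this link step is what breaks on Unix.
    target_link_libraries(example_shared PRIVATE example_static)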
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 06396ca2a..4790ac6d9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -54,6 +54,11 @@ add_library(dispatch shims/time.h shims/tsd.h shims/yield.h) + +set_target_properties(dispatch + PROPERTIES + POSITION_INDEPENDENT_CODE YES) + if(WIN32) target_sources(dispatch PRIVATE From 0ab1a7b44f29a84ea743567513b35aeba6f06de9 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 3 Dec 2018 07:03:10 -0800 Subject: [PATCH 067/249] Merge pull request #434 from compnerd/pic build: build libdispatch as position independent Signed-off-by: Kim Topley --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index b1d22b497..2b4e4ae4f 100644 --- a/PATCHES +++ b/PATCHES @@ -463,3 +463,4 @@ github commits starting with 29bdc2f from [619775e] APPLIED rdar://54572081 [e3ae79b] APPLIED rdar://54572081 [fb368f6] APPLIED rdar://54572081 +[afa6cc3] APPLIED rdar://54572081 From 7932b0a52ac3c32800f72a2cbefaffe1951d3efb Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 12 Jan 2019 12:43:44 -0800 Subject: [PATCH 068/249] dispatch: add support for runloops on Windows This is used by the CoreFoundation port, which in turn is used by Foundation. Provide the functions so that we can build Foundation. The API change here makes the declaration match the definition and has no impact on the ABI. Signed-off-by: Kim Topley --- private/private.h | 12 +- src/CMakeLists.txt | 1 - src/queue.c | 625 +-- src/shims/generic_win_stubs.c | 24 - src/swift/DispatchStubs.cc | 5 + src/unifdef.Md0Qny | 7981 +++++++++++++++++++++++++++++++++ 6 files changed, 8017 insertions(+), 631 deletions(-) diff --git a/private/private.h b/private/private.h index df93d9a9f..795f06459 100644 --- a/private/private.h +++ b/private/private.h @@ -184,7 +184,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); #define DISPATCH_COCOA_COMPAT 0 #endif -#if DISPATCH_COCOA_COMPAT +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) #define DISPATCH_CF_SPI_VERSION 20160712 #if TARGET_OS_MAC typedef mach_port_t dispatch_runloop_handle_t; #elif defined(__linux__) || defined(__FreeBSD__) typedef int dispatch_runloop_handle_t; +#elif defined(_WIN32) +typedef void *dispatch_runloop_handle_t; #else #error "runloop support not implemented on this platform" #endif @@ -220,12 +222,14 @@ dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); -#if TARGET_OS_MAC +#if TARGET_OS_MAC || defined(_WIN32) API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW -mach_port_t +dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); +#endif +#if TARGET_OS_MAC API_AVAILABLE(macos(10.13.2), ios(11.2), tvos(11.2), watchos(4.2)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool @@ -256,7 +260,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); -#endif /* DISPATCH_COCOA_COMPAT */ +#endif /* DISPATCH_COCOA_COMPAT || defined(_WIN32) */ API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4790ac6d9..4da1b3f15 100644 --- a/src/CMakeLists.txt +++ 
b/src/CMakeLists.txt @@ -63,7 +63,6 @@ if(WIN32) target_sources(dispatch PRIVATE shims/generic_sys_queue.h - shims/generic_win_stubs.c shims/generic_win_stubs.h shims/getprogname.c) endif() diff --git a/src/queue.c b/src/queue.c index 338824c2f..7a6a0b3a9 100644 --- a/src/queue.c +++ b/src/queue.c @@ -234,6 +234,10 @@ const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(MACH_IPC_HANDOFF, .do_invoke = _dispatch_mach_ipc_handoff_invoke), #endif +#if !defined(__OPEN_SOURCE__) && OS_VENTURE_ENABLE + DC_VTABLE_ENTRY(VENTURE_DRAIN, + .do_invoke = _os_venture_drain_continuation_invoke), +#endif // !defined(__OPEN_SOURCE__) && OS_VENTURE_ENABLE }; DISPATCH_NOINLINE @@ -3546,603 +3550,6 @@ static bool _dispatch_queue_drain_should_narrow_slow(uint64_t now, dispatch_invoke_context_t dic) { - if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_qos_t qos = _dispatch_qos_from_pp(pp); - if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { - DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); - } - size_t idx = DISPATCH_QOS_BUCKET(qos); - os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; - uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); - - dic->dic_next_narrow_check = newval; - os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { - if (now < oldval) { - os_atomic_rmw_loop_give_up(return false); - } - }); - - if (!_pthread_workqueue_should_narrow(pp)) { - return false; - } - dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; - } - return true; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) -{ - uint64_t next_check = dic->dic_next_narrow_check; - if (unlikely(next_check)) { - uint64_t now = _dispatch_approximate_time(); - if (unlikely(next_check < now)) { - return _dispatch_queue_drain_should_narrow_slow(now, dic); - } - } - return false; -} -#else -#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) -#define _dispatch_queue_drain_should_narrow(dic) false -#endif - -/* - * Drain comes in 2 flavours (serial/concurrent) and 2 modes - * (redirecting or not). - * - * Serial - * ~~~~~~ - * Serial drain is about serial queues (width == 1). It doesn't support - * the redirecting mode, which doesn't make sense, and treats all continuations - * as barriers. Bookkeeping is minimal in serial flavour, most of the loop - * is optimized away. - * - * Serial drain stops if the width of the queue grows to larger than 1. - * Going through a serial drain prevents any recursive drain from being - * redirecting. - * - * Concurrent - * ~~~~~~~~~~ - * When in non-redirecting mode (meaning one of the target queues is serial), - * non-barriers and barriers alike run in the context of the drain thread. - * Slow non-barrier items are still all signaled so that they can make progress - * toward the dispatch_sync() that will serialize them all . - * - * In redirecting mode, non-barrier work items are redirected downward. - * - * Concurrent drain stops if the width of the queue becomes 1, so that the - * queue drain moves to the more efficient serial mode. 
- */ -DISPATCH_ALWAYS_INLINE -static dispatch_queue_wakeup_target_t -_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) -{ - dispatch_queue_t orig_tq = dq->do_targetq; - dispatch_thread_frame_s dtf; - struct dispatch_object_s *dc = NULL, *next_dc; - uint64_t dq_state, owned = *owned_ptr; - - if (unlikely(!dq->dq_items_tail)) return NULL; - - _dispatch_thread_frame_push(&dtf, dq); - if (serial_drain || _dq_state_is_in_barrier(owned)) { - // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` - // but width can change while draining barrier work items, so we only - // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` - owned = DISPATCH_QUEUE_IN_BARRIER; - } else { - owned &= DISPATCH_QUEUE_WIDTH_MASK; - } - - dc = _dispatch_queue_get_head(dq); - goto first_iteration; - - for (;;) { - dispatch_assert(dic->dic_barrier_waiter == NULL); - dc = next_dc; - if (unlikely(!dc)) { - if (!dq->dq_items_tail) { - break; - } - dc = _dispatch_queue_get_head(dq); - } - if (unlikely(_dispatch_needs_to_return_to_kernel())) { - _dispatch_return_to_kernel(); - } - if (unlikely(serial_drain != (dq->dq_width == 1))) { - break; - } - if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { - break; - } - if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { - dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); - if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { - break; - } - } - -first_iteration: - dq_state = os_atomic_load(&dq->dq_state, relaxed); - if (unlikely(_dq_state_is_suspended(dq_state))) { - break; - } - if (unlikely(orig_tq != dq->do_targetq)) { - break; - } - - if (serial_drain || _dispatch_object_is_barrier(dc)) { - if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { - if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_IN_BARRIER; - } - if (_dispatch_object_is_sync_waiter(dc) && - !(flags & DISPATCH_INVOKE_THREAD_BOUND)) { - dic->dic_barrier_waiter = dc; - goto out_with_barrier_waiter; - } - next_dc = _dispatch_queue_pop_head(dq, dc); - } else { - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // we just ran barrier work items, we have to make their - // effect visible to other sync work items on other threads - // that may start coming in after this point, hence the - // release barrier - os_atomic_xor2o(dq, dq_state, owned, release); - owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } else if (unlikely(owned == 0)) { - if (_dispatch_object_is_waiter(dc)) { - // sync "readers" don't observe the limit - _dispatch_queue_reserve_sync_width(dq); - } else if (!_dispatch_queue_try_acquire_async(dq)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_WIDTH_INTERVAL; - } - - next_dc = _dispatch_queue_pop_head(dq, dc); - if (_dispatch_object_is_waiter(dc)) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_non_barrier_waiter_redirect_or_wake(dq, dc); - continue; - } - - if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - // This is a re-redirect, overrides have already been applied by - // _dispatch_continuation_async* - // However we want to end up on the root queue matching `dc` - // qos, so pick up the current override of `dq` which includes - // dc's override (and maybe more) - _dispatch_continuation_redirect_push(dq, dc, - _dispatch_queue_max_qos(dq)); - continue; - } - } - - _dispatch_continuation_pop_inline(dc, dic, flags, 
dq); - } - - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // if we're IN_BARRIER we really own the full width too - owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } - if (dc) { - owned = _dispatch_queue_adjust_owned(dq, owned, dc); - } - *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; - *owned_ptr |= owned; - _dispatch_thread_frame_pop(&dtf); - return dc ? dq->do_targetq : NULL; - -out_with_no_width: - *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; - _dispatch_thread_frame_pop(&dtf); - return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; - -out_with_barrier_waiter: - if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) { - DISPATCH_INTERNAL_CRASH(0, - "Deferred continuation on source, mach channel or mgr"); - } - _dispatch_thread_frame_pop(&dtf); - return dq->do_targetq; -} - -DISPATCH_NOINLINE -static dispatch_queue_wakeup_target_t -_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t *owned) -{ - return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false); -} - -DISPATCH_NOINLINE -dispatch_queue_wakeup_target_t -_dispatch_lane_serial_drain(dispatch_lane_class_t dqu, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t *owned) -{ - flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; - return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true); -} - -void -_dispatch_queue_invoke_finish(dispatch_queue_t dq, - dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned) -{ - struct dispatch_object_s *dc = dic->dic_barrier_waiter; - dispatch_qos_t qos = dic->dic_barrier_waiter_bucket; - if (dc) { - dic->dic_barrier_waiter = NULL; - dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED; - owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; -#if DISPATCH_INTROSPECTION - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; - dsc->dsc_from_async = true; -#endif - if (qos) { - return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl, - dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned); - } - return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc, - DISPATCH_WAKEUP_CONSUME_2, owned); - } - - uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED; - if (tq == DISPATCH_QUEUE_WAKEUP_MGR) { - enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR; - } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - owned; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state |= DISPATCH_QUEUE_DIRTY; - if (_dq_state_is_runnable(new_state) && - !_dq_state_is_enqueued(new_state)) { - // drain was not interupted for suspension - // we will reenqueue right away, just put ENQUEUED back - new_state |= enqueued; - } - }); - old_state -= owned; - if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); - } - if ((old_state ^ new_state) & enqueued) { - dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); - } - return _dispatch_release_2_tailcall(dq); -} - -void -_dispatch_lane_activate(dispatch_lane_class_t dq) -{ - dispatch_queue_t tq = dq._dl->do_targetq; - dispatch_priority_t pri = dq._dl->dq_priority; - - // Normalize priority: keep the fallback only when higher than the floor - if (_dispatch_priority_fallback_qos(pri) <= _dispatch_priority_qos(pri) || - (_dispatch_priority_qos(pri) && - !(pri & DISPATCH_PRIORITY_FLAG_FLOOR))) { - pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; - pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; - dq._dl->dq_priority = pri; - } - tq = _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_lane_inherit_wlh_from_target(dq._dl, tq); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_wakeup_target_t -_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, uint64_t *owned) -{ - dispatch_queue_t otq = dq->do_targetq; - dispatch_queue_t cq = _dispatch_queue_get_current(); - - if (unlikely(cq != otq)) { - return otq; - } - if (dq->dq_width == 1) { - return _dispatch_lane_serial_drain(dq, dic, flags, owned); - } - return _dispatch_lane_concurrent_drain(dq, dic, flags, owned); -} - -DISPATCH_NOINLINE -void -_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags) -{ - _dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2); -} - -#pragma mark - -#pragma mark dispatch_workloop_t - -#define _dispatch_wl(dwl, qos) os_mpsc(dwl, dwl, s[DISPATCH_QOS_BUCKET(qos)]) -#define _dispatch_workloop_looks_empty(dwl, qos) \ - os_mpsc_looks_empty(_dispatch_wl(dwl, qos)) -#define _dispatch_workloop_get_head(dwl, qos) \ - os_mpsc_get_head(_dispatch_wl(dwl, qos)) -#define _dispatch_workloop_pop_head(dwl, qos, dc) \ - os_mpsc_pop_head(_dispatch_wl(dwl, qos), dc, do_next) -#define _dispatch_workloop_push_update_tail(dwl, qos, dou) \ - os_mpsc_push_update_tail(_dispatch_wl(dwl, qos), dou, do_next) -#define _dispatch_workloop_push_update_prev(dwl, qos, prev, dou) \ - os_mpsc_push_update_prev(_dispatch_wl(dwl, qos), prev, dou, do_next) - -dispatch_workloop_t -dispatch_workloop_copy_current(void) -{ - dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); - if (likely(dwl)) { - _os_object_retain_with_resurrect(dwl->_as_os_obj); - return dwl; - } - return NULL; -} - -bool -dispatch_workloop_is_current(dispatch_workloop_t dwl) -{ - return _dispatch_get_wlh() == (dispatch_wlh_t)dwl; -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_workloop_role_bits(void) -{ -#if DISPATCH_USE_KEVENT_WORKLOOP - if (likely(_dispatch_kevent_workqueue_enabled)) { - return DISPATCH_QUEUE_ROLE_BASE_WLH; - } -#endif - return DISPATCH_QUEUE_ROLE_BASE_ANON; -} - -bool -_dispatch_workloop_should_yield_4NW(void) -{ - dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); - if (likely(dwl)) { - return _dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos; - } - return false; -} - -DISPATCH_NOINLINE -static dispatch_workloop_t -_dispatch_workloop_create(const char *label, uint64_t dq_state) -{ - dispatch_queue_flags_t dqf = DQF_AUTORELEASE_ALWAYS; - dispatch_workloop_t dwl; - - if (label) { - const char *tmp = _dispatch_strdup_if_mutable(label); - if (tmp != label) { - dqf |= DQF_LABEL_NEEDS_FREE; - label = tmp; - } - } - - 
dq_state |= _dispatch_workloop_role_bits(); - - dwl = _dispatch_queue_alloc(workloop, dqf, 1, dq_state)._dwl; - dwl->dq_label = label; - dwl->do_targetq = _dispatch_get_default_queue(true); - if (!(dq_state & DISPATCH_QUEUE_INACTIVE)) { - dwl->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT | - _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); - } - _dispatch_object_debug(dwl, "%s", __func__); - return _dispatch_introspection_queue_create(dwl)._dwl; -} - -dispatch_workloop_t -dispatch_workloop_create(const char *label) -{ - return _dispatch_workloop_create(label, 0); -} - -dispatch_workloop_t -dispatch_workloop_create_inactive(const char *label) -{ - return _dispatch_workloop_create(label, DISPATCH_QUEUE_INACTIVE); -} - -void -dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t dwl, - dispatch_autorelease_frequency_t frequency) -{ - if (frequency == DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) { - _dispatch_queue_atomic_flags_set_and_clear(dwl, - DQF_AUTORELEASE_ALWAYS, DQF_AUTORELEASE_NEVER); - } else { - _dispatch_queue_atomic_flags_set_and_clear(dwl, - DQF_AUTORELEASE_NEVER, DQF_AUTORELEASE_ALWAYS); - } - _dispatch_queue_setter_assert_inactive(dwl); -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) -{ - if (dwl->dwl_attr) { - free(dwl->dwl_attr); - } -} - -DISPATCH_ALWAYS_INLINE -static bool -_dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) -{ - return dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & - (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | - DISPATCH_WORKLOOP_ATTR_HAS_POLICY | - DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); -} - -void -dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, - uint64_t flags) -{ - _dispatch_queue_setter_assert_inactive(dwl); - _dispatch_workloop_attributes_alloc_if_needed(dwl); - - if (priority) { - dwl->dwl_attr->dwla_sched.sched_priority = priority; - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_SCHED; - } else { - dwl->dwl_attr->dwla_sched.sched_priority = 0; - dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_SCHED; - } - - if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { - dwl->dwl_attr->dwla_policy = POLICY_RR; - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; - } else { - dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; - } -} - -void -dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, - qos_class_t cls, int relpri, uint64_t flags) -{ - _dispatch_queue_setter_assert_inactive(dwl); - _dispatch_workloop_attributes_alloc_if_needed(dwl); - - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - - if (qos) { - dwl->dwl_attr->dwla_pri = _dispatch_priority_make(qos, relpri); - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; - } else { - dwl->dwl_attr->dwla_pri = 0; - dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; - } - - if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { - dwl->dwl_attr->dwla_policy = POLICY_RR; - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; - } else { - dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; - } -} - -void -dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, - qos_class_t cls, uint64_t flags) -{ - dispatch_workloop_set_qos_class_floor(dwl, cls, 0, flags); -} - -void -dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, - uint32_t refillms) -{ - _dispatch_queue_setter_assert_inactive(dwl); - _dispatch_workloop_attributes_alloc_if_needed(dwl); - - if 
((dwl->dwl_attr->dwla_flags & (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | - DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS)) == 0) { - DISPATCH_CLIENT_CRASH(0, "workloop qos class or priority must be " - "set before cpupercent"); - } - - dwl->dwl_attr->dwla_cpupercent.percent = percent; - dwl->dwl_attr->dwla_cpupercent.refillms = refillms; - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; -} - -#if DISPATCH_IOHID_SPI -void -_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks) -{ - _dispatch_queue_setter_assert_inactive(dwl); - _dispatch_workloop_attributes_alloc_if_needed(dwl); - - dwl->dwl_attr->dwla_observers = *observer_hooks; - dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; -} -#endif - -static void -_dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, - pthread_attr_t *attr) -{ - uint64_t old_state, new_state; - dispatch_queue_global_t dprq; - - dprq = dispatch_pthread_root_queue_create( - "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); - - dwl->do_targetq = dprq->_as_dq; - _dispatch_retain(dprq); - dispatch_release(dprq); - - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { - new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; - new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; - }); -} - -static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { - DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), - .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - .do_ctxt = NULL, - .dq_label = "com.apple.root.workloop-custom", - .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | - DISPATCH_PRIORITY_SATURATED_OVERRIDE, - .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, - .dgq_thread_pool_size = 1, -}; - -static void -_dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) -{ - dispatch_workloop_attr_t dwla = dwl->dwl_attr; - pthread_attr_t attr; - - pthread_attr_init(&attr); - if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { - dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; - } - if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { - pthread_attr_setschedparam(&attr, &dwla->dwla_sched); - // _dispatch_async_and_wait_should_always_async detects when a queue - // targets a root queue that is not part of the root queues array in - // order to force async_and_wait to async. We want this path to always - // be taken on workloops that have a scheduler priority set. 
- dwl->do_targetq = - (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; - } - if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { - pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); - } - if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { - pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, - (unsigned long)dwla->dwla_cpupercent.refillms); - } - if (_dispatch_workloop_has_kernel_attributes(dwl)) { - int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); - switch (rv) { - case 0: - dwla->dwla_flags |= DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY; - break; case ENOTSUP: /* simulator fallback */ _dispatch_workloop_activate_simulator_fallback(dwl, &attr); @@ -6919,7 +6326,7 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, DISPATCH_STATIC_GLOBAL(bool _dispatch_program_is_probably_callback_driven); -#if DISPATCH_COCOA_COMPAT +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_main_q_handle_pred); DISPATCH_ALWAYS_INLINE @@ -6930,6 +6337,8 @@ _dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) return MACH_PORT_VALID(handle); #elif defined(__linux__) return handle >= 0; +#elif defined(_WIN32) + return handle != INVALID_HANDLE_VALUE; #else #error "runloop support not implemented on this platform" #endif @@ -6944,6 +6353,8 @@ _dispatch_runloop_queue_get_handle(dispatch_lane_t dq) #elif defined(__linux__) // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#elif defined(_WIN32) + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); #else #error "runloop support not implemented on this platform" #endif @@ -6959,6 +6370,8 @@ _dispatch_runloop_queue_set_handle(dispatch_lane_t dq, #elif defined(__linux__) // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#elif defined(_WIN32) + dq->do_ctxt = (void *)(uintptr_t)handle; #else #error "runloop support not implemented on this platform" #endif @@ -7013,6 +6426,8 @@ _dispatch_runloop_queue_handle_init(void *ctxt) } } handle = fd; +#elif defined(_WIN32) + handle = INVALID_HANDLE_VALUE; #else #error "runloop support not implemented on this platform" #endif @@ -7039,11 +6454,15 @@ _dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) #elif defined(__linux__) int rc = close(handle); (void)dispatch_assume_zero(rc); +#elif defined(_WIN32) + CloseHandle(handle); #else #error "runloop support not implemented on this platform" #endif } +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) +#if DISPATCH_COCOA_COMPAT static inline void _dispatch_runloop_queue_class_poke(dispatch_lane_t dq) { @@ -7116,6 +6535,8 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, } } +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) + DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) @@ -7354,7 +6775,7 @@ _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) _dispatch_runloop_queue_wakeup(upcast(dq)._dl, 0, false); } -#if TARGET_OS_MAC +#if TARGET_OS_MAC || defined(_WIN32) dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { @@ -7365,10 +6786,10 @@ _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) } #endif -#endif // DISPATCH_COCOA_COMPAT +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) #pragma mark - #pragma mark dispatch_main_queue -#if DISPATCH_COCOA_COMPAT +#if 
DISPATCH_COCOA_COMPAT || defined(_WIN32) dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void) @@ -7402,7 +6823,7 @@ _dispatch_main_queue_callback_4CF( _dispatch_main_q.dq_side_suspend_cnt = false; } -#endif // DISPATCH_COCOA_COMPAT +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) DISPATCH_NOINLINE void diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c deleted file mode 100644 index 67b6f5134..000000000 --- a/src/shims/generic_win_stubs.c +++ /dev/null @@ -1,24 +0,0 @@ -#include "internal.h" - -/* - * This file contains stubbed out functions we are using during - * the initial Windows port. When the port is complete, this file - * should be empty (and thus removed). - */ - -void -_dispatch_runloop_queue_dispose(dispatch_queue_t dq DISPATCH_UNUSED, - bool *allow_free DISPATCH_UNUSED) -{ - WIN_PORT_ERROR(); -} - -void -_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq DISPATCH_UNUSED) -{ - WIN_PORT_ERROR(); -} - -/* - * Stubbed out static data - */ diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index 594f66648..0625cc91f 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -13,7 +13,11 @@ #include #include +#if defined(__ELF__) || defined(__MACH__) || defined(__WASM__) #define DISPATCH_RUNTIME_STDLIB_INTERFACE __attribute__((__visibility__("default"))) +#else +#define DISPATCH_RUNTIME_STDLIB_INTERFACE __declspec(dllexport) +#endif #if USE_OBJC @protocol OS_dispatch_source; @@ -54,6 +58,7 @@ static void _dispatch_overlay_constructor() { #endif /* USE_OBJC */ #if !USE_OBJC +DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); #endif diff --git a/src/unifdef.Md0Qny b/src/unifdef.Md0Qny new file mode 100644 index 000000000..0c9fcddba --- /dev/null +++ b/src/unifdef.Md0Qny @@ -0,0 +1,7981 @@ +/* + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if HAVE_MACH +#include "protocol.h" // _dispatch_send_wakeup_runloop_thread +#endif + +static inline void _dispatch_root_queues_init(void); +static void _dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +static void _dispatch_lane_non_barrier_complete(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags); +#if HAVE_PTHREAD_WORKQUEUE_QOS +static inline void _dispatch_queue_wakeup_with_override( + dispatch_queue_class_t dq, uint64_t dq_state, + dispatch_wakeup_flags_t flags); +#endif +static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, + struct dispatch_object_s *dc, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, uint64_t owned); +static inline bool +_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, + uint64_t dq_state); + +#pragma mark - +#pragma mark dispatch_assert_queue + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) +{ + _dispatch_client_assert_fail( + "Block was %sexpected to execute on queue [%s]", + expected ? "" : "not ", dq->dq_label ?: ""); +} + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_assert_queue_barrier_fail(dispatch_queue_t dq) +{ + _dispatch_client_assert_fail( + "Block was expected to act as a barrier on queue [%s]", + dq->dq_label ?: ""); +} + +void +dispatch_assert_queue(dispatch_queue_t dq) +{ + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_LANE_TYPE && + metatype != _DISPATCH_WORKLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " + "dispatch_assert_queue()"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (likely(_dq_state_drain_locked_by_self(dq_state))) { + return; + } + if (likely(_dispatch_thread_frame_find_queue(dq))) { + return; + } + _dispatch_assert_queue_fail(dq, true); +} + +void +dispatch_assert_queue_not(dispatch_queue_t dq) +{ + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_LANE_TYPE && + metatype != _DISPATCH_WORKLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " + "dispatch_assert_queue_not()"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by_self(dq_state))) { + _dispatch_assert_queue_fail(dq, false); + } + if (unlikely(_dispatch_thread_frame_find_queue(dq))) { + _dispatch_assert_queue_fail(dq, false); + } +} + +void +dispatch_assert_queue_barrier(dispatch_queue_t dq) +{ + dispatch_assert_queue(dq); + + if (likely(dq->dq_width == 1)) { + return; + } + + if (likely(dq->do_targetq)) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (likely(_dq_state_is_in_barrier(dq_state))) { + return; + } + } + + _dispatch_assert_queue_barrier_fail(dq); +} + +#pragma mark - +#pragma mark _dispatch_set_priority_and_mach_voucher +#if HAVE_PTHREAD_WORKQUEUE_QOS + +DISPATCH_NOINLINE +void +_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, + mach_voucher_t kv) +{ + _pthread_set_flags_t pflags = (_pthread_set_flags_t)0; + if (pp && _dispatch_set_qos_class_enabled) { + pthread_priority_t old_pri = _dispatch_get_priority(); + if (pp != old_pri) { + if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) { + pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; + // when we unbind, overcomitness can flip, so we need to learn + // it from the defaultpri, see 
_dispatch_priority_compute_update + pp |= (_dispatch_get_basepri() & + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + } else { + // else we need to keep the one that is set in the current pri + pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + } + if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + pflags |= _PTHREAD_SET_SELF_QOS_FLAG; + } + uint64_t mgr_dq_state = + os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { + DISPATCH_INTERNAL_CRASH(pp, + "Changing the QoS while on the manager queue"); + } + if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager"); + } + if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { + DISPATCH_INTERNAL_CRASH(old_pri, + "Cannot turn a manager thread into a normal one"); + } + } + } + if (kv != VOUCHER_NO_MACH_VOUCHER) { +#if VOUCHER_USE_MACH_VOUCHER + pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; +#endif + } + if (!pflags) return; + int r = _pthread_set_properties_self(pflags, pp, kv); + if (r == EINVAL) { + DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed"); + } + (void)dispatch_assume_zero(r); +} + +DISPATCH_NOINLINE +voucher_t +_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, + voucher_t v, dispatch_thread_set_self_t flags) +{ + voucher_t ov = DISPATCH_NO_VOUCHER; + mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; + if (v != DISPATCH_NO_VOUCHER) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + ov = _voucher_get(); + if (ov == v && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (retained && v) _voucher_release_no_dispose(v); + ov = DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + kv = _voucher_swap_and_get_mach_voucher(ov, v); + } + } + if (!(flags & DISPATCH_THREAD_PARK)) { + _dispatch_set_priority_and_mach_voucher_slow(priority, kv); + } + if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (ov) _voucher_release(ov); + ov = DISPATCH_NO_VOUCHER; + } + return ov; +} +#endif +#pragma mark - +#pragma mark dispatch_continuation_t + +static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +static void _dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); + +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, + .do_invoke = _dispatch_mach_msg_async_reply_invoke), +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(WORKLOOP_STEALING, + .do_invoke = _dispatch_workloop_stealer_invoke), + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_invoke = _dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_invoke = _dispatch_queue_override_invoke), +#endif +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_IPC_HANDOFF, + .do_invoke = _dispatch_mach_ipc_handoff_invoke), +#endif +}; + +DISPATCH_NOINLINE +static void DISPATCH_TSD_DTOR_CC 
+_dispatch_cache_cleanup(void *value) +{ + dispatch_continuation_t dc, next_dc = value; + + while ((dc = next_dc)) { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); + } +} + +static void +_dispatch_force_cache_cleanup(void) +{ + dispatch_continuation_t dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, NULL); + _dispatch_cache_cleanup(dc); + } +} + +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +DISPATCH_NOINLINE +void +_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) +{ + _dispatch_continuation_free_to_heap(dc); + dispatch_continuation_t next_dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + int cnt; + if (!dc || (cnt = dc->dc_cache_cnt - + _dispatch_continuation_cache_limit) <= 0) { + return; + } + do { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); + } while (--cnt && (dc = next_dc)); + _dispatch_thread_setspecific(dispatch_cache_key, next_dc); +} +#endif + +DISPATCH_NOINLINE +void +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu) +{ + _dispatch_continuation_pop_inline(dou, dic, flags, dqu._dq); +} + +#pragma mark - +#pragma mark dispatch_block_create + +#if __BLOCKS__ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_flags_valid(dispatch_block_flags_t flags) +{ + return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_block_flags_t +_dispatch_block_normalize_flags(dispatch_block_flags_t flags) +{ + if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) { + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + } + if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { + flags &= ~(dispatch_block_flags_t)DISPATCH_BLOCK_INHERIT_QOS_CLASS; + } + return flags; +} + +static inline dispatch_block_t +_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, + voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) +{ + dispatch_block_flags_t unmodified_flags = flags; + pthread_priority_t unmodified_pri = pri; + + flags = _dispatch_block_normalize_flags(flags); + bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + + if (!(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { + if (flags & DISPATCH_BLOCK_DETACHED) { + voucher = VOUCHER_NULL; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } else if (flags & DISPATCH_BLOCK_NO_VOUCHER) { + voucher = DISPATCH_NO_VOUCHER; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } else if (assign) { +#if OS_VOUCHER_ACTIVITY_SPI + voucher = VOUCHER_CURRENT; +#endif + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } + } +#if OS_VOUCHER_ACTIVITY_SPI + if (voucher == VOUCHER_CURRENT) { + voucher = _voucher_get(); + } +#endif + if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { + pri = _dispatch_priority_propagate(); + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + } + dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); + +#if DISPATCH_DEBUG + dispatch_assert(_dispatch_block_get_data(db)); +#endif + + _dispatch_trace_block_create_with_voucher_and_priority(db, + _dispatch_Block_invoke(block), unmodified_flags, + ((unmodified_flags & DISPATCH_BLOCK_HAS_PRIORITY) ? 
unmodified_pri : + (unsigned long)UINT32_MAX), + _dispatch_get_priority(), pri); + return db; +} + +dispatch_block_t +dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, + block); +} + +dispatch_block_t +dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, + dispatch_qos_class_t qos_class, int relative_priority, + dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, + pri, block); +} + +dispatch_block_t +dispatch_block_create_with_voucher(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + flags &= ~DISPATCH_BLOCK_NO_VOUCHER; + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, + block); +} + +dispatch_block_t +dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_qos_class_t qos_class, + int relative_priority, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } + flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); + flags &= ~(DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_NO_QOS_CLASS); + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, + pri, block); +} + +void +dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) { + DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " + "dispatch_block_perform()"); + } + flags = _dispatch_block_normalize_flags(flags); + + voucher_t voucher = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_DETACHED) { + voucher = VOUCHER_NULL; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } + + struct dispatch_block_private_data_s dbpds = + DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block, voucher); + return _dispatch_block_invoke_direct(&dbpds); +} + +void +_dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) +{ + dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; + + pthread_priority_t op = 0, p = 0; + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } + ov = _dispatch_set_priority_and_voucher(p, v, 0); + dbpd->dbpd_thread = _dispatch_tid_self(); + _dispatch_client_callout(dbpd->dbpd_block, + 
_dispatch_Block_invoke(dbpd->dbpd_block)); + _dispatch_reset_priority_and_voucher(op, ov); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); + } + } +} + +void +_dispatch_block_sync_invoke(void *block) +{ + dispatch_block_t b = block; + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; + + voucher_t ov = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); + } + dbpd->dbpd_block(); + _dispatch_reset_voucher(ov, 0); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); + } + } + + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // balances dispatch_{,barrier_,}sync + _dispatch_release_2(boost_dq); + } +} + +#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 + +DISPATCH_NOINLINE +static void +_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); + } + + if (likely(!(atomic_flags & DBF_CANCELED))) { + dbpd->dbpd_block(); + } + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); + } + } + + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // balances dispatch_{,barrier_,group_}async + _dispatch_release_2(boost_dq); + } + + if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { + Block_release(b); + } +} + +static void +_dispatch_block_async_invoke(void *block) +{ + _dispatch_block_async_invoke2(block, 0); +} + +static void +_dispatch_block_async_invoke_and_release(void *block) +{ + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); +} + +void +dispatch_block_cancel(dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_cancel()"); + } + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); +} + +intptr_t +dispatch_block_testcancel(dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_testcancel()"); + } + return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); +} + +intptr_t +dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_wait()"); + } + + unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + DBF_WAITING, relaxed); + if (unlikely(flags & (DBF_WAITED | 
DBF_WAITING))) { + DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " + "more than once"); + } + + // If we know the queue where this block is + // enqueued, or the thread that's executing it, then we should boost + // it here. + + pthread_priority_t pp = _dispatch_get_priority(); + + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // release balances dispatch_{,barrier_,group_}async. + // Can't put the queue back in the timeout case: the block might + // finish after we fell out of group_wait and see our NULL, so + // neither of us would ever release. Side effect: After a _wait + // that times out, subsequent waits will not boost the qos of the + // still-running block. + dx_wakeup(boost_dq, _dispatch_qos_from_pp(pp), + DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2); + } + + mach_port_t boost_th = dbpd->dbpd_thread; + if (boost_th) { + _dispatch_thread_override_start(boost_th, pp, dbpd); + } + + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (unlikely(performed > 1 || (boost_th && boost_dq))) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and waited for"); + } + + intptr_t ret = dispatch_group_wait(dbpd->dbpd_group, timeout); + + if (boost_th) { + _dispatch_thread_override_end(boost_th, dbpd); + } + + if (ret) { + // timed out: reverse our changes + os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); + } else { + os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); + // don't need to re-test here: the second call would see + // the first call's WAITING + } + + return ret; +} + +void +dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, + dispatch_block_t notification_block) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " + "dispatch_block_notify()"); + } + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (unlikely(performed > 1)) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and observed"); + } + + return dispatch_group_notify(dbpd->dbpd_group, queue, notification_block); +} + +DISPATCH_NOINLINE +dispatch_qos_t +_dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_t dq, dispatch_block_flags_t flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); + dispatch_block_flags_t block_flags = dbpd->dbpd_flags; + uintptr_t dc_flags = dc->dc_flags; + pthread_priority_t pp = 0; + + // balanced in d_block_async_invoke_and_release or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); + } + + if (dc_flags & DC_FLAG_CONSUME) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else { + dc->dc_func = _dispatch_block_async_invoke; + } + + flags |= block_flags; + if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pp = dbpd->dbpd_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + } else if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + // _dispatch_source_handler_alloc is calling is and doesn't want us + // to propagate priorities + pp = 0; + } else { + pp = _dispatch_priority_propagate(); + } + _dispatch_continuation_priority_set(dc, dq, pp, flags); + if (block_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BARRIER; + } + if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + voucher_t v = dbpd->dbpd_voucher; + dc->dc_voucher = (v && v 
!= DISPATCH_NO_VOUCHER) ? _voucher_retain(v) + : v; + _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); + _dispatch_voucher_ktrace_dc_push(dc); + } else { + _dispatch_continuation_voucher_set(dc, flags); + } + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; + dc->dc_flags = dc_flags; + return _dispatch_qos_from_pp(dc->dc_priority); +} + +#endif // __BLOCKS__ +#pragma mark - +#pragma mark dispatch_barrier_async + +DISPATCH_NOINLINE +static void +_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, dispatch_block_flags_t flags, + uintptr_t dc_flags) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; + dispatch_qos_t qos; + + if (likely(!dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, 0, dc_flags); + } + + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc_flags); +} + +DISPATCH_NOINLINE +void +_dispatch_barrier_async_detached_f(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER | DC_FLAG_ALLOCATED; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_trace_item_push(dq, dc); + dx_push(dq._dq, dc, 0); +} + +#ifdef __BLOCKS__ +void +dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; + dispatch_qos_t qos; + + qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc_flags); +} +#endif + +#pragma mark - +#pragma mark dispatch_async + +void +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + struct dispatch_continuation_s *other_dc = dc->dc_other; + dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; + // if we went through _dispatch_root_queue_push_override, + // the "right" root queue was stuffed into dc_func + dispatch_queue_global_t assumed_rq = (dispatch_queue_global_t)dc->dc_func; + dispatch_lane_t dq = dc->dc_data; + dispatch_queue_t rq, old_dq; + dispatch_priority_t old_dbp; + + if (ctxt_flags) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= ctxt_flags; + } + old_dq = _dispatch_queue_get_current(); + if (assumed_rq) { + old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); + _dispatch_set_basepri(dq->dq_priority); + } else { + old_dbp = _dispatch_set_basepri(dq->dq_priority); + } + + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, { + _dispatch_continuation_pop(other_dc, dic, flags, dq); + }); + _dispatch_thread_frame_pop(&dtf); + if (assumed_rq) _dispatch_queue_set_current(old_dq); + _dispatch_reset_basepri(old_dbp); + + rq = dq->do_targetq; + while (unlikely(rq->do_targetq && rq 
!= old_dq)) { + _dispatch_lane_non_barrier_complete(upcast(rq)._dl, 0); + rq = rq->do_targetq; + } + + // pairs with _dispatch_async_redirect_wrap + _dispatch_lane_non_barrier_complete(dq, DISPATCH_WAKEUP_CONSUME_2); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_async_redirect_wrap(dispatch_lane_t dq, dispatch_object_t dou) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dou._do->do_next = NULL; + dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); + dc->dc_func = NULL; + dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); + dc->dc_data = dq; + dc->dc_other = dou._do; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_retain_2(dq); // released in _dispatch_async_redirect_invoke + return dc; +} + +DISPATCH_NOINLINE +static void +_dispatch_continuation_redirect_push(dispatch_lane_t dl, + dispatch_object_t dou, dispatch_qos_t qos) +{ + if (likely(!_dispatch_object_is_redirection(dou))) { + dou._dc = _dispatch_async_redirect_wrap(dl, dou); + } else if (!dou._dc->dc_ctxt) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. + dou._dc->dc_ctxt = (void *) + (uintptr_t)_dispatch_queue_autorelease_frequency(dl); + } + + dispatch_queue_t dq = dl->do_targetq; + if (!qos) qos = _dispatch_priority_qos(dq->dq_priority); + dx_push(dq, dou, qos); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + dispatch_block_flags_t flags) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DC_FLAG_CONSUME; + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, flags, dc_flags); + } + + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + _dispatch_async_f(dq, ctxt, func, 0); +} + +DISPATCH_NOINLINE +void +dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_async_f(dq, ctxt, func, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); +} + +#ifdef __BLOCKS__ +void +dispatch_async(dispatch_queue_t dq, dispatch_block_t work) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME; + dispatch_qos_t qos; + + qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} +#endif + +#pragma mark - +#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete + +DISPATCH_ALWAYS_INLINE +static uint64_t +_dispatch_lane_non_barrier_complete_try_lock(dispatch_lane_t dq, + uint64_t old_state, uint64_t new_state, uint64_t owner_self) +{ + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(new_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= owner_self; + } else if (_dq_state_is_dirty(old_state)) { + new_state |= 
DISPATCH_QUEUE_ENQUEUED; + } + return new_state; +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags, uint64_t old_state, uint64_t new_state) +{ + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + if (_dq_state_is_dirty(old_state)) { + // + // dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); + } + return _dispatch_lane_barrier_complete(dq, 0, flags); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); + } + dispatch_assert(!_dq_state_is_base_wlh(new_state)); + _dispatch_trace_item_push(dq->do_targetq, dq); + return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state)); + } + + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + _dispatch_release_2_tailcall(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_non_barrier_complete(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags) +{ + uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); + + // see _dispatch_lane_resume() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (unlikely(_dq_state_drain_locked(old_state))) { + // make drain_try_unlock() fail and reconsider whether there's + // enough width now for a new item + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (likely(_dq_state_is_runnable(new_state))) { + new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, + old_state, new_state, owner_self); + } + }); + + _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_client_callout(ctxt, func); + _dispatch_perfmon_workitem_inc(); + _dispatch_thread_frame_pop(&dtf); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_function_invoke(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, + uintptr_t dc_flags) +{ + bool barrier = (dc_flags & DC_FLAG_BARRIER); + do { + if (dq == stop_dq) return; + if (barrier) { + dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE); + } else { + _dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0); + } + dq = dq->do_targetq; + barrier = (dq->dq_width == 1); + } while (unlikely(dq->do_targetq)); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq, + void *ctxt, dispatch_function_t func, uintptr_t dc_flags + DISPATCH_TRACE_ARG(void *dc)) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + _dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_invoke_and_complete(dispatch_lane_t dq, void *ctxt, + dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) +{ + 
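// non-barrier sync fast path: dq is the base queue of its hierarchy,
+	// so invoke inline on the calling thread, then give back the width
+	// that _dispatch_queue_try_reserve_sync_width() reserved on the way in
+	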
_dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + _dispatch_lane_non_barrier_complete(dq, 0); +} + +/* + * For queues we can cheat and inline the unlock code, which is invalid + * for objects with a more complex state machine (sources or mach channels) + */ +DISPATCH_NOINLINE +static void +_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, + void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) { + return _dispatch_lane_barrier_complete(dq, 0, 0); + } + + // Presence of any of these bits requires more work that only + // _dispatch_*_barrier_complete() handles properly + // + // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without + // checking the role is sloppy, but is a super fast check, and neither of + // these bits should be set if the lock was never contended/discovered. + const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | + DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + uint64_t old_state, new_state; + + // similar to _dispatch_queue_drain_try_unlock + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + if (unlikely(old_state & fail_unlock_mask)) { + os_atomic_rmw_loop_give_up({ + return _dispatch_lane_barrier_complete(dq, 0, 0); + }); + } + }); + if (_dq_state_is_base_wlh(old_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + } +} + +#pragma mark - +#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake + +DISPATCH_NOINLINE +static void +_dispatch_waiter_wake_wlh_anon(dispatch_sync_context_t dsc) +{ + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_wqthread_override_start(dsc->dsc_waiter, + dsc->dsc_override_qos); + } + _dispatch_thread_event_signal(&dsc->dsc_event); +} + +DISPATCH_NOINLINE +static void +_dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, + uint64_t old_state, uint64_t new_state) +{ + dispatch_wlh_t waiter_wlh = dsc->dc_data; + +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // We need to interact with a workloop if any of the following 3 cases: + // 1. the current owner of the lock has a SYNC_WAIT knote to destroy + // 2. the next owner of the lock is a workloop, we need to make sure it has + // a SYNC_WAIT knote to destroy when it will later release the lock + // 3. the waiter is waiting on a workloop (which may be different from `wlh` + // if the hierarchy was mutated after the next owner started waiting) + // + // However, note that even when (2) is true, the next owner may be waiting + // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next + // owner is really woken up when the thread event is signaled. 
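+	//
+	// Note that the two checks below are therefore not exclusive: a single
+	// wake may need both the event loop owner handoff and the anonymous
+	// thread event signal.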
+ // +#endif + if (_dq_state_in_sync_transfer(old_state) || + _dq_state_in_sync_transfer(new_state) || + (waiter_wlh != DISPATCH_WLH_ANON)) { + _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); + } + if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { + _dispatch_waiter_wake_wlh_anon(dsc); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_async_waiter_update(dispatch_sync_context_t dsc, + dispatch_queue_class_t dqu) +{ + dispatch_queue_t dq = dqu._dq; + dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + if (p) { + pthread_priority_t pp = _dispatch_priority_to_pp_strip_flags(p); + if (pp > (dsc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + dsc->dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + } + + if (dsc->dsc_autorelease == 0) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dqu); + dqf &= (dispatch_queue_flags_t)_DQF_AUTORELEASE_MASK; + dsc->dsc_autorelease = (uint8_t)(dqf / DQF_AUTORELEASE_ALWAYS); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, + dispatch_object_t dou) +{ + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc; + uint64_t old_state; + + dispatch_assert(!(dsc->dc_flags & DC_FLAG_BARRIER)); + +again: + old_state = os_atomic_load2o(dq, dq_state, relaxed); + + if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { + dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); + } + + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + _dispatch_async_waiter_update(dsc, dq); + } + + if (unlikely(_dq_state_is_inner_queue(old_state))) { + dispatch_queue_t tq = dq->do_targetq; + if (likely(tq->dq_width == 1)) { + dsc->dc_flags |= DC_FLAG_BARRIER; + } else { + dsc->dc_flags &= ~DC_FLAG_BARRIER; + if (_dispatch_queue_try_reserve_sync_width(upcast(tq)._dl)) { + dq = upcast(tq)._dl; + goto again; + } + } + return dx_push(tq, dsc, 0); + } + + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dq; + } + return _dispatch_waiter_wake_wlh_anon(dsc); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, + dispatch_object_t dc, dispatch_wakeup_flags_t flags, + uint64_t old_state, uint64_t new_state) +{ + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc._dc; + dispatch_queue_t dq = dqu._dq; + dispatch_wlh_t wlh = DISPATCH_WLH_ANON; + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { + dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); + } + } + + if (_dq_state_is_base_wlh(old_state)) { + wlh = (dispatch_wlh_t)dq; + } else if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } + + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued_on_target(new_state)) { + // If the thread request still exists, we need to leave it a +1 + _dispatch_release_no_dispose(dq); + } else { + _dispatch_release_2_no_dispose(dq); + } + } else if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued_on_target(old_state) && + !_dq_state_is_enqueued_on_target(new_state)) { + // If we cleared the enqueued bit, we're about to destroy the workloop + // thread request, and we need to consume its +1. 
+ _dispatch_release_no_dispose(dq); + } + + // + // Past this point we are borrowing the reference of the sync waiter + // + if (unlikely(_dq_state_is_inner_queue(old_state))) { + dispatch_queue_t tq = dq->do_targetq; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + _dispatch_async_waiter_update(dsc, dq); + } + if (likely(tq->dq_width == 1)) { + dsc->dc_flags |= DC_FLAG_BARRIER; + } else { + dispatch_lane_t dl = upcast(tq)._dl; + dsc->dc_flags &= ~DC_FLAG_BARRIER; + if (_dispatch_queue_try_reserve_sync_width(dl)) { + return _dispatch_non_barrier_waiter_redirect_or_wake(dl, dc); + } + } + // passing the QoS of `dq` helps pushing on low priority waiters with + // legacy workloops. +#if DISPATCH_INTROSPECTION + dsc->dsc_from_async = false; +#endif + return dx_push(tq, dsc, _dq_state_max_qos(old_state)); + } + +#if DISPATCH_INTROSPECTION + if (dsc->dsc_from_async) { + _dispatch_trace_runtime_event(async_sync_handoff, dq, 0); + } else { + _dispatch_trace_runtime_event(sync_sync_handoff, dq, 0); + } +#endif // DISPATCH_INTROSPECTION + + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // Falling into case (2) of _dispatch_async_and_wait_f_slow, dc_other is + // the bottom queue + dsc->dc_other = dq; + } + return _dispatch_waiter_wake(dsc, wlh, old_state, new_state); +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, + struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags, + uint64_t enqueued_bits) +{ + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + struct dispatch_object_s *next_dc; + uint64_t next_owner = 0, old_state, new_state; + + next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); + next_dc = _dispatch_queue_pop_head(dq, dc); + +transfer_lock_again: + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= next_owner; + + if (_dq_state_is_base_wlh(old_state)) { + new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; + if (next_dc) { + // we know there's a next item, keep the enqueued bit if any + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + goto transfer_lock_again; + }); + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else { + new_state -= enqueued_bits; + } + }); + + return _dispatch_barrier_waiter_redirect_or_wake(dq, dc, flags, + old_state, new_state); +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, + uint64_t owned) +{ + uint64_t old_state, new_state, enqueue; + dispatch_queue_t tq; + + if (target == DISPATCH_QUEUE_WAKEUP_MGR) { + tq = _dispatch_mgr_q._as_dq; + enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } else if (target) { + tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? 
dq->do_targetq : target; + enqueue = DISPATCH_QUEUE_ENQUEUED; + } else { + tq = NULL; + enqueue = 0; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state - owned, qos); + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (unlikely(_dq_state_is_suspended(old_state))) { + if (likely(_dq_state_is_base_wlh(old_state))) { + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else if (enqueue) { + if (!_dq_state_is_enqueued(old_state)) { + new_state |= enqueue; + } + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + return dx_wakeup(dq, qos, flags); + }); + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } + }); + old_state -= owned; + dispatch_assert(_dq_state_drain_locked_by_self(old_state)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); + + if (_dq_state_is_enqueued(new_state)) { + _dispatch_trace_runtime_event(sync_async_handoff, dq, 0); + } + +#if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_is_base_wlh(old_state)) { + // - Only non-"du_is_direct" sources & mach channels can be enqueued + // on the manager. + // + // - Only dispatch_source_cancel_and_wait() and + // dispatch_source_set_*_handler() use the barrier complete codepath, + // none of which are used by mach channels. + // + // Hence no source-ish object can both be a workloop and need to use the + // manager at the same time. + dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); + if (_dq_state_is_enqueued_on_target(old_state) || + _dq_state_is_enqueued_on_target(new_state) || + _dq_state_received_sync_wait(old_state) || + _dq_state_in_sync_transfer(old_state)) { + return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, + old_state, new_state, flags); + } + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } + return; + } +#endif + + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
+	}
+
+	if (tq) {
+		if (likely((old_state ^ new_state) & enqueue)) {
+			dispatch_assert(_dq_state_is_enqueued(new_state));
+			dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
+			return _dispatch_queue_push_queue(tq, dq, new_state);
+		}
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+		// when doing sync to async handoff,
+		// if the queue received an override we have to forcefully redrive
+		// the same override so that a new stealer is enqueued because
+		// the previous one may be gone already
+		if (_dq_state_should_override(new_state)) {
+			return _dispatch_queue_wakeup_with_override(dq, new_state, flags);
+		}
+#endif
+	}
+	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+		return _dispatch_release_2_tailcall(dq);
+	}
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_lane_drain_non_barriers(dispatch_lane_t dq,
+		struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags)
+{
+	size_t owned_width = dq->dq_width;
+	struct dispatch_object_s *next_dc;
+
+	// see _dispatch_lane_drain, go in non barrier mode, and drain items
+
+	os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_IN_BARRIER, release);
+
+	do {
+		if (likely(owned_width)) {
+			owned_width--;
+		} else if (_dispatch_object_is_waiter(dc)) {
+			// sync "readers" don't observe the limit
+			_dispatch_queue_reserve_sync_width(dq);
+		} else if (!_dispatch_queue_try_acquire_async(dq)) {
+			// no width left
+			break;
+		}
+		next_dc = _dispatch_queue_pop_head(dq, dc);
+		if (_dispatch_object_is_waiter(dc)) {
+			_dispatch_non_barrier_waiter_redirect_or_wake(dq, dc);
+		} else {
+			_dispatch_continuation_redirect_push(dq, dc,
+					_dispatch_queue_max_qos(dq));
+		}
+drain_again:
+		dc = next_dc;
+	} while (dc && !_dispatch_object_is_barrier(dc));
+
+	uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self();
+	uint64_t owned = owned_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+
+	if (dc) {
+		owned = _dispatch_queue_adjust_owned(dq, owned, dc);
+	}
+
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+		new_state = old_state - owned;
+		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+		new_state &= ~DISPATCH_QUEUE_DIRTY;
+
+		// similar to _dispatch_lane_non_barrier_complete():
+		// if by the time we get here all redirected non barrier syncs are
+		// done and returned their width to the queue, we may be the last
+		// chance for the next item to run/be re-driven.
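+		// a non NULL `dc` means we stopped on a barrier (or ran out of
+		// width): leave the queue dirty and try to take over as the drain
+		// owner in case the full width has already been returned
+		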
+ if (unlikely(dc)) { + new_state |= DISPATCH_QUEUE_DIRTY; + new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, + old_state, new_state, owner_self); + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + goto drain_again; + }); + } + }); + + old_state -= owned; + _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_lane_t dq = dqu._dl; + + if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + struct dispatch_object_s *dc = _dispatch_queue_get_head(dq); + if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) { + if (_dispatch_object_is_waiter(dc)) { + return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0); + } + } else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) { + return _dispatch_lane_drain_non_barriers(dq, dc, flags); + } + + if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned); +} + +static void +_dispatch_async_and_wait_invoke(void *ctxt) +{ + dispatch_sync_context_t dsc = ctxt; + dispatch_queue_t top_dq = dsc->dc_other; + dispatch_invoke_flags_t iflags; + + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we want to see the calling thread + // dispatch thread frames, so we fake the link, and then undo it + iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + dispatch_invoke_with_autoreleasepool(iflags, { + dispatch_thread_frame_s dtf; + _dispatch_introspection_sync_begin(top_dq); + _dispatch_thread_frame_push_and_rebase(&dtf, top_dq, &dsc->dsc_dtf); + _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); + _dispatch_thread_frame_pop(&dtf); + }); + + // communicate back to _dispatch_async_and_wait_f_slow and + // _dispatch_sync_f_slow on which queue the work item was invoked + // so that the *_complete_recurse() call stops unlocking when it reaches it + dsc->dc_other = _dispatch_queue_get_current(); + dsc->dsc_func = NULL; + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_signal(&dsc->dsc_event); // release + } else { + _dispatch_event_loop_cancel_waiter(dsc); + } +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_wait_prepare(dispatch_queue_t dq) +{ + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (_dq_state_is_suspended(old_state) || + !_dq_state_is_base_wlh(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + if (!_dq_state_drain_locked(old_state) || + _dq_state_in_sync_transfer(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + }); + return new_state; +} + +static void +_dispatch_wait_compute_wlh(dispatch_lane_t dq, dispatch_sync_context_t dsc) +{ + bool needs_locking = _dispatch_queue_is_mutable(dq); + + if (needs_locking) { + dsc->dsc_release_storage = true; + _dispatch_queue_sidelock_lock(dq); + } + + 
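// walk one level toward the base queue: sync waiters push onto the
+	// workloop at the bottom of the hierarchy when there is one, and use
+	// an anonymous thread event (DISPATCH_WLH_ANON) otherwise
+	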
dispatch_queue_t tq = dq->do_targetq; + uint64_t tq_state = _dispatch_wait_prepare(tq); + + if (_dq_state_is_suspended(tq_state) || + _dq_state_is_base_anon(tq_state)) { + dsc->dsc_release_storage = false; + dsc->dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(tq_state)) { + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + dsc->dsc_wlh_is_workloop = true; + dsc->dsc_release_storage = false; + } else if (dsc->dsc_release_storage) { + _dispatch_queue_retain_storage(tq); + } + dsc->dc_data = (dispatch_wlh_t)tq; + } else { + _dispatch_wait_compute_wlh(upcast(tq)._dl, dsc); + } + if (needs_locking) { + if (dsc->dsc_wlh_is_workloop) { + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + } + _dispatch_queue_sidelock_unlock(dq); + } +} + +DISPATCH_NOINLINE +static void +__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) +{ + uint64_t dq_state = _dispatch_wait_prepare(dq); + if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch_sync called on queue " + "already owned by current thread"); + } + + // Blocks submitted to the main thread MUST run on the main thread, and + // dispatch_async_and_wait also executes on the remote context rather than + // the current thread. + // + // For both these cases we need to save the frame linkage for the sake of + // _dispatch_async_and_wait_invoke + _dispatch_thread_frame_save_state(&dsc->dsc_dtf); + + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_base_anon(dq_state)) { + dsc->dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(dq_state)) { + dsc->dc_data = (dispatch_wlh_t)dq; + } else { + _dispatch_wait_compute_wlh(upcast(dq)._dl, dsc); + } + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + dsc->dsc_override_qos_floor = dsc->dsc_override_qos = + (uint8_t)_dispatch_get_basepri_override_qos_floor(); + _dispatch_thread_event_init(&dsc->dsc_event); + } + dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); + _dispatch_trace_runtime_event(sync_wait, dq, 0); + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_wait(&dsc->dsc_event); // acquire + } else { + _dispatch_event_loop_wait_for_ownership(dsc); + } + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_destroy(&dsc->dsc_event); + // If _dispatch_sync_waiter_wake() gave this thread an override, + // ensure that the root queue sees it. + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_set_basepri_override_qos(dsc->dsc_override_qos); + } + } +} + +#pragma mark - +#pragma mark _dispatch_barrier_trysync_or_async_f + +DISPATCH_NOINLINE +static void +_dispatch_barrier_trysync_or_async_f_complete(dispatch_lane_t dq, + void *ctxt, dispatch_function_t func, uint32_t flags) +{ + dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE; + + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { + uint64_t dq_state = os_atomic_sub2o(dq, dq_state, + DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); + if (!_dq_state_is_suspended(dq_state)) { + wflags |= DISPATCH_WAKEUP_CONSUME_2; + } + } + dx_wakeup(dq, 0, wflags); +} + +// Use for mutation of queue-/source-internal state only +// ignores target queue hierarchy! +DISPATCH_NOINLINE +void +_dispatch_barrier_trysync_or_async_f(dispatch_lane_t dq, void *ctxt, + dispatch_function_t func, uint32_t flags) +{ + dispatch_tid tid = _dispatch_tid_self(); + uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 
1 : 0;
+	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid,
+			suspend_count))) {
+		return _dispatch_barrier_async_detached_f(dq, ctxt, func);
+	}
+	if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) {
+		_dispatch_retain_2(dq); // see _dispatch_lane_suspend
+	}
+	_dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags);
+}
+
+#pragma mark -
+#pragma mark dispatch_sync / dispatch_barrier_sync
+
+DISPATCH_NOINLINE
+static void
+_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
+		dispatch_function_t func, uintptr_t top_dc_flags,
+		dispatch_queue_class_t dqu, uintptr_t dc_flags)
+{
+	dispatch_queue_t top_dq = top_dqu._dq;
+	dispatch_queue_t dq = dqu._dq;
+	if (unlikely(!dq->do_targetq)) {
+		return _dispatch_sync_function_invoke(dq, ctxt, func);
+	}
+
+	pthread_priority_t pp = _dispatch_get_priority();
+	struct dispatch_sync_context_s dsc = {
+		.dc_flags = DC_FLAG_SYNC_WAITER | dc_flags,
+		.dc_func = _dispatch_async_and_wait_invoke,
+		.dc_ctxt = &dsc,
+		.dc_other = top_dq,
+		.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
+		.dc_voucher = _voucher_get(),
+		.dsc_func = func,
+		.dsc_ctxt = ctxt,
+		.dsc_waiter = _dispatch_tid_self(),
+	};
+
+	_dispatch_trace_item_push(top_dq, &dsc);
+	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
+
+	if (dsc.dsc_func == NULL) {
+		// dsc_func being cleared means that the block ran on another thread,
+		// i.e. case (2) as listed in _dispatch_async_and_wait_f_slow.
+		dispatch_queue_t stop_dq = dsc.dc_other;
+		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
+	}
+
+	_dispatch_introspection_sync_begin(top_dq);
+	_dispatch_trace_item_pop(top_dq, &dsc);
+	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func, top_dc_flags
+			DISPATCH_TRACE_ARG(&dsc));
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt,
+		dispatch_function_t func, uintptr_t dc_flags)
+{
+	dispatch_tid tid = _dispatch_tid_self();
+	dispatch_queue_t tq = dq->do_targetq;
+
+	do {
+		if (likely(tq->dq_width == 1)) {
+			if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
+				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq,
+						DC_FLAG_BARRIER);
+			}
+		} else {
+			dispatch_queue_concurrent_t dl = upcast(tq)._dl;
+			if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
+				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0);
+			}
+		}
+		tq = tq->do_targetq;
+	} while (unlikely(tq->do_targetq));
+
+	_dispatch_introspection_sync_begin(dq);
+	_dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags
+			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
+					dq, ctxt, func, dc_flags)));
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
+		dispatch_function_t func, uintptr_t dc_flags)
+{
+	dispatch_tid tid = _dispatch_tid_self();
+
+	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
+		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
+	}
+
+	dispatch_lane_t dl = upcast(dq)._dl;
+	// The more correct thing to do would be to merge the qos of the thread
+	// that just acquired the barrier lock into the queue state.
+	//
+	// However this is too expensive for the fast path, so skip doing it.
+	// The chosen tradeoff is that if an enqueue on a lower priority thread
+	// contends with this fast path, this thread may receive a useless override.
+ // + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) { + return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl, + DC_FLAG_BARRIER | dc_flags); + } + + if (unlikely(dl->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dl, ctxt, func, + DC_FLAG_BARRIER | dc_flags); + } + _dispatch_introspection_sync_begin(dl); + _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func + DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop( + dq, ctxt, func, dc_flags | DC_FLAG_BARRIER))); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) +{ + _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_barrier_sync_f_inline(dq, ctxt, func, 0); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) +{ + if (likely(dq->dq_width == 1)) { + return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags); + } + + if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { + DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync"); + } + + dispatch_lane_t dl = upcast(dq)._dl; + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) { + return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags); + } + + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dl, ctxt, func, dc_flags); + } + _dispatch_introspection_sync_begin(dl); + _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG( + _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags))); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + uintptr_t dc_flags) +{ + _dispatch_sync_f_inline(dq, ctxt, func, dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + _dispatch_sync_f_inline(dq, ctxt, func, 0); +} + +#ifdef __BLOCKS__ +DISPATCH_NOINLINE +static void +_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, + uintptr_t dc_flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + pthread_priority_t op = 0, p = 0; + dispatch_block_flags_t flags = dbpd->dbpd_flags; + + if (flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; + } else { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; + } + + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } + ov = _dispatch_set_priority_and_voucher(p, v, 0); + + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); + } + if (dc_flags & DC_FLAG_BARRIER) { + _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, + dc_flags); + } else { + _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags); + } + 
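// restore the priority and voucher that the block's private data
+	// overrode for the duration of the synchronous invoke above
+	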
_dispatch_reset_priority_and_voucher(op, ov);
+}
+
+void
+dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
+{
+	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
+	if (unlikely(_dispatch_block_has_private_data(work))) {
+		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
+	}
+	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
+}
+
+DISPATCH_NOINLINE
+void
+dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
+{
+	uintptr_t dc_flags = DC_FLAG_BLOCK;
+	if (unlikely(_dispatch_block_has_private_data(work))) {
+		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
+	}
+	_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
+}
+#endif // __BLOCKS__
+
+#pragma mark -
+#pragma mark dispatch_async_and_wait
+
+DISPATCH_ALWAYS_INLINE
+static inline dispatch_wlh_t
+_dispatch_fake_wlh(dispatch_queue_t dq)
+{
+	dispatch_wlh_t new_wlh = DISPATCH_WLH_ANON;
+	if (likely(dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) ||
+			_dq_state_is_base_wlh(os_atomic_load2o(dq, dq_state, relaxed))) {
+		new_wlh = (dispatch_wlh_t)dq;
+	}
+	dispatch_wlh_t old_wlh = _dispatch_get_wlh();
+	_dispatch_thread_setspecific(dispatch_wlh_key, new_wlh);
+	return old_wlh;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_restore_wlh(dispatch_wlh_t wlh)
+{
+	_dispatch_thread_setspecific(dispatch_wlh_key, wlh);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_async_and_wait_invoke_and_complete_recurse(dispatch_queue_t dq,
+		dispatch_sync_context_t dsc, dispatch_queue_t bottom_q,
+		uintptr_t top_dc_flags)
+{
+	dispatch_invoke_flags_t iflags;
+	dispatch_wlh_t old_wlh = _dispatch_fake_wlh(bottom_q);
+
+	iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS;
+	dispatch_invoke_with_autoreleasepool(iflags, {
+		dispatch_block_flags_t bflags = DISPATCH_BLOCK_HAS_PRIORITY;
+		dispatch_thread_frame_s dtf;
+		pthread_priority_t op = 0, p = dsc->dc_priority;
+		voucher_t ov, v = dsc->dc_voucher;
+
+		_dispatch_introspection_sync_begin(dq);
+		_dispatch_thread_frame_push(&dtf, dq);
+		op = _dispatch_block_invoke_should_set_priority(bflags, p);
+		ov = _dispatch_set_priority_and_voucher(op ? p : 0, v, 0);
+		_dispatch_trace_item_pop(dq, dsc);
+		_dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func);
+		_dispatch_perfmon_workitem_inc();
+		_dispatch_reset_priority_and_voucher(op, ov);
+		_dispatch_thread_frame_pop(&dtf);
+	});
+
+	_dispatch_trace_item_complete(dsc);
+
+	_dispatch_restore_wlh(old_wlh);
+	_dispatch_sync_complete_recurse(dq, NULL, top_dc_flags);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags,
+		dispatch_sync_context_t dsc, dispatch_queue_t tq)
+{
+	/* dc_other is an in-out parameter.
+	 *
+	 * As an in-param, it specifies the top queue on which the blocking
+	 * primitive is called.
+	 *
+	 * As an out-param, it refers to the queue up till which we have the drain
+	 * lock. This is slightly different depending on how we come out of
+	 * _WAIT_FOR_QUEUE.
+	 *
+	 * Case 1:
+	 * If the continuation is to be invoked on another thread - for
+	 * async_and_wait, or because we ran on a thread-bound main queue - then
+	 * someone already called _dispatch_async_and_wait_invoke, which invoked
+	 * the block. dc_other as an out-param here tells the enqueuer the queue
+	 * up till which the enqueuer got the drain lock, so that we know what to
+	 * unlock on the way out. This is the case where the enqueuer owns part
+	 * of the locks in the queue hierarchy (but not all).
+	 *
+	 * Case 2:
+	 * If the continuation is to be invoked on the enqueuing thread - because
+	 * we were contending with another sync or async_and_wait - then the
+	 * enqueuer returns from _WAIT_FOR_QUEUE without having invoked the block.
+	 * The enqueuer has had the locks for the rest of the queue hierarchy
+	 * handed off to it, so dc_other specifies the queue up till which it has
+	 * the locks, which in this case is the bottom queue of the hierarchy.
+	 * So it needs to unlock everything up till the bottom queue on the way
+	 * out.
+	 */
+
+	__DISPATCH_WAIT_FOR_QUEUE__(dsc, tq);
+
+	if (unlikely(dsc->dsc_func == NULL)) {
+		// see _dispatch_async_and_wait_invoke
+		dispatch_queue_t stop_dq = dsc->dc_other;
+		return _dispatch_sync_complete_recurse(dq, stop_dq, top_dc_flags);
+	}
+
+	// see _dispatch_*_redirect_or_wake
+	dispatch_queue_t bottom_q = dsc->dc_other;
+	return _dispatch_async_and_wait_invoke_and_complete_recurse(dq, dsc,
+			bottom_q, top_dc_flags);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu,
+		uint64_t dq_state)
+{
+	// If the queue is anchored at a pthread root queue for which we can't
+	// mirror attributes, then we need to take the async path.
+	return !_dq_state_is_inner_queue(dq_state) &&
+			!_dispatch_is_in_root_queues_array(dqu._dq->do_targetq);
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq,
+		dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags)
+{
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) {
+		// Remove the async_and_wait flag but drive down the slow path so that
+		// we do the synchronous wait. We are guaranteed that dq is the base
+		// queue.
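+		// (dq targets a pthread root queue that is not in the root queues
+		// array, so its attributes can't be mirrored; see
+		// _dispatch_async_and_wait_should_always_async above)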
+ // + // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so + // set dc_other to dq + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; + return false; + } + if (likely(dc_flags & DC_FLAG_BARRIER)) { + return _dispatch_queue_try_acquire_barrier_sync(dq, tid); + } + return _dispatch_queue_try_reserve_sync_width(upcast(dq)._dl); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, + dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t top_flags) +{ + dispatch_queue_t dq = top_dq; + uintptr_t dc_flags = top_flags; + + _dispatch_trace_item_push(top_dq, dsc); + + for (;;) { + if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, dsc, tid, + dc_flags))) { + return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); + } + + _dispatch_async_waiter_update(dsc, dq); + if (likely(!dq->do_targetq->do_targetq)) break; + dq = dq->do_targetq; + if (likely(dq->dq_width == 1)) { + dc_flags |= DC_FLAG_BARRIER; + } else { + dc_flags &= ~DC_FLAG_BARRIER; + } + dsc->dc_flags = dc_flags; + } + + _dispatch_async_and_wait_invoke_and_complete_recurse(top_dq, dsc, dq, + top_flags); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_and_wait_f(dispatch_queue_t dq, + void *ctxt, dispatch_function_t func, uintptr_t dc_flags) +{ + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_tid tid = _dispatch_tid_self(); + struct dispatch_sync_context_s dsc = { + .dc_flags = dc_flags, + .dc_func = _dispatch_async_and_wait_invoke, + .dc_ctxt = &dsc, + .dc_other = dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = _voucher_get(), + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_waiter = tid, + }; + + return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_async_and_wait_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT; + if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; + return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_barrier_async_and_wait_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BARRIER; + return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); +} + +#ifdef __BLOCKS__ +DISPATCH_NOINLINE +static void +_dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq, + dispatch_block_t work, uintptr_t dc_flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + pthread_priority_t pp; + voucher_t v; + + if (dbpd->dbpd_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; + } else { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; + } + + if (_dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority)){ + pp = dbpd->dbpd_priority; + } else { + pp = _dispatch_get_priority(); + } + if (dbpd->dbpd_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } else { + v = _voucher_get(); + } + + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); + } + + dispatch_tid tid = _dispatch_tid_self(); + struct dispatch_sync_context_s dsc = { + 
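// dc_other starts out as the top queue; the wait machinery updates
+		// it to the queue up to which this thread holds the drain locks
+		// (see the comment in _dispatch_async_and_wait_f_slow)
+		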
.dc_flags = dc_flags, + .dc_func = _dispatch_async_and_wait_invoke, + .dc_ctxt = &dsc, + .dc_other = dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = v, + .dsc_func = _dispatch_block_sync_invoke, + .dsc_ctxt = work, + .dsc_waiter = tid, + }; + + return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); +} + +void +dispatch_barrier_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) +{ + if (unlikely(!dq->do_targetq)) { + return dispatch_barrier_sync(dq, work); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK|DC_FLAG_BARRIER; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); + } + + dispatch_function_t func = _dispatch_Block_invoke(work); + return _dispatch_async_and_wait_f(dq, work, func, dc_flags); +} + +void +dispatch_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) +{ + if (unlikely(!dq->do_targetq)) { + return dispatch_sync(dq, work); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK; + if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); + } + + dispatch_function_t func = _dispatch_Block_invoke(work); + return _dispatch_async_and_wait_f(dq, work, func, dc_flags); +} +#endif // __BLOCKS__ + +#pragma mark - +#pragma mark dispatch_queue_specific + +static void +_dispatch_queue_specific_head_dispose_slow(void *ctxt) +{ + dispatch_queue_specific_head_t dqsh = ctxt; + dispatch_queue_specific_t dqs, tmp; + + TAILQ_FOREACH_SAFE(dqs, &dqsh->dqsh_entries, dqs_entry, tmp) { + dispatch_assert(dqs->dqs_destructor); + _dispatch_client_callout(dqs->dqs_ctxt, dqs->dqs_destructor); + free(dqs); + } + free(dqsh); +} + +static void +_dispatch_queue_specific_head_dispose(dispatch_queue_specific_head_t dqsh) +{ + dispatch_queue_t rq = _dispatch_get_default_queue(false); + dispatch_queue_specific_t dqs, tmp; + TAILQ_HEAD(, dispatch_queue_specific_s) entries = + TAILQ_HEAD_INITIALIZER(entries); + + TAILQ_CONCAT(&entries, &dqsh->dqsh_entries, dqs_entry); + TAILQ_FOREACH_SAFE(dqs, &entries, dqs_entry, tmp) { + if (dqs->dqs_destructor) { + TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); + } else { + free(dqs); + } + } + + if (TAILQ_EMPTY(&dqsh->dqsh_entries)) { + free(dqsh); + } else { + _dispatch_barrier_async_detached_f(rq, dqsh, + _dispatch_queue_specific_head_dispose_slow); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_init_specific(dispatch_queue_t dq) +{ + dispatch_queue_specific_head_t dqsh; + + dqsh = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_head_s)); + TAILQ_INIT(&dqsh->dqsh_entries); + if (unlikely(!os_atomic_cmpxchg2o(dq, dq_specific_head, + NULL, dqsh, release))) { + _dispatch_queue_specific_head_dispose(dqsh); + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_specific_t +_dispatch_queue_specific_find(dispatch_queue_specific_head_t dqsh, + const void *key) +{ + dispatch_queue_specific_t dqs; + + TAILQ_FOREACH(dqs, &dqsh->dqsh_entries, dqs_entry) { + if (dqs->dqs_key == key) { + return dqs; + } + } + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_admits_specific(dispatch_queue_t dq) +{ + if (dx_metatype(dq) == _DISPATCH_LANE_TYPE) { + return (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE || + !dx_hastypeflag(dq, QUEUE_BASE)); + } + return dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE; +} + +DISPATCH_NOINLINE +void 
+dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, + void *ctxt, dispatch_function_t destructor) +{ + if (unlikely(!key)) { + return; + } + dispatch_queue_t rq = _dispatch_get_default_queue(false); + dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; + dispatch_queue_specific_t dqs; + + if (unlikely(!_dispatch_queue_admits_specific(dq))) { + DISPATCH_CLIENT_CRASH(0, + "Queue doesn't support dispatch_queue_set_specific"); + } + + if (ctxt && !dqsh) { + _dispatch_queue_init_specific(dq); + dqsh = dq->dq_specific_head; + } else if (!dqsh) { + return; + } + + _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); + dqs = _dispatch_queue_specific_find(dqsh, key); + if (dqs) { + if (dqs->dqs_destructor) { + _dispatch_barrier_async_detached_f(rq, dqs->dqs_ctxt, + dqs->dqs_destructor); + } + if (ctxt) { + dqs->dqs_ctxt = ctxt; + dqs->dqs_destructor = destructor; + } else { + TAILQ_REMOVE(&dqsh->dqsh_entries, dqs, dqs_entry); + free(dqs); + } + } else if (ctxt) { + dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); + dqs->dqs_key = key; + dqs->dqs_ctxt = ctxt; + dqs->dqs_destructor = destructor; + TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); + } + + _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); +} + +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) +{ + dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; + dispatch_queue_specific_t dqs; + void *ctxt = NULL; + + if (likely(_dispatch_queue_admits_specific(dq) && dqsh)) { + _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); + dqs = _dispatch_queue_specific_find(dqsh, key); + if (dqs) ctxt = dqs->dqs_ctxt; + _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); + } + return ctxt; +} + +DISPATCH_NOINLINE +void * +dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) +{ + if (unlikely(!key)) { + return NULL; + } + return _dispatch_queue_get_specific_inline(dq, key); +} + +DISPATCH_NOINLINE +void * +dispatch_get_specific(const void *key) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + void *ctxt = NULL; + + if (likely(key && dq)) { + do { + ctxt = _dispatch_queue_get_specific_inline(dq, key); + dq = dq->do_targetq; + } while (unlikely(ctxt == NULL && dq)); + } + return ctxt; +} + +#pragma mark - +#pragma mark dispatch_queue_t / dispatch_lane_t + +void +dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) +{ + if (unlikely(_dispatch_object_is_global(dq))) { + return; + } + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq); + if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); + } + dq->dq_label = label; +} + +static inline bool +_dispatch_base_lane_is_wlh(dispatch_lane_t dq, dispatch_queue_t tq) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + if (unlikely(!_dispatch_kevent_workqueue_enabled)) { + return false; + } + if (dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { + return true; + } + if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { + // Sources don't support sync waiters, so the ones that never change QoS + // don't benefit from any of the workloop features which have overhead, + // so just use the workqueue kqueue for these. 
+ if (likely(!upcast(dq)._ds->ds_refs->du_can_be_wlh)) { + return false; + } + dispatch_assert(upcast(dq)._ds->ds_refs->du_is_direct); + } + return dq->dq_width == 1 && _dispatch_is_in_root_queues_array(tq); +#else + (void)dq; (void)tq; + return false; +#endif // DISPATCH_USE_KEVENT_WORKLOOP +} + +static void +_dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) +{ + uint64_t old_state, new_state, role; + + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + role = DISPATCH_QUEUE_ROLE_INNER; + } else if (_dispatch_base_lane_is_wlh(dq, tq)) { + role = DISPATCH_QUEUE_ROLE_BASE_WLH; + } else { + role = DISPATCH_QUEUE_ROLE_BASE_ANON; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= role; + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); + + if (_dq_state_is_base_wlh(old_state) && !_dq_state_is_base_wlh(new_state)) { + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi && ddi->ddi_wlh == (dispatch_wlh_t)dq) { + _dispatch_event_loop_leave_immediate(new_state); + } + } + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + dispatch_queue_flags_t clear = 0, set = DQF_TARGETED; + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + clear |= DQF_MUTABLE; +#if !DISPATCH_ALLOW_NON_LEAF_RETARGET + } else { + clear |= DQF_MUTABLE; +#endif + } + if (clear) { + _dispatch_queue_atomic_flags_set_and_clear(tq, set, clear); + } else { + _dispatch_queue_atomic_flags_set(tq, set); + } + } +} + +dispatch_priority_t +_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, + dispatch_wlh_t *wlh_out) +{ + dispatch_priority_t dpri = dq->dq_priority; + dispatch_priority_t p = dpri & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_qos_t fallback = _dispatch_priority_fallback_qos(dpri); + dispatch_queue_t tq = dq->do_targetq; + dispatch_wlh_t wlh = DISPATCH_WLH_ANON; + + if (_dq_state_is_base_wlh(dq->dq_state)) { + wlh = (dispatch_wlh_t)dq; + } + + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { + if (unlikely(tq == _dispatch_mgr_q._as_dq)) { + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; + } + if (unlikely(_dispatch_queue_is_thread_bound(tq))) { + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return tq->dq_priority; + } + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, + dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? dq : NULL); + if (wlh_out) *wlh_out = NULL; + return 0; + } + + if (_dq_state_is_base_wlh(tq->dq_state)) { + wlh = (dispatch_wlh_t)tq; + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + } + } else if (unlikely(_dispatch_queue_is_mutable(tq))) { + // we're not allowed to dereference tq->do_targetq + _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, + dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? dq : NULL); + if (wlh_out) *wlh_out = NULL; + return 0; + } + + dispatch_priority_t tqp = tq->dq_priority; + + tq = tq->do_targetq; + if (tqp & DISPATCH_PRIORITY_FLAG_INHERITED) { + // if the priority is inherited, it means we got it from our target + // which has fallback and various magical flags that the code below + // will handle, so do not bother here. 
+ break; + } + + if (!fallback) fallback = _dispatch_priority_fallback_qos(tqp); + tqp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (p < tqp) p = tqp; + } + + if (likely(_dispatch_is_in_root_queues_array(tq) || + tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { + dispatch_priority_t rqp = tq->dq_priority; + + if (!fallback) fallback = _dispatch_priority_fallback_qos(rqp); + rqp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (p < rqp) p = rqp; + + p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) || + !(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) { + p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR); + if (fallback > _dispatch_priority_qos(p)) { + p |= _dispatch_priority_make_fallback(fallback); + } + } + if (wlh_out) *wlh_out = wlh; + return p; + } + + // pthread root queues opt out of QoS + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) +{ + if (unlikely(!dwl->dwl_attr)) { + dwl->dwl_attr = _dispatch_calloc(1, sizeof(dispatch_workloop_attr_s)); + } +} + +void +dispatch_set_qos_class_floor(dispatch_object_t dou, + dispatch_qos_class_t cls, int relpri) +{ + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class_floor called on invalid object type"); + } + if (dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { + return dispatch_workloop_set_qos_class_floor(dou._dwl, cls, relpri, 0); + } + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = _dispatch_priority_make(qos, relpri); + dispatch_priority_t old_pri = dou._dq->dq_priority; + + if (pri) pri |= DISPATCH_PRIORITY_FLAG_FLOOR; + old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; + dou._dq->dq_priority = pri | old_pri; + + _dispatch_queue_setter_assert_inactive(dou._dq); +} + +void +dispatch_set_qos_class(dispatch_object_t dou, dispatch_qos_class_t cls, + int relpri) +{ + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER || + dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class called on invalid object type"); + } + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = _dispatch_priority_make(qos, relpri); + dispatch_priority_t old_pri = dou._dq->dq_priority; + + old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; + dou._dq->dq_priority = pri | old_pri; + + _dispatch_queue_setter_assert_inactive(dou._dq); +} + +void +dispatch_set_qos_class_fallback(dispatch_object_t dou, dispatch_qos_class_t cls) +{ + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class_fallback called on invalid object type"); + } + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = _dispatch_priority_make_fallback(qos); + dispatch_priority_t old_pri = dou._dq->dq_priority; + + old_pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; + dou._dq->dq_priority = pri | old_pri; + + _dispatch_queue_setter_assert_inactive(dou._dq); +} + +static dispatch_queue_t +_dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq, + dispatch_queue_t tq) +{ + const dispatch_priority_t inherited = DISPATCH_PRIORITY_FLAG_INHERITED; + dispatch_priority_t pri = dq._dl->dq_priority; + + // This priority has been selected by 
the client; leave it alone.
+	// However, when the client picked a QoS, we should adjust the target
+	// queue if it is a root queue to best match the ask
+	if (_dispatch_queue_priority_manually_selected(pri)) {
+		if (_dispatch_is_in_root_queues_array(tq)) {
+			dispatch_qos_t qos = _dispatch_priority_qos(pri);
+			if (!qos) qos = DISPATCH_QOS_DEFAULT;
+			tq = _dispatch_get_root_queue(qos,
+					pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq;
+		}
+		return tq;
+	}
+
+	if (_dispatch_is_in_root_queues_array(tq)) {
+		// base queues need to know they target
+		// the default root queue so that _dispatch_queue_wakeup_qos()
+		// in _dispatch_queue_wakeup() can fall back to QOS_DEFAULT
+		// if no other priority was provided.
+		pri = tq->dq_priority | inherited;
+	} else if (pri & inherited) {
+		// if the FALLBACK flag is set on queues due to the code above,
+		// we need to clear it if the queue is retargeted within a hierarchy
+		// and is no longer a base queue.
+		pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK;
+		pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK;
+	}
+
+	dq._dl->dq_priority = pri;
+	return tq;
+}
+
+
+DISPATCH_NOINLINE
+static dispatch_queue_t
+_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
+		dispatch_queue_t tq, bool legacy)
+{
+	dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
+
+	//
+	// Step 1: Normalize arguments (qos, overcommit, tq)
+	//
+
+	dispatch_qos_t qos = dqai.dqai_qos;
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+	if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
+		dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
+	}
+	if (qos == DISPATCH_QOS_MAINTENANCE) {
+		dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
+	}
+#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
+
+	_dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
+	if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
+		if (tq->do_targetq) {
+			DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
+					"a non-global target queue");
+		}
+	}
+
+	if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
+		// Handle discrepancies between attr and target queue, attributes win
+		if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
+			if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
+				overcommit = _dispatch_queue_attr_overcommit_enabled;
+			} else {
+				overcommit = _dispatch_queue_attr_overcommit_disabled;
+			}
+		}
+		if (qos == DISPATCH_QOS_UNSPECIFIED) {
+			qos = _dispatch_priority_qos(tq->dq_priority);
+		}
+		tq = NULL;
+	} else if (tq && !tq->do_targetq) {
+		// target is a pthread or runloop root queue, setting QoS or overcommit
+		// is disallowed
+		if (overcommit != _dispatch_queue_attr_overcommit_unspecified) {
+			DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
+					"and use this kind of target queue");
+		}
+	} else {
+		if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
+			// Serial queues default to overcommit!
+			overcommit = dqai.dqai_concurrent ?
+					_dispatch_queue_attr_overcommit_disabled :
+					_dispatch_queue_attr_overcommit_enabled;
+		}
+	}
+	if (!tq) {
+		tq = _dispatch_get_root_queue(
+				qos == DISPATCH_QOS_UNSPECIFIED ?
DISPATCH_QOS_DEFAULT : qos, + overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; + if (unlikely(!tq)) { + DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); + } + } + + // + // Step 2: Initialize the queue + // + + if (legacy) { + // if any of these attributes is specified, use non legacy classes + if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) { + legacy = false; + } + } + + const void *vtable; + dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0; + if (dqai.dqai_concurrent) { + vtable = DISPATCH_VTABLE(queue_concurrent); + } else { + vtable = DISPATCH_VTABLE(queue_serial); + } + switch (dqai.dqai_autorelease_frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + dqf |= DQF_AUTORELEASE_NEVER; + break; + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + dqf |= DQF_AUTORELEASE_ALWAYS; + break; + } + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + dispatch_lane_t dq = _dispatch_object_alloc(vtable, + sizeof(struct dispatch_lane_s)); + _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ? + DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | + (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); + + dq->dq_label = label; + dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos, + dqai.dqai_relpri); + if (overcommit == _dispatch_queue_attr_overcommit_enabled) { + dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + } + if (!dqai.dqai_inactive) { + _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq, tq); + } + _dispatch_retain(tq); + dq->do_targetq = tq; + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_trace_queue_create(dq)._dq; +} + +dispatch_queue_t +dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq) +{ + return _dispatch_lane_create_with_target(label, dqa, tq, false); +} + +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +{ + return _dispatch_lane_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT, true); +} + +dispatch_queue_t +dispatch_queue_create_with_accounting_override_voucher(const char *label, + dispatch_queue_attr_t attr, voucher_t voucher) +{ + (void)label; (void)attr; (void)voucher; + DISPATCH_CLIENT_CRASH(0, "Unsupported interface"); +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_dispose(dispatch_queue_class_t dqu, bool *allow_free) +{ + dispatch_queue_specific_head_t dqsh; + dispatch_queue_t dq = dqu._dq; + + if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { + free((void*)dq->dq_label); + } + dqsh = os_atomic_xchg2o(dq, dq_specific_head, (void *)0x200, relaxed); + if (dqsh) _dispatch_queue_specific_head_dispose(dqsh); + + // fast path for queues that never got their storage retained + if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) { + // poison the state with something that is suspended and is easy to spot + dq->dq_state = 0xdead000000000000; + return; + } + + // Take over freeing the memory from _dispatch_object_dealloc() + // + // As soon as we call _dispatch_queue_release_storage(), we forfeit + // the possibility for the caller of dx_dispose() to finalize the object + // so that responsibility is ours. 
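+	// Concretely: the finalizer runs now, the fields below are poisoned so
+	// that any late use crashes predictably, and the memory itself is only
+	// freed once the last internal (sref) reference is dropped.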
+	_dispatch_object_finalize(dq);
+	*allow_free = false;
+	dq->dq_label = "<released queue>";
+	dq->do_targetq = NULL;
+	dq->do_finalizer = NULL;
+	dq->do_ctxt = NULL;
+	return _dispatch_queue_release_storage(dq);
+}
+
+void
+_dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free)
+{
+	dispatch_lane_t dq = dqu._dl;
+	if (unlikely(dq->dq_items_tail)) {
+		DISPATCH_CLIENT_CRASH(dq->dq_items_tail,
+				"Release of a queue while items are enqueued");
+	}
+	dq->dq_items_head = (void *)0x200;
+	dq->dq_items_tail = (void *)0x200;
+
+	uint64_t orig_dq_state, dq_state;
+	dq_state = orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+
+	uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
+	if (dx_hastypeflag(dq, QUEUE_ROOT)) {
+		initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
+	}
+	dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+	dq_state &= ~DISPATCH_QUEUE_DIRTY;
+	dq_state &= ~DISPATCH_QUEUE_ROLE_MASK;
+	if (unlikely(dq_state != initial_state)) {
+		if (_dq_state_drain_locked(dq_state)) {
+			DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state,
+					"Release of a locked queue");
+		}
+#ifndef __LP64__
+		orig_dq_state >>= 32;
+#endif
+		DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state,
+				"Release of a queue with corrupt state");
+	}
+	_dispatch_queue_dispose(dqu, allow_free);
+}
+
+void
+_dispatch_lane_dispose(dispatch_lane_t dq, bool *allow_free)
+{
+	_dispatch_object_debug(dq, "%s", __func__);
+	_dispatch_trace_queue_dispose(dq);
+	_dispatch_lane_class_dispose(dq, allow_free);
+}
+
+void
+_dispatch_queue_xref_dispose(dispatch_queue_t dq)
+{
+	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
+	if (unlikely(_dq_state_is_suspended(dq_state))) {
+		long state = (long)dq_state;
+		if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32);
+		if (unlikely(dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) {
+			// Arguments for and against this assert are within 6705399
+			DISPATCH_CLIENT_CRASH(state, "Release of an inactive object");
+		}
+		DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object");
+	}
+	os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_lane_suspend_slow(dispatch_lane_t dq)
+{
+	uint64_t old_state, new_state, delta;
+
+	_dispatch_queue_sidelock_lock(dq);
+
+	// what we want to transfer (remove from dq_state)
+	delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL;
+	// but this is a suspend so add a suspend count at the same time
+	delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL;
+	if (dq->dq_side_suspend_cnt == 0) {
+		// we subtract delta from dq_state, and we want to set this bit
+		delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
+	}
+
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+		// unsigned underflow of the subtraction can happen because other
+		// threads could have touched this value while we were trying to acquire
+		// the lock, or because another thread raced us to do the same operation
+		// and got to the lock first. 
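+		// Net effect when the subtraction succeeds: dq_state loses
+		// (HALF - 1) suspend units while dq_side_suspend_cnt below gains
+		// HALF, so the logical suspend count still grows by exactly one.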
+ if (unlikely(os_sub_overflow(old_state, delta, &new_state))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, + DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { + DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); + } + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return _dispatch_lane_suspend(dq); +} + +void +_dispatch_lane_suspend(dispatch_lane_t dq) +{ + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (unlikely(os_add_overflow(old_state, new_state, &new_state))) { + os_atomic_rmw_loop_give_up({ + return _dispatch_lane_suspend_slow(dq); + }); + } + }); + + if (!_dq_state_is_suspended(old_state)) { + // rdar://8181908 we need to extend the queue life for the duration + // of the call to wakeup at _dispatch_lane_resume() time. + _dispatch_retain_2(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_resume_slow(dispatch_lane_t dq) +{ + uint64_t old_state, new_state, delta; + + _dispatch_queue_sidelock_lock(dq); + + // what we want to transfer + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a resume so consume a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + switch (dq->dq_side_suspend_cnt) { + case 0: + goto retry; + case DISPATCH_QUEUE_SUSPEND_HALF: + // we will transition the side count to 0, so we want to clear this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; + break; + } + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + // unsigned overflow of the addition can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. + if (unlikely(os_add_overflow(old_state, delta, &new_state))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_resume_activate(dispatch_lane_t dq) +{ + if (dx_vtable(dq)->dq_activate) { + dx_vtable(dq)->dq_activate(dq); + } + + _dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE); +} + +DISPATCH_NOINLINE +void +_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) +{ + // covers all suspend and inactive bits, including side suspend bit + const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | + DISPATCH_QUEUE_IN_BARRIER; + + // backward compatibility: only dispatch sources can abuse + // dispatch_resume() to really mean dispatch_activate() + bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); + uint64_t old_state, new_state; + + // + // Activation is a bit tricky as it needs to finalize before the wakeup. + // + // The inactive bits have 4 states: + // - 11: INACTIVE + // - 10: ACTIVATED, but not activating yet + // - 01: ACTIVATING right now + // - 00: fully active + // + // ACTIVATED is only used when the queue is otherwise also suspended. 
+	// In that case the last resume will take over the activation.
+	//
+	// The ACTIVATING state is tricky because it may be cleared by sources
+	// firing, to avoid priority inversion problems such as rdar://45419440
+	// where as soon as the kevent is installed, the source may fire
+	// before its activating state was cleared.
+	//
+	if (op == DISPATCH_ACTIVATE) {
+		// relaxed atomic because this doesn't publish anything, this is only
+		// about picking the thread that gets to finalize the activation
+		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
+			if (!_dq_state_is_inactive(old_state)) {
+				// object already active or activated
+				os_atomic_rmw_loop_give_up(return);
+			}
+			if (unlikely(_dq_state_suspend_cnt(old_state))) {
+				// { sc != 0, i = INACTIVE } -> i = ACTIVATED
+				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
+						DISPATCH_QUEUE_ACTIVATED;
+			} else {
+				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
+				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
+						DISPATCH_QUEUE_ACTIVATING;
+			}
+		});
+	} else if (op == DISPATCH_ACTIVATION_DONE) {
+		// release barrier needed to publish the effect of dq_activate()
+		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+			if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) {
+				os_atomic_rmw_loop_give_up({
+					// object activation was already concurrently done
+					// due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING
+					// wakeup call.
+					//
+					// We still need to consume the internal refcounts because
+					// the wakeup doesn't take care of these.
+					return _dispatch_release_2_tailcall(dq);
+				});
+			}
+
+			new_state = old_state - DISPATCH_QUEUE_ACTIVATING;
+			if (!_dq_state_is_runnable(new_state)) {
+				// Out of width or still suspended.
+				// For the former, force _dispatch_lane_non_barrier_complete
+				// to reconsider whether it has work to do
+				new_state |= DISPATCH_QUEUE_DIRTY;
+			} else if (_dq_state_drain_locked(new_state)) {
+				// still locked by someone else, make drain_try_unlock() fail
+				// and reconsider whether it has work to do
+				new_state |= DISPATCH_QUEUE_DIRTY;
+			} else {
+				// clear overrides and force a wakeup
+				new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+			}
+		});
+		if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) {
+			DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state");
+		}
+	} else {
+		// release barrier needed to publish the effect of
+		// - dispatch_set_target_queue()
+		// - dispatch_set_*_handler()
+		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+			new_state = old_state;
+			if (is_source && (old_state & suspend_bits) ==
+					DISPATCH_QUEUE_INACTIVE) {
+				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
+				new_state -= DISPATCH_QUEUE_INACTIVE;
+				new_state += DISPATCH_QUEUE_ACTIVATING;
+			} else if (unlikely(os_sub_overflow(old_state,
+					DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) {
+				// underflow means over-resume or a suspend count transfer
+				// to the side count is needed
+				os_atomic_rmw_loop_give_up({
+					if (!(old_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) {
+						goto over_resume;
+					}
+					return _dispatch_lane_resume_slow(dq);
+				});
+				//
+				// below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL
+				//
+			} else if (_dq_state_is_activated(new_state)) {
+				// { sc = 1, i = ACTIVATED } -> i = ACTIVATING
+				new_state -= DISPATCH_QUEUE_ACTIVATED;
+				new_state += DISPATCH_QUEUE_ACTIVATING;
+			} else if (!_dq_state_is_runnable(new_state)) {
+				// Out of width or still suspended. 
+ // For the former, force _dispatch_lane_non_barrier_complete + // to reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(new_state)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (!is_source && (_dq_state_has_pending_barrier(new_state) || + new_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT)) { + // if we can, acquire the full width drain lock + // and then perform a lock transfer + // + // However this is never useful for a source where there are no + // sync waiters, so never take the lock and do a plain wakeup + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width_and_in_barrier; + } else { + // clear overrides and force a wakeup + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } + }); + } + + if (_dq_state_is_activating(new_state)) { + return _dispatch_lane_resume_activate(dq); + } + + if (_dq_state_is_suspended(new_state)) { + return; + } + + if (_dq_state_is_dirty(old_state)) { + // + // dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); + } + // Balancing the retain_2 done in suspend() for rdar://8181908 + dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + } else if (!_dq_state_is_runnable(new_state)) { + if (_dq_state_is_base_wlh(old_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + } + return _dispatch_release_2(dq); + } + dispatch_assert(!_dq_state_received_sync_wait(old_state)); + dispatch_assert(!_dq_state_in_sync_transfer(old_state)); + return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); + +over_resume: + if (unlikely(_dq_state_is_inactive(old_state))) { + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); + } + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); +} + +const char * +dispatch_queue_get_label(dispatch_queue_t dq) +{ + if (unlikely(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_queue_get_current_or_default(); + } + return dq->dq_label ? dq->dq_label : ""; +} + +qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) +{ + dispatch_priority_t pri = dq->dq_priority; + dispatch_qos_t qos = _dispatch_priority_qos(pri); + if (relpri_ptr) { + *relpri_ptr = qos ? _dispatch_priority_relpri(dq->dq_priority) : 0; + } + return _dispatch_qos_to_qos_class(qos); +} + +static void +_dispatch_lane_set_width(void *ctxt) +{ + int w = (int)(intptr_t)ctxt; // intentional truncation + uint32_t tmp; + dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; + + if (w >= 0) { + tmp = w ? 
(unsigned int)w : 1; + } else { + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_PHYSICAL); + break; + case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + break; + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + default: + tmp = _dispatch_qos_max_parallelism(qos, 0); + break; + } + } + if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { + tmp = DISPATCH_QUEUE_WIDTH_MAX; + } + + dispatch_queue_flags_t old_dqf, new_dqf; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); + }); + _dispatch_lane_inherit_wlh_from_target(dq, dq->do_targetq); + _dispatch_object_debug(dq, "%s", __func__); +} + +void +dispatch_queue_set_width(dispatch_queue_t dq, long width) +{ + unsigned long type = dx_type(dq); + if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + } else if (unlikely(type != DISPATCH_QUEUE_CONCURRENT_TYPE)) { + DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); + } + + if (likely((int)width >= 0)) { + dispatch_lane_t dl = upcast(dq)._dl; + _dispatch_barrier_trysync_or_async_f(dl, (void*)(intptr_t)width, + _dispatch_lane_set_width, DISPATCH_BARRIER_TRYSYNC_SUSPEND); + } else { + // The negative width constants need to execute on the queue to + // query the queue QoS + _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, + _dispatch_lane_set_width); + } +} + +static void +_dispatch_lane_legacy_set_target_queue(void *ctxt) +{ + dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; + dispatch_queue_t tq = ctxt; + dispatch_queue_t otq = dq->do_targetq; + + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); +#else + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); +#endif + } + + tq = _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq, tq); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_wakeup() + _dispatch_queue_sidelock_lock(dq); +#endif + if (unlikely(!_dispatch_queue_is_mutable(dq))) { + /* serialize with _dispatch_mach_handoff_set_wlh */ + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); + } + dq->do_targetq = tq; +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_wakeup() + _dispatch_queue_sidelock_unlock(dq); +#endif + + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_target_queue_changed(dq->_as_dq); + _dispatch_release_tailcall(otq); +} + +void +_dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq) +{ + if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { + bool overcommit = (dq->dq_width == 1); + tq = _dispatch_get_default_queue(overcommit); + } + + if (_dispatch_lane_try_inactive_suspend(dq)) { + _dispatch_object_set_target_queue_inline(dq, tq); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); + } + +#if !DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by 
other dispatch objects"); + } +#endif + + if (unlikely(!_dispatch_queue_is_mutable(dq))) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); + } +#endif + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); + } + + unsigned long metatype = dx_metatype(dq); + switch (metatype) { + case _DISPATCH_LANE_TYPE: +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); + } +#endif + break; + case _DISPATCH_SOURCE_TYPE: + _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); + _dispatch_bug_deprecated("Changing the target of a source " + "after it has been activated"); + break; + default: + DISPATCH_CLIENT_CRASH(metatype, "Unexpected dispatch object type"); + } + + _dispatch_retain(tq); + return _dispatch_barrier_trysync_or_async_f(dq, tq, + _dispatch_lane_legacy_set_target_queue, + DISPATCH_BARRIER_TRYSYNC_SUSPEND); +} + +#pragma mark - +#pragma mark _dispatch_queue_debug + +size_t +_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + size_t offset = 0; + dispatch_queue_t target = dq->do_targetq; + const char *tlabel = target && target->dq_label ? target->dq_label : ""; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " + "target = %s[%p], width = 0x%x, state = 0x%016llx", + dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width, + (unsigned long long)dq_state); + if (_dq_state_is_suspended(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", + _dq_state_suspend_cnt(dq_state)); + } + if (_dq_state_is_inactive(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); + } else if (_dq_state_is_activated(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activated"); + } else if (_dq_state_is_activating(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activating"); + } + if (_dq_state_is_enqueued(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); + } + if (_dq_state_is_dirty(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); + } + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + if (qos) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); + } + mach_port_t owner = _dq_state_drain_owner(dq_state); + if (!_dispatch_queue_is_thread_bound(dq) && owner) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x", + owner); + } + if (_dq_state_is_in_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier"); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d", + _dq_state_used_width(dq_state, dq->dq_width)); + } + if (_dq_state_has_pending_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier"); + } + if (_dispatch_queue_is_thread_bound(dq)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", + owner); + } + return offset; +} + +size_t +_dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dq->dq_label ? 
dq->dq_label : _dispatch_object_class_name(dq), dq); + offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +#if DISPATCH_PERF_MON + +#define DISPATCH_PERF_MON_BUCKETS 8 + +static struct { + uint64_t volatile time_total; + uint64_t volatile count_total; + uint64_t volatile thread_total; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS] DISPATCH_ATOMIC64_ALIGN; +DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; + +void +_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) +{ + uint64_t delta = _dispatch_uptime() - start; + unsigned long count; + int bucket = 0; + count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); + if (count == 0) { + bucket = 0; + if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); + } else { + bucket = MIN(DISPATCH_PERF_MON_BUCKETS - 1, + (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count)); + os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); + } + os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); + os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); + if (trace) { + _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); + } +} + +#endif + +#pragma mark - +#pragma mark dispatch queue/lane drain & invoke + +DISPATCH_NOINLINE +static void +_dispatch_return_to_kernel(void) +{ +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (likely(ddi && ddi->ddi_wlh != DISPATCH_WLH_ANON)) { + dispatch_assert(ddi->ddi_wlh_servicing); + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } else { + _dispatch_clear_return_to_kernel(); + } +#endif +} + +void +_dispatch_poll_for_events_4launchd(void) +{ + _dispatch_return_to_kernel(); +} + +#if DISPATCH_USE_WORKQUEUE_NARROWING +DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) +_dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); +#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS +DISPATCH_STATIC_GLOBAL(uint64_t _dispatch_narrow_check_interval_cache); +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_narrow_check_interval(void) +{ +#if DISPATCH_TIME_UNIT_USES_NANOSECONDS + return 50 * NSEC_PER_MSEC; +#else + if (_dispatch_narrow_check_interval_cache == 0) { + _dispatch_narrow_check_interval_cache = + _dispatch_time_nano2mach(50 * NSEC_PER_MSEC); + } + return _dispatch_narrow_check_interval_cache; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, + dispatch_priority_t pri) +{ + if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + dic->dic_next_narrow_check = _dispatch_approximate_time() + + _dispatch_narrow_check_interval(); + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_queue_drain_should_narrow_slow(uint64_t now, + dispatch_invoke_context_t dic) +{ + if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_qos_t qos = _dispatch_qos_from_pp(pp); + if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { + DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); + } + size_t idx = DISPATCH_QOS_BUCKET(qos); + os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; + uint64_t oldval, newval = now + 
_dispatch_narrow_check_interval();
+
+		dic->dic_next_narrow_check = newval;
+		os_atomic_rmw_loop(deadline, oldval, newval, relaxed, {
+			if (now < oldval) {
+				os_atomic_rmw_loop_give_up(return false);
+			}
+		});
+
+		if (!_pthread_workqueue_should_narrow(pp)) {
+			return false;
+		}
+		dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING;
+	}
+	return true;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline bool
+_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic)
+{
+	uint64_t next_check = dic->dic_next_narrow_check;
+	if (unlikely(next_check)) {
+		uint64_t now = _dispatch_approximate_time();
+		if (unlikely(next_check < now)) {
+			return _dispatch_queue_drain_should_narrow_slow(now, dic);
+		}
+	}
+	return false;
+}
+#else
+#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0)
+#define _dispatch_queue_drain_should_narrow(dic) false
+#endif
+
+/*
+ * Drain comes in 2 flavours (serial/concurrent) and 2 modes
+ * (redirecting or not).
+ *
+ * Serial
+ * ~~~~~~
+ * Serial drain is about serial queues (width == 1). It doesn't support
+ * the redirecting mode, which doesn't make sense, and treats all continuations
+ * as barriers. Bookkeeping is minimal in serial flavour; most of the loop
+ * is optimized away.
+ *
+ * Serial drain stops if the width of the queue grows larger than 1.
+ * Going through a serial drain prevents any recursive drain from being
+ * redirecting.
+ *
+ * Concurrent
+ * ~~~~~~~~~~
+ * When in non-redirecting mode (meaning one of the target queues is serial),
+ * non-barriers and barriers alike run in the context of the drain thread.
+ * Slow non-barrier items are still all signaled so that they can make progress
+ * toward the dispatch_sync() that will serialize them all.
+ *
+ * In redirecting mode, non-barrier work items are redirected downward.
+ *
+ * Concurrent drain stops if the width of the queue becomes 1, so that the
+ * queue drain moves to the more efficient serial mode. 
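+ *
+ * In both flavours, the drain loop also stops early when the queue gets
+ * suspended or retargeted while draining, when the thread is asked to
+ * narrow, or (for workloop drains) when a higher QoS item arrives; a sync
+ * waiter found at the head is handed back to the invoker through
+ * dic_barrier_waiter instead of being executed in place.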
+ */ +DISPATCH_ALWAYS_INLINE +static dispatch_queue_wakeup_target_t +_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) +{ + dispatch_queue_t orig_tq = dq->do_targetq; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + uint64_t dq_state, owned = *owned_ptr; + + if (unlikely(!dq->dq_items_tail)) return NULL; + + _dispatch_thread_frame_push(&dtf, dq); + if (serial_drain || _dq_state_is_in_barrier(owned)) { + // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` + // but width can change while draining barrier work items, so we only + // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` + owned = DISPATCH_QUEUE_IN_BARRIER; + } else { + owned &= DISPATCH_QUEUE_WIDTH_MASK; + } + + dc = _dispatch_queue_get_head(dq); + goto first_iteration; + + for (;;) { + dispatch_assert(dic->dic_barrier_waiter == NULL); + dc = next_dc; + if (unlikely(!dc)) { + if (!dq->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dq); + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (unlikely(serial_drain != (dq->dq_width == 1))) { + break; + } + if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + break; + } + if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + break; + } + } + +first_iteration: + dq_state = os_atomic_load(&dq->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + break; + } + if (unlikely(orig_tq != dq->do_targetq)) { + break; + } + + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_IN_BARRIER; + } + if (_dispatch_object_is_sync_waiter(dc) && + !(flags & DISPATCH_INVOKE_THREAD_BOUND)) { + dic->dic_barrier_waiter = dc; + goto out_with_barrier_waiter; + } + next_dc = _dispatch_queue_pop_head(dq, dc); + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_xor2o(dq, dq_state, owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_waiter(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } + + next_dc = _dispatch_queue_pop_head(dq, dc); + if (_dispatch_object_is_waiter(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_non_barrier_waiter_redirect_or_wake(dq, dc); + continue; + } + + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + // This is a re-redirect, overrides have already been applied by + // _dispatch_continuation_async* + // However we want to end up on the root queue matching `dc` + // qos, so pick up the current override of `dq` which includes + // dc's override (and maybe more) + _dispatch_continuation_redirect_push(dq, dc, + _dispatch_queue_max_qos(dq)); + continue; + } + } + + _dispatch_continuation_pop_inline(dc, dic, flags, 
dq);
+	}
+
+	if (owned == DISPATCH_QUEUE_IN_BARRIER) {
+		// if we're IN_BARRIER we really own the full width too
+		owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
+	}
+	if (dc) {
+		owned = _dispatch_queue_adjust_owned(dq, owned, dc);
+	}
+	*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+	*owned_ptr |= owned;
+	_dispatch_thread_frame_pop(&dtf);
+	return dc ? dq->do_targetq : NULL;
+
+out_with_no_width:
+	*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+	_dispatch_thread_frame_pop(&dtf);
+	return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+
+out_with_barrier_waiter:
+	if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
+		DISPATCH_INTERNAL_CRASH(0,
+				"Deferred continuation on source, mach channel or mgr");
+	}
+	_dispatch_thread_frame_pop(&dtf);
+	return dq->do_targetq;
+}
+
+DISPATCH_NOINLINE
+static dispatch_queue_wakeup_target_t
+_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu,
+		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+		uint64_t *owned)
+{
+	return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false);
+}
+
+DISPATCH_NOINLINE
+dispatch_queue_wakeup_target_t
+_dispatch_lane_serial_drain(dispatch_lane_class_t dqu,
+		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+		uint64_t *owned)
+{
+	flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
+	return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true);
+}
+
+void
+_dispatch_queue_invoke_finish(dispatch_queue_t dq,
+		dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned)
+{
+	struct dispatch_object_s *dc = dic->dic_barrier_waiter;
+	dispatch_qos_t qos = dic->dic_barrier_waiter_bucket;
+	if (dc) {
+		dic->dic_barrier_waiter = NULL;
+		dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED;
+		owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+#if DISPATCH_INTROSPECTION
+		dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
+		dsc->dsc_from_async = true;
+#endif
+		if (qos) {
+			return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl,
+					dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned);
+		}
+		return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc,
+				DISPATCH_WAKEUP_CONSUME_2, owned);
+	}
+
+	uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
+	if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
+		enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+	}
+	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+		new_state = old_state - owned;
+		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+		new_state |= DISPATCH_QUEUE_DIRTY;
+		if (_dq_state_is_runnable(new_state) &&
+				!_dq_state_is_enqueued(new_state)) {
+			// drain was not interrupted for suspension
+			// we will reenqueue right away, just put ENQUEUED back
+			new_state |= enqueued;
+		}
+	});
+	old_state -= owned;
+	if (_dq_state_received_override(old_state)) {
+		// Ensure that the root queue sees that this thread was overridden. 
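+		// (Without this, the override recorded in dq_state would be lost
+		// when the thread returns to the root queue pool.)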
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); + } + if ((old_state ^ new_state) & enqueued) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); + } + return _dispatch_release_2_tailcall(dq); +} + +void +_dispatch_lane_activate(dispatch_lane_class_t dq) +{ + dispatch_queue_t tq = dq._dl->do_targetq; + dispatch_priority_t pri = dq._dl->dq_priority; + + // Normalize priority: keep the fallback only when higher than the floor + if (_dispatch_priority_fallback_qos(pri) <= _dispatch_priority_qos(pri) || + (_dispatch_priority_qos(pri) && + !(pri & DISPATCH_PRIORITY_FLAG_FLOOR))) { + pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; + dq._dl->dq_priority = pri; + } + tq = _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq._dl, tq); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) +{ + dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); + + if (unlikely(cq != otq)) { + return otq; + } + if (dq->dq_width == 1) { + return _dispatch_lane_serial_drain(dq, dic, flags, owned); + } + return _dispatch_lane_concurrent_drain(dq, dic, flags, owned); +} + +DISPATCH_NOINLINE +void +_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2); +} + +#pragma mark - +#pragma mark dispatch_workloop_t + +#define _dispatch_wl(dwl, qos) os_mpsc(dwl, dwl, s[DISPATCH_QOS_BUCKET(qos)]) +#define _dispatch_workloop_looks_empty(dwl, qos) \ + os_mpsc_looks_empty(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_get_head(dwl, qos) \ + os_mpsc_get_head(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_pop_head(dwl, qos, dc) \ + os_mpsc_pop_head(_dispatch_wl(dwl, qos), dc, do_next) +#define _dispatch_workloop_push_update_tail(dwl, qos, dou) \ + os_mpsc_push_update_tail(_dispatch_wl(dwl, qos), dou, do_next) +#define _dispatch_workloop_push_update_prev(dwl, qos, prev, dou) \ + os_mpsc_push_update_prev(_dispatch_wl(dwl, qos), prev, dou, do_next) + +dispatch_workloop_t +dispatch_workloop_copy_current(void) +{ + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + _os_object_retain_with_resurrect(dwl->_as_os_obj); + return dwl; + } + return NULL; +} + +bool +dispatch_workloop_is_current(dispatch_workloop_t dwl) +{ + return _dispatch_get_wlh() == (dispatch_wlh_t)dwl; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_workloop_role_bits(void) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + if (likely(_dispatch_kevent_workqueue_enabled)) { + return DISPATCH_QUEUE_ROLE_BASE_WLH; + } +#endif + return DISPATCH_QUEUE_ROLE_BASE_ANON; +} + +bool +_dispatch_workloop_should_yield_4NW(void) +{ + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + return _dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos; + } + return false; +} + +DISPATCH_NOINLINE +static dispatch_workloop_t +_dispatch_workloop_create(const char *label, uint64_t dq_state) +{ + dispatch_queue_flags_t dqf = DQF_AUTORELEASE_ALWAYS; + dispatch_workloop_t dwl; + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + 
dq_state |= _dispatch_workloop_role_bits(); + + dwl = _dispatch_queue_alloc(workloop, dqf, 1, dq_state)._dwl; + dwl->dq_label = label; + dwl->do_targetq = _dispatch_get_default_queue(true); + if (!(dq_state & DISPATCH_QUEUE_INACTIVE)) { + dwl->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT | + _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); + } + _dispatch_object_debug(dwl, "%s", __func__); + return _dispatch_introspection_queue_create(dwl)._dwl; +} + +dispatch_workloop_t +dispatch_workloop_create(const char *label) +{ + return _dispatch_workloop_create(label, 0); +} + +dispatch_workloop_t +dispatch_workloop_create_inactive(const char *label) +{ + return _dispatch_workloop_create(label, DISPATCH_QUEUE_INACTIVE); +} + +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t dwl, + dispatch_autorelease_frequency_t frequency) +{ + if (frequency == DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_ALWAYS, DQF_AUTORELEASE_NEVER); + } else { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_NEVER, DQF_AUTORELEASE_ALWAYS); + } + _dispatch_queue_setter_assert_inactive(dwl); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) +{ + if (dwl->dwl_attr) { + free(dwl->dwl_attr); + } +} + +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) +{ + return dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & + (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_POLICY | + DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); +} + +void +dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, + uint64_t flags) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + if (priority) { + dwl->dwl_attr->dwla_sched.sched_priority = priority; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_SCHED; + } else { + dwl->dwl_attr->dwla_sched.sched_priority = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_SCHED; + } + + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } +} + +void +dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, + qos_class_t cls, int relpri, uint64_t flags) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + + if (qos) { + dwl->dwl_attr->dwla_pri = _dispatch_priority_make(qos, relpri); + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; + } else { + dwl->dwl_attr->dwla_pri = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; + } + + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } +} + +void +dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, + qos_class_t cls, uint64_t flags) +{ + dispatch_workloop_set_qos_class_floor(dwl, cls, 0, flags); +} + +void +dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, + uint32_t refillms) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + if 
((dwl->dwl_attr->dwla_flags & (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS)) == 0) { + DISPATCH_CLIENT_CRASH(0, "workloop qos class or priority must be " + "set before cpupercent"); + } + + dwl->dwl_attr->dwla_cpupercent.percent = percent; + dwl->dwl_attr->dwla_cpupercent.refillms = refillms; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; +} + +#if DISPATCH_IOHID_SPI +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dwl->dwl_attr->dwla_observers = *observer_hooks; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; +} +#endif + +static void +_dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, + pthread_attr_t *attr) +{ + uint64_t old_state, new_state; + dispatch_queue_global_t dprq; + + dprq = dispatch_pthread_root_queue_create( + "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); + + dwl->do_targetq = dprq->_as_dq; + _dispatch_retain(dprq); + dispatch_release(dprq); + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; + }); +} + +static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; + +static void +_dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) +{ + dispatch_workloop_attr_t dwla = dwl->dwl_attr; + pthread_attr_t attr; + + pthread_attr_init(&attr); + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { + dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { + pthread_attr_setschedparam(&attr, &dwla->dwla_sched); + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. We want this path to always + // be taken on workloops that have a scheduler priority set. 
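+		// (_dispatch_custom_workloop_root_queue above is exactly such a
+		// stand-in: a one-off root queue that is not part of the root
+		// queues array, so the detection reliably fires.)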
+ dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { + pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { + pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, + (unsigned long)dwla->dwla_cpupercent.refillms); + } + if (_dispatch_workloop_has_kernel_attributes(dwl)) { + int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); + switch (rv) { + case 0: + dwla->dwla_flags |= DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY; + break; + case ENOTSUP: + /* simulator fallback */ + _dispatch_workloop_activate_simulator_fallback(dwl, &attr); + break; + default: + dispatch_assert_zero(rv); + } + } + pthread_attr_destroy(&attr); +} + +void +_dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) +{ + uint64_t dq_state = os_atomic_load2o(dwl, dq_state, relaxed); + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1); + + initial_state |= _dispatch_workloop_role_bits(); + + if (unlikely(dq_state != initial_state)) { + if (_dq_state_drain_locked(dq_state)) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a locked workloop"); + } +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a workloop with corrupt state"); + } + + _dispatch_object_debug(dwl, "%s", __func__); + _dispatch_introspection_queue_dispose(dwl); + + for (size_t i = 0; i < countof(dwl->dwl_tails); i++) { + if (unlikely(dwl->dwl_tails[i])) { + DISPATCH_CLIENT_CRASH(dwl->dwl_tails[i], + "Release of a workloop while items are enqueued"); + } + // trash the queue so that use after free will crash + dwl->dwl_tails[i] = (void *)0x200; + dwl->dwl_heads[i] = (void *)0x200; + } + + if (dwl->dwl_timer_heap) { + for (size_t i = 0; i < DISPATCH_TIMER_WLH_COUNT; i++) { + dispatch_assert(dwl->dwl_timer_heap[i].dth_count == 0); + } + free(dwl->dwl_timer_heap); + dwl->dwl_timer_heap = NULL; + } + + if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & + DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) { + (void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl)); + } + _dispatch_workloop_attributes_dispose(dwl); + _dispatch_queue_dispose(dwl, allow_free); +} + +void +_dispatch_workloop_activate(dispatch_workloop_t dwl) +{ + // This transitions either: + // - from INACTIVE to ACTIVATING + // - or from ACTIVE to ACTIVE + uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state, + ~DISPATCH_QUEUE_ACTIVATED, relaxed); + + if (likely(_dq_state_is_inactive(old_state))) { + if (dwl->dwl_attr) { + // Activation of a workloop with attributes forces us to create + // the workloop up front and register the attributes with the + // kernel. 
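+			// (This is also why all of the attribute setters above insist,
+			// via _dispatch_queue_setter_assert_inactive(), that the
+			// workloop has not been activated yet.)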
+ _dispatch_workloop_activate_attributes(dwl); + } + if (!dwl->dq_priority) { + dwl->dq_priority = + _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); + } + dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); + return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, + dispatch_qos_t qos) +{ + uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { + os_atomic_rmw_loop_give_up(return true); + } + + if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + return false; + }); + } + + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state |= qos_bits; + }); + + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (likely(ddi)) { + ddi->ddi_wlh_needs_update = true; + _dispatch_return_to_kernel(); + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_workloop_invoke2(dispatch_workloop_t dwl, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) +{ + dispatch_workloop_attr_t dwl_attr = dwl->dwl_attr; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + + if (dwl_attr && + (dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS)) { + _dispatch_set_pthread_root_queue_observer_hooks( + &dwl_attr->dwla_observers); + } + _dispatch_thread_frame_push(&dtf, dwl); + + for (;;) { + dispatch_qos_t qos; + for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) { + if (!_dispatch_workloop_looks_empty(dwl, qos)) break; + } + if (qos < DISPATCH_QOS_MIN) { + break; + } + if (unlikely(!_dispatch_workloop_try_lower_max_qos(dwl, qos))) { + continue; + } + dwl->dwl_drained_qos = (uint8_t)qos; + + dc = _dispatch_workloop_get_head(dwl, qos); + do { + if (_dispatch_object_is_sync_waiter(dc)) { + dic->dic_barrier_waiter_bucket = qos; + dic->dic_barrier_waiter = dc; + dwl->dwl_drained_qos = DISPATCH_QOS_UNSPECIFIED; + goto out_with_barrier_waiter; + } + next_dc = _dispatch_workloop_pop_head(dwl, qos, dc); + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + + _dispatch_continuation_pop_inline(dc, dic, flags, dwl); + qos = dwl->dwl_drained_qos; + } while ((dc = next_dc) && (_dispatch_queue_max_qos(dwl) <= qos)); + } + + *owned = (*owned & DISPATCH_QUEUE_ENQUEUED) + + DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); + return NULL; + +out_with_barrier_waiter: + _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); + return dwl->do_targetq; +} + +void +_dispatch_workloop_invoke(dispatch_workloop_t dwl, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; + flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN; + _dispatch_queue_class_invoke(dwl, dic, flags, 0, _dispatch_workloop_invoke2); +} + +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_workloop_probe(dispatch_workloop_t dwl) +{ + dispatch_qos_t qos; + for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) { + if 
(!_dispatch_workloop_looks_empty(dwl, qos)) return true; + } + return false; +} + +DISPATCH_NOINLINE +static void +_dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, + struct dispatch_object_s *dc, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, uint64_t enqueued_bits) +{ + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + uint64_t next_owner = 0, old_state, new_state; + bool has_more_work; + + next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); + has_more_work = (_dispatch_workloop_pop_head(dwl, qos, dc) != NULL); + +transfer_lock_again: + if (!has_more_work) { + has_more_work = _dispatch_workloop_probe(dwl); + } + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= next_owner; + + if (likely(_dq_state_is_base_wlh(old_state))) { + new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; + if (has_more_work) { + // we know there's a next item, keep the enqueued bit if any + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + goto transfer_lock_again; + }); + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else { + new_state -= enqueued_bits; + } + }); + + return _dispatch_barrier_waiter_redirect_or_wake(dwl, dc, flags, + old_state, new_state); +} + +static void +_dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_qos_t wl_qos; + +again: + for (wl_qos = DISPATCH_QOS_MAX; wl_qos >= DISPATCH_QOS_MIN; wl_qos--) { + struct dispatch_object_s *dc; + + if (_dispatch_workloop_looks_empty(dwl, wl_qos)) continue; + dc = _dispatch_workloop_get_head(dwl, wl_qos); + + if (_dispatch_object_is_waiter(dc)) { + return _dispatch_workloop_drain_barrier_waiter(dwl, dc, wl_qos, + flags, 0); + } + + // We have work to do, we need to wake up + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (unlikely(target && !(flags & DISPATCH_WAKEUP_CONSUME_2))) { + _dispatch_retain_2(dwl); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + new_state -= DISPATCH_QUEUE_IN_BARRIER; + new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (target) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. 
+				// the xor generates better assembly as DISPATCH_QUEUE_DIRTY
+				// is already in a register
+				os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
+				goto again;
+			});
+		} else if (likely(_dq_state_is_base_wlh(old_state))) {
+			new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+			new_state &= ~DISPATCH_QUEUE_ENQUEUED;
+		} else {
+			new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
+		}
+	});
+	dispatch_assert(_dq_state_drain_locked_by_self(old_state));
+	dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state));
+
+	if (_dq_state_is_enqueued(new_state)) {
+		_dispatch_trace_runtime_event(sync_async_handoff, dwl, 0);
+	}
+
+#if DISPATCH_USE_KEVENT_WORKLOOP
+	if (_dq_state_is_base_wlh(old_state)) {
+		// - Only non-"du_is_direct" sources & mach channels can be enqueued
+		// on the manager.
+		//
+		// - Only dispatch_source_cancel_and_wait() and
+		// dispatch_source_set_*_handler() use the barrier complete codepath,
+		// none of which are used by mach channels.
+		//
+		// Hence no source-ish object can both be a workloop and need to use the
+		// manager at the same time.
+		dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state));
+		if (_dq_state_is_enqueued_on_target(old_state) ||
+				_dq_state_is_enqueued_on_target(new_state) ||
+				_dq_state_received_sync_wait(old_state) ||
+				_dq_state_in_sync_transfer(old_state)) {
+			return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl,
+					old_state, new_state, flags);
+		}
+		_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dwl);
+		goto done;
+	}
+#endif
+
+	if (_dq_state_received_override(old_state)) {
+		// Ensure that the root queue sees that this thread was overridden.
+		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
+	}
+
+	if (target) {
+		if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) {
+			dispatch_assert(_dq_state_is_enqueued(new_state));
+			dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
+			return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state);
+		}
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+		// when doing sync to async handoff,
+		// if the queue received an override we have to forcefully redrive
+		// the same override so that a new stealer is enqueued, because
+		// the previous one may be gone already
+		if (_dq_state_should_override(new_state)) {
+			return _dispatch_queue_wakeup_with_override(dwl, new_state, flags);
+		}
+#endif
+	}
+
+#if DISPATCH_USE_KEVENT_WORKLOOP
+done:
+#endif
+	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+		return _dispatch_release_2_tailcall(dwl);
+	}
+}
+
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+static void
+_dispatch_workloop_stealer_invoke(dispatch_continuation_t dc,
+		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
+{
+	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION;
+	_dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, {
+		dispatch_queue_t dq = dc->dc_data;
+		dx_invoke(dq, dic, flags | DISPATCH_INVOKE_STEALING);
+	});
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_workloop_push_stealer(dispatch_workloop_t dwl, dispatch_queue_t dq,
+		dispatch_qos_t qos)
+{
+	dispatch_continuation_t dc = _dispatch_continuation_alloc();
+
+	dc->do_vtable = DC_VTABLE(WORKLOOP_STEALING);
+	_dispatch_retain_2(dq);
+	dc->dc_func = NULL;
+	dc->dc_ctxt = dc;
+	dc->dc_other = NULL;
+	dc->dc_data = dq;
+	dc->dc_priority = DISPATCH_NO_PRIORITY;
+	dc->dc_voucher = DISPATCH_NO_VOUCHER;
+	_dispatch_workloop_push(dwl, dc, qos);
+}
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
+
+void
+_dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags)
+{
+	if 
(unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + return _dispatch_workloop_barrier_complete(dwl, qos, flags); + } + + if (unlikely(!(flags & DISPATCH_WAKEUP_CONSUME_2))) { + DISPATCH_INTERNAL_CRASH(flags, "Invalid way to wake up a workloop"); + } + + if (unlikely(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + goto done; + } + + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + if (_dq_state_max_qos(new_state)) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); + } + }); + + if (unlikely(_dq_state_is_suspended(old_state))) { +#ifndef __LP64__ + old_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); + } + if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) { + return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (likely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + return _dispatch_queue_wakeup_with_override(dwl, new_state, flags); + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS +done: + return _dispatch_release_2_tailcall(dwl); +} + +DISPATCH_NOINLINE +static void +_dispatch_workloop_push_waiter(dispatch_workloop_t dwl, + dispatch_sync_context_t dsc, dispatch_qos_t qos) +{ + struct dispatch_object_s *prev, *dc = (struct dispatch_object_s *)dsc; + + dispatch_priority_t p = _dispatch_priority_from_pp(dsc->dc_priority); + if (qos < _dispatch_priority_qos(p)) { + qos = _dispatch_priority_qos(p); + } + if (qos == DISPATCH_QOS_UNSPECIFIED) { + qos = DISPATCH_QOS_DEFAULT; + } + + prev = _dispatch_workloop_push_update_tail(dwl, qos, dc); + _dispatch_workloop_push_update_prev(dwl, qos, prev, dc); + if (likely(!os_mpsc_push_was_empty(prev))) return; + + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + new_state |= DISPATCH_QUEUE_DIRTY; + if (unlikely(_dq_state_drain_locked(old_state))) { + // not runnable, so we should just handle overrides + } else if (_dq_state_is_enqueued(old_state)) { + // 32123779 let the event thread redrive since it's out already + } else { + // see _dispatch_queue_drain_try_lock + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width_and_in_barrier; + } + }); + + if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && + _dispatch_async_and_wait_should_always_async(dwl, new_state)) { + dsc->dc_other = dwl; + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + } + + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_workloop_barrier_complete(dwl, qos, 0); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dwl, new_state, 0); + } + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS +} + +void +_dispatch_workloop_push(dispatch_workloop_t dwl, dispatch_object_t dou, + dispatch_qos_t qos) +{ + struct dispatch_object_s *prev; + + if (unlikely(_dispatch_object_is_waiter(dou))) { + return 
_dispatch_workloop_push_waiter(dwl, dou._dsc, qos); + } + + if (qos < _dispatch_priority_qos(dwl->dq_priority)) { + qos = _dispatch_priority_qos(dwl->dq_priority); + } + if (qos == DISPATCH_QOS_UNSPECIFIED) { + qos = _dispatch_priority_fallback_qos(dwl->dq_priority); + } + prev = _dispatch_workloop_push_update_tail(dwl, qos, dou._do); + if (unlikely(os_mpsc_push_was_empty(prev))) { + _dispatch_retain_2_unsafe(dwl); + } + _dispatch_workloop_push_update_prev(dwl, qos, prev, dou._do); + if (unlikely(os_mpsc_push_was_empty(prev))) { + return _dispatch_workloop_wakeup(dwl, qos, DISPATCH_WAKEUP_CONSUME_2 | + DISPATCH_WAKEUP_MAKE_DIRTY); + } +} + +#pragma mark - +#pragma mark dispatch queue/lane push & wakeup + +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void +_dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + dispatch_queue_t old_rq = _dispatch_queue_get_current(); + dispatch_queue_global_t assumed_rq = dc->dc_other; + dispatch_priority_t old_dp; + dispatch_object_t dou; + uintptr_t dc_flags = DC_FLAG_CONSUME; + + dou._do = dc->dc_data; + old_dp = _dispatch_root_queue_identity_assume(assumed_rq); + if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { + flags |= DISPATCH_INVOKE_STEALING; + dc_flags |= DC_FLAG_NO_INTROSPECTION; + } + _dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, { + if (_dispatch_object_has_vtable(dou._do)) { + dx_invoke(dou._dq, dic, flags); + } else { + _dispatch_continuation_invoke_inline(dou, flags, assumed_rq); + } + }); + _dispatch_reset_basepri(old_dp); + _dispatch_queue_set_current(old_rq); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_root_queue_push_needs_override(dispatch_queue_global_t rq, + dispatch_qos_t qos) +{ + dispatch_qos_t fallback = _dispatch_priority_fallback_qos(rq->dq_priority); + if (fallback) { + return qos && qos != fallback; + } + + dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority); + return rqos && qos > rqos; +} + +DISPATCH_NOINLINE +static void +_dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq, + dispatch_object_t dou, dispatch_qos_t qos) +{ + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + dispatch_continuation_t dc = dou._dc; + + if (_dispatch_object_is_redirection(dc)) { + // no double-wrap is needed, _dispatch_async_redirect_invoke will do + // the right thing + dc->dc_func = (void *)orig_rq; + } else { + dc = _dispatch_continuation_alloc(); + dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING); + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dou._do; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + } + _dispatch_root_queue_push_inline(rq, dc, dc, 1); +} + +DISPATCH_NOINLINE +static void +_dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq, + dispatch_queue_t dq, dispatch_qos_t qos) +{ + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); + _dispatch_retain_2(dq); + dc->dc_func = NULL; + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dq; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + _dispatch_root_queue_push_inline(rq, dc, dc, 1); +} + +DISPATCH_NOINLINE +static void 
+_dispatch_queue_wakeup_with_override_slow(dispatch_queue_t dq,
+		uint64_t dq_state, dispatch_wakeup_flags_t flags)
+{
+	dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state);
+	dispatch_queue_t tq = dq->do_targetq;
+	mach_port_t owner;
+	bool locked;
+
+	if (_dq_state_is_base_anon(dq_state)) {
+		if (!_dispatch_is_in_root_queues_array(tq)) {
+			// Do not try to override pthread root queues: it isn't supported
+			// and can cause things to run on the wrong hierarchy if we
+			// enqueue a stealer by accident
+			goto out;
+		} else if ((owner = _dq_state_drain_owner(dq_state))) {
+			(void)_dispatch_wqthread_override_start_check_owner(owner, qos,
+					&dq->dq_state_lock);
+			goto out;
+		}
+
+		// Avoid locking when we recognize the target queue as a global root
+		// queue; it is gross, but it is a very common case. The locking isn't
+		// needed because these target queues cannot go away.
+		locked = false;
+	} else if (likely(!_dispatch_queue_is_mutable(dq))) {
+		locked = false;
+	} else if (_dispatch_queue_sidelock_trylock(upcast(dq)._dl, qos)) {
+		// To traverse the tq chain safely we must lock it to ensure it
+		// cannot change.
+		locked = true;
+		tq = dq->do_targetq;
+		_dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq);
+	} else {
+		//
+		// To get here, the current thread has:
+		// 1. enqueued an object on `dq`
+		// 2. raised the max_qos value, set RECEIVED_OVERRIDE on `dq`
+		//    and didn't see an owner
+		// 3. tried and failed to acquire the side lock
+		//
+		// The side lock owner can only be one of three things:
+		//
+		// - The suspend/resume side count code. Besides being unlikely,
+		//   it means that at this moment the queue is actually suspended,
+		//   which transfers the responsibility of applying the override to
+		//   the eventual dispatch_resume().
+		//
+		// - A dispatch_set_target_queue() call. The fact that we saw no `owner`
+		//   means that the trysync it does wasn't being drained when (2)
+		//   happened, which can only be explained by one of these
+		//   interleavings:
+		//
+		//   o `dq` became idle between when the object queued in (1) ran and
+		//     the set_target_queue call, and we were unlucky enough that our
+		//     step (2) happened while this queue was idle. There is no reason
+		//     to override anything anymore: the queue drained to completion
+		//     while we were preempted, and our job is done.
+		//
+		//   o `dq` is queued but not draining during (1-2), then when we try
+		//     to lock at (3) the queue is now draining a set_target_queue.
+		//     This drainer must have seen the effects of (2) and has applied
+		//     our override. Our job is done.
+		//
+		// - Another instance of _dispatch_queue_wakeup_with_override_slow(),
+		//   which is fine because trylock leaves a hint that we failed our
+		//   trylock, causing the tryunlock below to fail and reassess whether
+		//   a better override needs to be applied.
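+		//
+		// In all three cases the override either became unnecessary or is
+		// another thread's responsibility, so it is safe to give up below.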
+		//
+		_dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq);
+		goto out;
+	}
+
+apply_again:
+	if (dx_hastypeflag(tq, QUEUE_ROOT)) {
+		dispatch_queue_global_t rq = upcast(tq)._dgq;
+		if (qos > _dispatch_priority_qos(rq->dq_priority)) {
+			_dispatch_root_queue_push_override_stealer(rq, dq, qos);
+		}
+	} else if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) {
+		_dispatch_workloop_push_stealer(upcast(tq)._dwl, dq, qos);
+	} else if (_dispatch_queue_need_override(tq, qos)) {
+		dx_wakeup(tq, qos, 0);
+	}
+	if (likely(!locked)) {
+		goto out;
+	}
+	while (unlikely(!_dispatch_queue_sidelock_tryunlock(upcast(dq)._dl))) {
+		// rdar://problem/24081326
+		//
+		// Another instance of _dispatch_queue_wakeup_with_override() tried
+		// to acquire the side lock while we were running, and could have
+		// had a better override than ours to apply.
+		//
+		oqos = _dispatch_queue_max_qos(dq);
+		if (oqos > qos) {
+			qos = oqos;
+			// The other instance had a better priority than ours; override
+			// our thread, and apply the override that wasn't applied to `dq`
+			// because of us.
+			goto apply_again;
+		}
+	}
+
+out:
+	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
+		return _dispatch_release_2_tailcall(dq);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_queue_wakeup_with_override(dispatch_queue_class_t dq,
+		uint64_t dq_state, dispatch_wakeup_flags_t flags)
+{
+	dispatch_assert(_dq_state_should_override(dq_state));
+
+#if DISPATCH_USE_KEVENT_WORKLOOP
+	if (likely(_dq_state_is_base_wlh(dq_state))) {
+		_dispatch_trace_runtime_event(worker_request, dq._dq, 1);
+		return _dispatch_event_loop_poke((dispatch_wlh_t)dq._dq, dq_state,
+				flags | DISPATCH_EVENT_LOOP_OVERRIDE);
+	}
+#endif // DISPATCH_USE_KEVENT_WORKLOOP
+	return _dispatch_queue_wakeup_with_override_slow(dq._dq, dq_state, flags);
+}
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
+
+DISPATCH_NOINLINE
+void
+_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
+{
+	dispatch_queue_t dq = dqu._dq;
+	dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);
+
+	if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
+		_dispatch_retain_2(dq);
+		flags |= DISPATCH_WAKEUP_CONSUME_2;
+	}
+
+	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
+		//
+		// _dispatch_lane_class_barrier_complete() is about what both regular
+		// queues and sources need to evaluate, but the former can have sync
+		// handoffs to perform which _dispatch_lane_class_barrier_complete()
+		// doesn't handle, only _dispatch_lane_barrier_complete() does.
+		//
+		// _dispatch_lane_wakeup() is the one for plain queues that calls
+		// _dispatch_lane_barrier_complete(), and this is only taken for non
+		// queue types.
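+		// The dispatch_assert() just below enforces that invariant.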
+		//
+		dispatch_assert(dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
+		qos = _dispatch_queue_wakeup_qos(dq, qos);
+		return _dispatch_lane_class_barrier_complete(upcast(dq)._dl, qos,
+				flags, target, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED);
+	}
+
+	if (target) {
+		uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
+		if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
+			enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+		}
+		qos = _dispatch_queue_wakeup_qos(dq, qos);
+		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+			new_state = _dq_state_merge_qos(old_state, qos);
+			if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) {
+				// When an event is being delivered to a source because its
+				// unote was being registered before the ACTIVATING state
+				// had a chance to be cleared, we don't want to fail the
+				// wakeup, which could lead to a priority inversion.
+				//
+				// Instead, these wakeups are allowed to finish the pending
+				// activation.
+				if (_dq_state_is_activating(old_state)) {
+					new_state &= ~DISPATCH_QUEUE_ACTIVATING;
+				}
+			}
+			if (likely(!_dq_state_is_suspended(new_state) &&
+					!_dq_state_is_enqueued(old_state) &&
+					(!_dq_state_drain_locked(old_state) ||
+					(enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR &&
+					_dq_state_is_base_wlh(old_state))))) {
+				new_state |= enqueue;
+			}
+			if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
+				new_state |= DISPATCH_QUEUE_DIRTY;
+			} else if (new_state == old_state) {
+				os_atomic_rmw_loop_give_up(goto done);
+			}
+		});
+
+		if (likely((old_state ^ new_state) & enqueue)) {
+			dispatch_queue_t tq;
+			if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
+				// the rmw_loop above has no acquire barrier, as the last block
+				// of a queue asyncing to that queue is not an uncommon pattern
+				// and in that case the acquire would be completely useless
+				//
+				// so instead use dependency ordering to read
+				// the targetq pointer.
+				os_atomic_thread_fence(dependency);
+				tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
+						(long)new_state);
+			} else {
+				tq = target;
+			}
+			dispatch_assert(_dq_state_is_enqueued(new_state));
+			return _dispatch_queue_push_queue(tq, dq, new_state);
+		}
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+		if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
+			if (_dq_state_should_override(new_state)) {
+				return _dispatch_queue_wakeup_with_override(dq, new_state,
+						flags);
+			}
+		}
+	} else if (qos) {
+		//
+		// Someone is trying to override the last work item of the queue.
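+		// There is nothing to enqueue in that case: the rmw loop below only
+		// merges the max QoS, and gives up if the item was already drained.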
+ // + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + // Avoid spurious override if the item was drained before we could + // apply an override + if (!_dq_state_drain_locked(old_state) && + !_dq_state_is_enqueued(old_state)) { + os_atomic_rmw_loop_give_up(goto done); + } + new_state = _dq_state_merge_qos(old_state, qos); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); + } + }); + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dq, new_state, flags); + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } +done: + if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) { + return _dispatch_release_2_tailcall(dq); + } +} + +DISPATCH_NOINLINE +void +_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + + if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + return _dispatch_lane_barrier_complete(dqu, qos, flags); + } + if (_dispatch_queue_class_probe(dqu)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + return _dispatch_queue_wakeup(dqu, qos, flags, target); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lane_push_waiter_should_wakeup(dispatch_lane_t dq, + dispatch_sync_context_t dsc) +{ + if (_dispatch_queue_is_thread_bound(dq)) { + return true; + } + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dispatch_async_and_wait_should_always_async(dq, dq_state); + } + return false; +} + +DISPATCH_NOINLINE +static void +_dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, + dispatch_qos_t qos) +{ + uint64_t old_state, new_state; + + if (dsc->dc_data != DISPATCH_WLH_ANON) { + // The kernel will handle all the overrides / priorities on our behalf. 
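+		// (hence zero out the QoS below instead of applying an override in
+		// userspace)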
+ qos = 0; + } + + if (unlikely(_dispatch_queue_push_item(dq, dsc))) { + if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { + // If this returns true, we know that we are pushing onto the base + // queue + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; + return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } + + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + new_state |= DISPATCH_QUEUE_DIRTY; + if (unlikely(_dq_state_drain_locked(old_state) || + !_dq_state_is_runnable(old_state))) { + // not runnable, so we should just handle overrides + } else if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued(old_state)) { + // 32123779 let the event thread redrive since it's out already + } else if (_dq_state_has_pending_barrier(old_state) || + new_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + // see _dispatch_queue_drain_try_lock + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width_and_in_barrier; + } + }); + + if (_dq_state_is_base_wlh(old_state)) { + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_lane_barrier_complete(dq, qos, 0); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dq, new_state, 0); + } + } + } else if (unlikely(qos)) { + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(return); + } + }); + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dq, new_state, 0); + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } +} + +DISPATCH_NOINLINE +void +_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou, + dispatch_qos_t qos) +{ + dispatch_wakeup_flags_t flags = 0; + struct dispatch_object_s *prev; + + if (unlikely(_dispatch_object_is_waiter(dou))) { + return _dispatch_lane_push_waiter(dq, dou._dsc, qos); + } + + dispatch_assert(!_dispatch_object_is_global(dq)); + qos = _dispatch_queue_push_qos(dq, qos); + + // If we are going to call dx_wakeup(), the queue must be retained before + // the item we're pushing can be dequeued, which means: + // - before we exchange the tail if we have to override + // - before we set the head if we made the queue non empty. + // Otherwise, if preempted between one of these and the call to dx_wakeup() + // the blocks submitted to the queue may release the last reference to the + // queue when invoked by _dispatch_lane_drain. + + prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next); + if (unlikely(os_mpsc_push_was_empty(prev))) { + _dispatch_retain_2_unsafe(dq); + flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; + } else if (unlikely(_dispatch_queue_need_override(dq, qos))) { + // There's a race here, _dispatch_queue_need_override may read a stale + // dq_state value. 
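+		// (so the dx_wakeup() at the end of this function may attempt an
+		// override that is not needed anymore)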
+		//
+		// If it's a stale load from the same drain streak, given that
+		// the max qos is monotonic, too old a read can only cause an
+		// unnecessary attempt at overriding, which is harmless.
+		//
+		// We'll assume here that a stale load from a previous drain streak
+		// never happens in practice.
+		_dispatch_retain_2_unsafe(dq);
+		flags = DISPATCH_WAKEUP_CONSUME_2;
+	}
+	os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
+	if (flags) {
+		return dx_wakeup(dq, qos, flags);
+	}
+}
+
+DISPATCH_NOINLINE
+void
+_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
+		dispatch_qos_t qos)
+{
+	// Reserving non-barrier width doesn't fail if only the ENQUEUED bit is
+	// set (unlike its barrier width equivalent), so we have to check that
+	// this thread hasn't enqueued anything ahead of this call, or we could
+	// break ordering.
+	if (dq->dq_items_tail == NULL &&
+			!_dispatch_object_is_waiter(dou) &&
+			!_dispatch_object_is_barrier(dou) &&
+			_dispatch_queue_try_acquire_async(dq)) {
+		return _dispatch_continuation_redirect_push(dq, dou, qos);
+	}
+
+	_dispatch_lane_push(dq, dou, qos);
+}
+
+#pragma mark -
+#pragma mark dispatch_channel_t
+
+void
+_dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free)
+{
+	dch->dch_callbacks = NULL;
+	_dispatch_lane_class_dispose(dch, allow_free);
+}
+
+void
+_dispatch_channel_xref_dispose(dispatch_channel_t dch)
+{
+	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch->_as_dq);
+	if (callbacks->dcc_acknowledge_cancel && !(dqf & DSF_CANCELED)) {
+		DISPATCH_CLIENT_CRASH(dch, "Release of a channel that has not been "
+				"cancelled, but has a cancel acknowledgement callback");
+	}
+	dx_wakeup(dch, 0, DISPATCH_WAKEUP_MAKE_DIRTY);
+}
+
+typedef struct dispatch_channel_invoke_ctxt_s {
+	dispatch_channel_t dcic_dch;
+	dispatch_thread_frame_s dcic_dtf;
+	dispatch_invoke_context_t dcic_dic;
+	dispatch_invoke_flags_t dcic_flags;
+	dispatch_queue_wakeup_target_t dcic_tq;
+	struct dispatch_object_s *dcic_next_dc;
+	bool dcic_called_drain;
+} dispatch_channel_invoke_ctxt_s;
+
+static bool
+_dispatch_channel_invoke_cancel_check(dispatch_channel_t dch,
+		dispatch_channel_invoke_ctxt_t ctxt,
+		dispatch_channel_callbacks_t callbacks)
+{
+	bool rc = true;
+	if (!dch->dm_cancel_handler_called) {
+		if (_dispatch_queue_atomic_flags(dch) & DSF_CANCELED) {
+			dispatch_invoke_with_autoreleasepool(ctxt->dcic_flags, {
+				rc = callbacks->dcc_acknowledge_cancel(dch, dch->do_ctxt);
+			});
+			if (rc) {
+				dch->dm_cancel_handler_called = true;
+				_dispatch_release_no_dispose(dch);
+			} else {
+				ctxt->dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+			}
+		}
+	}
+	return rc;
+}
+
+static bool
+_dispatch_channel_invoke_checks(dispatch_channel_t dch,
+		dispatch_channel_invoke_ctxt_t dcic,
+		dispatch_channel_callbacks_t callbacks)
+{
+	if (!_dispatch_channel_invoke_cancel_check(dch, dcic, callbacks)) {
+		return false;
+	}
+	if (unlikely(_dispatch_needs_to_return_to_kernel())) {
+		_dispatch_return_to_kernel();
+	}
+	if (likely(dcic->dcic_flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) {
+		dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh();
+		if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) {
+			dcic->dcic_tq = dch->do_targetq;
+			return false;
+		}
+	}
+	if (unlikely(_dispatch_queue_drain_should_narrow(dcic->dcic_dic))) {
+		dcic->dcic_tq = dch->do_targetq;
+		return false;
+	}
+	uint64_t dq_state = os_atomic_load(&dch->dq_state, relaxed);
+	if 
(unlikely(_dq_state_is_suspended(dq_state))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_channel_invoke2(dispatch_channel_t dch, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned DISPATCH_UNUSED) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_channel_invoke_ctxt_s dcic = { + .dcic_dch = dch, + .dcic_dic = dic, + .dcic_flags = flags & + ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN, + .dcic_tq = DISPATCH_QUEUE_WAKEUP_NONE, + }; + + _dispatch_thread_frame_push(&dcic.dcic_dtf, dch); + + if (!_dispatch_channel_invoke_cancel_check(dch, &dcic, callbacks)) { + goto out; + } + + do { + struct dispatch_object_s *dc = dcic.dcic_next_dc; + + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + + if (unlikely(_dispatch_object_is_sync_waiter(dc))) { + DISPATCH_CLIENT_CRASH(0, "sync waiter found on channel"); + } + + if (_dispatch_object_is_channel_item(dc)) { + dcic.dcic_next_dc = dc; + dcic.dcic_called_drain = false; + dispatch_invoke_with_autoreleasepool(dcic.dcic_flags, { + if (callbacks->dcc_invoke(dch, &dcic, dch->do_ctxt)) { + if (unlikely(!dcic.dcic_called_drain)) { + DISPATCH_CLIENT_CRASH(0, "Channel didn't call " + "dispatch_channel_drain"); + } + } else { + dcic.dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + }); + } else { + dcic.dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + _dispatch_continuation_pop_inline(dc, dic, flags, dch); + if (!_dispatch_channel_invoke_checks(dch, &dcic, callbacks)) { + break; + } + } + } while (dcic.dcic_tq == DISPATCH_QUEUE_WAKEUP_NONE); + +out: + _dispatch_thread_frame_pop(&dcic.dcic_dtf); + return dcic.dcic_tq; +} + +void +_dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dch, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_channel_invoke2); +} + +void +dispatch_channel_foreach_work_item_peek_f( + dispatch_channel_invoke_ctxt_t dcic, + void *ctxt, dispatch_channel_enumerator_handler_t f) +{ + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called peek after drain"); + } + + dispatch_channel_t dch = dcic->dcic_dch; + struct dispatch_object_s *dc = dcic->dcic_next_dc; + + for (;;) { + dispatch_continuation_t dci = (dispatch_continuation_t)dc; + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + if (!f(ctxt, dci->dc_ctxt)) { + break; + } + if (dc == dch->dq_items_tail) { + break; + } + dc = os_mpsc_get_next(dc, do_next); + } +} + +void +dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t dcic, + void *_Nullable ctxt, dispatch_channel_drain_handler_t f) +{ + dispatch_channel_t dch = dcic->dcic_dch; + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + struct dispatch_object_s *dc; + uintptr_t dcf = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + void *unpop_item = NULL; + bool stop_invoke = false; + + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called drain twice in the same invoke"); + } + dcic->dcic_called_drain = true; + + do { + dc = dcic->dcic_next_dc; + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + + dcic->dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + + _dispatch_continuation_pop_forwarded(upcast(dc)._dc, dcf, dch, { + 
dispatch_invoke_with_autoreleasepool(dcic->dcic_flags, {
+				stop_invoke = !f(ctxt, upcast(dc)._dc->dc_ctxt, &unpop_item);
+			});
+		});
+		if (unlikely(stop_invoke)) {
+			break;
+		}
+	} while (_dispatch_channel_invoke_checks(dch, dcic, callbacks));
+
+	if (unlikely(unpop_item)) {
+		dispatch_continuation_t dci = _dispatch_continuation_alloc();
+		_dispatch_continuation_init_f(dci, dch, unpop_item, NULL, 0, dcf);
+		os_mpsc_undo_pop_head(os_mpsc(dch, dq_items), upcast(dci)._do,
+				dcic->dcic_next_dc, do_next);
+		dcic->dcic_next_dc = upcast(dci)._do;
+	}
+}
+
+#ifdef __BLOCKS__
+void
+dispatch_channel_foreach_work_item_peek(
+		dispatch_channel_invoke_ctxt_t dcic,
+		dispatch_channel_enumerator_block_t block)
+{
+	dispatch_channel_enumerator_handler_t f;
+	f = (dispatch_channel_enumerator_handler_t)_dispatch_Block_invoke(block);
+	dispatch_channel_foreach_work_item_peek_f(dcic, block, f);
+}
+
+void
+dispatch_channel_drain(dispatch_channel_invoke_ctxt_t dcic,
+		dispatch_channel_drain_block_t block)
+{
+	dispatch_channel_drain_handler_t f;
+	f = (dispatch_channel_drain_handler_t)_dispatch_Block_invoke(block);
+	dispatch_channel_drain_f(dcic, block, f);
+}
+#endif // __BLOCKS__
+
+DISPATCH_NOINLINE
+void
+_dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos,
+		dispatch_wakeup_flags_t flags)
+{
+	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
+	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
+	dispatch_queue_t dq = dch->_as_dq;
+
+	if (unlikely(!callbacks->dcc_probe(dch, dch->do_ctxt))) {
+		target = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+	} else if (_dispatch_queue_class_probe(dch)) {
+		target = DISPATCH_QUEUE_WAKEUP_TARGET;
+	} else if (_dispatch_queue_atomic_flags(dq) & DSF_CANCELED) {
+		if (!dch->dm_cancel_handler_called) {
+			target = DISPATCH_QUEUE_WAKEUP_TARGET;
+		}
+	}
+
+	return _dispatch_queue_wakeup(dch, qos, flags, target);
+}
+
+size_t
+_dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz)
+{
+	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch);
+	size_t offset = 0;
+
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
+			_dispatch_object_class_name(dch), dch);
+	offset += _dispatch_object_debug_attr(dch, &buf[offset], bufsiz - offset);
+	offset += _dispatch_queue_debug_attr(dch->_as_dq, &buf[offset],
+			bufsiz - offset);
+	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s%s%s",
+			(dqf & DSF_CANCELED) ? "cancelled, " : "",
+			(dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "",
+			(dqf & DSF_DELETED) ? 
"deleted, " : ""); + + return offset; +} + +dispatch_channel_t +dispatch_channel_create(const char *label, dispatch_queue_t tq, + void *ctxt, dispatch_channel_callbacks_t callbacks) +{ + dispatch_channel_t dch; + dispatch_queue_flags_t dqf = DSF_STRICT; + + if (callbacks->dcc_version < 1) { + DISPATCH_CLIENT_CRASH(callbacks->dcc_version, + "Unsupported callbacks version"); + } + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + if (unlikely(!tq)) { + tq = _dispatch_get_default_queue(true); + } else { + _dispatch_retain((dispatch_queue_t _Nonnull)tq); + } + + dch = _dispatch_queue_alloc(channel, dqf, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dch; + dch->dq_label = label; + dch->do_targetq = tq; + dch->dch_callbacks = callbacks; + dch->do_ctxt = ctxt; + if (!callbacks->dcc_acknowledge_cancel) { + dch->dm_cancel_handler_called = true; + dch->do_ref_cnt--; + } + return dch; +} + +DISPATCH_NOINLINE +static void +_dispatch_channel_enqueue_slow(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_channel_enqueue(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_channel_enqueue_slow(dch, ctxt); + } + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +#ifndef __APPLE__ +#if __BLOCKS__ +void typeof(dispatch_channel_async) dispatch_channel_async + __attribute__((__alias__("dispatch_async"))); +#endif + +void typeof(dispatch_channel_async_f) dispatch_channel_async_f + __attribute__((__alias__("dispatch_async_f"))); +#endif + +void +dispatch_channel_wakeup(dispatch_channel_t dch, qos_class_t qos_class) +{ + dispatch_qos_t oqos = _dispatch_qos_from_qos_class(qos_class); + dx_wakeup(dch, oqos, DISPATCH_WAKEUP_MAKE_DIRTY); +} + +#pragma mark - +#pragma mark dispatch_mgr_queue + +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +struct _dispatch_mgr_sched_s { + volatile int prio; + volatile qos_class_t qos; + int default_prio; + int policy; +#if defined(_WIN32) + HANDLE hThread; +#else + pthread_t tid; +#endif +}; + +DISPATCH_STATIC_GLOBAL(struct _dispatch_mgr_sched_s _dispatch_mgr_sched); +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred); + +#if HAVE_PTHREAD_WORKQUEUE_QOS +// TODO: switch to "event-reflector thread" property +// Must be kept in sync with list of qos classes in sys/qos.h +static int +_dispatch_mgr_sched_qos2prio(qos_class_t qos) +{ + if (qos == QOS_CLASS_MAINTENANCE) return 4; + switch (qos) { + case QOS_CLASS_BACKGROUND: return 4; + case QOS_CLASS_UTILITY: return 20; + case QOS_CLASS_DEFAULT: return 31; + case QOS_CLASS_USER_INITIATED: return 37; + case QOS_CLASS_USER_INTERACTIVE: return 47; + default: return 0; + } +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + +static void +_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) +{ +#if !defined(_WIN32) + struct sched_param param; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + 
dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt;
+	pthread_attr_t *attr = &pqc->dpq_thread_attr;
+#else
+	pthread_attr_t a, *attr = &a;
+#endif
+	(void)dispatch_assume_zero(pthread_attr_init(attr));
+	(void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr,
+			&_dispatch_mgr_sched.policy));
+	(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	qos_class_t qos = qos_class_main();
+	if (qos == QOS_CLASS_DEFAULT) {
+		qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292
+	}
+	if (qos) {
+		_dispatch_mgr_sched.qos = qos;
+		param.sched_priority = _dispatch_mgr_sched_qos2prio(qos);
+	}
+#endif
+	_dispatch_mgr_sched.default_prio = param.sched_priority;
+#else // defined(_WIN32)
+	_dispatch_mgr_sched.policy = 0;
+	_dispatch_mgr_sched.default_prio = THREAD_PRIORITY_NORMAL;
+#endif // defined(_WIN32)
+	_dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio;
+}
+#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE
+
+#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
+#if DISPATCH_USE_MGR_THREAD
+#if !defined(_WIN32)
+DISPATCH_NOINLINE
+static pthread_t *
+_dispatch_mgr_root_queue_init(void)
+{
+	dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
+	dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt;
+	pthread_attr_t *attr = &pqc->dpq_thread_attr;
+	struct sched_param param;
+	(void)dispatch_assume_zero(pthread_attr_setdetachstate(attr,
+			PTHREAD_CREATE_DETACHED));
+#if !DISPATCH_DEBUG
+	(void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024));
+#endif
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	qos_class_t qos = _dispatch_mgr_sched.qos;
+	if (qos) {
+		if (_dispatch_set_qos_class_enabled) {
+			(void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr,
+					qos, 0));
+		}
+	}
+#endif
+	param.sched_priority = _dispatch_mgr_sched.prio;
+	if (param.sched_priority > _dispatch_mgr_sched.default_prio) {
+		(void)dispatch_assume_zero(pthread_attr_setschedparam(attr, &param));
+	}
+	return &_dispatch_mgr_sched.tid;
+}
+#else // defined(_WIN32)
+DISPATCH_NOINLINE
+static PHANDLE
+_dispatch_mgr_root_queue_init(void)
+{
+	dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
+	return &_dispatch_mgr_sched.hThread;
+}
+#endif // defined(_WIN32)
+
+static inline void
+_dispatch_mgr_priority_apply(void)
+{
+#if !defined(_WIN32)
+	struct sched_param param;
+	do {
+		param.sched_priority = _dispatch_mgr_sched.prio;
+		if (param.sched_priority > _dispatch_mgr_sched.default_prio) {
+			(void)dispatch_assume_zero(pthread_setschedparam(
+					_dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy,
+					&param));
+		}
+	} while (_dispatch_mgr_sched.prio > param.sched_priority);
+#else // defined(_WIN32)
+	int nPriority = _dispatch_mgr_sched.prio;
+	do {
+		if (nPriority > _dispatch_mgr_sched.default_prio) {
+			// TODO(compnerd) set thread scheduling policy
+			dispatch_assume_zero(SetThreadPriority(_dispatch_mgr_sched.hThread, nPriority));
+			nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread);
+		}
+	} while (_dispatch_mgr_sched.prio > nPriority);
+#endif // defined(_WIN32)
+}
+
+DISPATCH_NOINLINE
+static void
+_dispatch_mgr_priority_init(void)
+{
+#if !defined(_WIN32)
+	dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt;
+	pthread_attr_t *attr = &pqc->dpq_thread_attr;
+	struct sched_param param;
+	(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	qos_class_t qos = 0;
+	(void)pthread_attr_get_qos_class_np(attr, &qos, NULL);
+	if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) {
+		(void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0);
+		int p = _dispatch_mgr_sched_qos2prio(_dispatch_mgr_sched.qos);
+		if (p > param.sched_priority) {
+			param.sched_priority = p;
+		}
+	}
+#endif
+	if (unlikely(_dispatch_mgr_sched.prio > param.sched_priority)) {
+		return _dispatch_mgr_priority_apply();
+	}
+#else // defined(_WIN32)
+	int nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread);
+	if (slowpath(_dispatch_mgr_sched.prio > nPriority)) {
+		return _dispatch_mgr_priority_apply();
+	}
+#endif // defined(_WIN32)
+}
+#endif // DISPATCH_USE_MGR_THREAD
+
+#if !defined(_WIN32)
+DISPATCH_NOINLINE
+static void
+_dispatch_mgr_priority_raise(const pthread_attr_t *attr)
+{
+	dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
+	struct sched_param param;
+	(void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	qos_class_t q, qos = 0;
+	(void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL);
+	if (qos) {
+		param.sched_priority = _dispatch_mgr_sched_qos2prio(qos);
+		os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, {
+			if (q >= qos) os_atomic_rmw_loop_give_up(break);
+		});
+	}
+#endif
+	int p, prio = param.sched_priority;
+	os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, {
+		if (p >= prio) os_atomic_rmw_loop_give_up(return);
+	});
+#if DISPATCH_USE_KEVENT_WORKQUEUE
+	_dispatch_root_queues_init();
+	if (_dispatch_kevent_workqueue_enabled) {
+		pthread_priority_t pp = 0;
+		if (prio > _dispatch_mgr_sched.default_prio) {
+			// The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and
+			// _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, but that is not
+			// problematic in this case, since the second one is only ever
+			// used on dq_priority fields.
+			// We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a syscall,
+			// it is meaningful to libdispatch only.
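+			// (i.e. `pp` carries a raw sched priority here, as opposed to
+			// the QoS encoding produced by _pthread_qos_class_encode() in
+			// the branch below)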
+ pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } else if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } + return; + } +#endif +#if DISPATCH_USE_MGR_THREAD + if (_dispatch_mgr_sched.tid) { + return _dispatch_mgr_priority_apply(); + } +#endif +} +#endif // !defined(_WIN32) +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) +{ + uint64_t old_state, new_state, set_owner_and_set_full_width = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely(!_dq_state_is_runnable(old_state) || + _dq_state_drain_locked(old_state))) { + DISPATCH_INTERNAL_CRASH((uintptr_t)old_state, + "Locking the manager should not fail"); + } + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width; + }); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) +{ + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + }); + return _dq_state_is_dirty(old_state); +} + +static void +_dispatch_mgr_queue_drain(void) +{ + const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; + dispatch_invoke_context_s dic = { }; + struct dispatch_queue_static_s *dq = &_dispatch_mgr_q; + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + if (dq->dq_items_tail) { + _dispatch_perfmon_start(); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + if (unlikely(_dispatch_lane_serial_drain(dq, &dic, flags, &owned))) { + DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); + } + _dispatch_voucher_debug("mgr queue clear", NULL); + _voucher_clear(); + _dispatch_reset_basepri_override(); + _dispatch_perfmon_end(perfmon_thread_manager); + } + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) +#endif + { + _dispatch_force_cache_cleanup(); + } +#pragma clang diagnostic pop +} + +void +_dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou, + DISPATCH_UNUSED dispatch_qos_t qos) +{ + uint64_t dq_state; + + if (unlikely(_dispatch_object_is_waiter(dou))) { + DISPATCH_CLIENT_CRASH(0, "Waiter pushed onto manager"); + } + + if (unlikely(_dispatch_queue_push_item(dq, dou))) { + dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + if (!_dq_state_drain_locked_by_self(dq_state)) { + _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1); + _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); + } + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_lane_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, + DISPATCH_UNUSED dispatch_wakeup_flags_t flags) +{ + DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager"); +} + +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_mgr_invoke(void) +{ +#if DISPATCH_EVENT_BACKEND_KEVENT + dispatch_kevent_s evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +#endif + dispatch_deferred_items_s ddi 
= { + .ddi_wlh = DISPATCH_WLH_ANON, +#if DISPATCH_EVENT_BACKEND_KEVENT + .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + .ddi_eventlist = evbuf, +#endif + }; + + _dispatch_deferred_items_set(&ddi); + for (;;) { + bool poll = false; + _dispatch_mgr_queue_drain(); + _dispatch_event_loop_drain_anon_timers(); + poll = _dispatch_queue_class_probe(&_dispatch_mgr_q); + _dispatch_event_loop_drain(poll ? KEVENT_FLAG_IMMEDIATE : 0); + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_thread(dispatch_lane_t dq DISPATCH_UNUSED, + dispatch_invoke_context_t dic DISPATCH_UNUSED, + dispatch_invoke_flags_t flags DISPATCH_UNUSED) +{ +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " + "kevent workqueue enabled"); + } +#endif + _dispatch_queue_set_current(&_dispatch_mgr_q); +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + _dispatch_mgr_priority_init(); +#endif + _dispatch_queue_mgr_lock(&_dispatch_mgr_q); + // never returns, so burn bridges behind us & clear stack 2k ahead + _dispatch_clear_stack(2048); + _dispatch_mgr_invoke(); +} +#endif // DISPATCH_USE_MGR_THREAD + +#if DISPATCH_USE_KEVENT_WORKQUEUE + +dispatch_static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); + +static void _dispatch_root_queue_drain_deferred_item( + dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); +static void _dispatch_root_queue_drain_deferred_wlh( + dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); + +void +_dispatch_kevent_workqueue_init(void) +{ + // Initialize kevent workqueue support + _dispatch_root_queues_init(); + if (!_dispatch_kevent_workqueue_enabled) return; + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + qos_class_t qos = _dispatch_mgr_sched.qos; + int prio = _dispatch_mgr_sched.prio; + pthread_priority_t pp = 0; + if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + } + if (prio > _dispatch_mgr_sched.default_prio) { + pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) +{ + dispatch_assert(ddi->ddi_wlh); + + pthread_priority_t pp = _dispatch_get_priority(); + if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + // If this thread does not have the event manager flag set, don't setup + // as the dispatch manager and let the caller know to only process + // the delivered events. 
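+		// (this is the regular, non-manager, event delivery path)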
+		//
+		// Also add the NEEDS_UNBIND flag so that
+		// _dispatch_priority_compute_update knows it has to unbind
+		pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
+		if (ddi->ddi_wlh == DISPATCH_WLH_ANON) {
+			pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+		} else {
+			// pthread sets the flag when it is an event delivery thread
+			// so we need to explicitly clear it
+			pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+		}
+		_dispatch_thread_setspecific(dispatch_priority_key,
+				(void *)(uintptr_t)pp);
+		if (ddi->ddi_wlh != DISPATCH_WLH_ANON) {
+			_dispatch_debug("wlh[%p]: handling events", ddi->ddi_wlh);
+		} else {
+			ddi->ddi_can_stash = true;
+		}
+		return false;
+	}
+
+	if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) ||
+			!(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) {
+		// When the pthread kext is delivering kevents to us, and pthread
+		// root queues are in use, then the pthread priority TSD is set
+		// to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set.
+		//
+		// Given that this isn't a valid QoS we need to fix up the TSD,
+		// and the best option is to clear the qos/priority bits which tells
+		// us to not do any QoS related calls on this thread.
+		//
+		// However, in that case the manager thread is opted out of QoS,
+		// as far as pthread is concerned, and can't be turned into
+		// something else, so we can't stash.
+		pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK;
+	}
+	// Managers always park without mutating to a regular worker thread, and
+	// hence never need to unbind from userland, and when draining a manager,
+	// the NEEDS_UNBIND flag would cause the mutation to happen.
+	// So we need to strip this flag.
+	pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
+	_dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
+
+	// ensure kevents registered from this thread are registered at manager QoS
+	_dispatch_init_basepri_wlh(DISPATCH_PRIORITY_FLAG_MANAGER);
+	_dispatch_queue_set_current(&_dispatch_mgr_q);
+	_dispatch_queue_mgr_lock(&_dispatch_mgr_q);
+	return true;
+}
+
+DISPATCH_ALWAYS_INLINE
+static inline void
+_dispatch_wlh_worker_thread_reset(void)
+{
+	bool needs_poll = _dispatch_queue_mgr_unlock(&_dispatch_mgr_q);
+	_dispatch_clear_basepri();
+	_dispatch_queue_set_current(NULL);
+	if (needs_poll) {
+		_dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1);
+		_dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0);
+	}
+}
+
+DISPATCH_ALWAYS_INLINE
+static void
+_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events,
+		int *nevents)
+{
+	_dispatch_introspection_thread_add();
+
+	DISPATCH_PERF_MON_VAR_INIT
+
+	dispatch_deferred_items_s ddi = {
+		.ddi_wlh = wlh,
+		.ddi_eventlist = events,
+	};
+	bool is_manager;
+
+	is_manager = _dispatch_wlh_worker_thread_init(&ddi);
+	if (!is_manager) {
+		_dispatch_trace_runtime_event(worker_event_delivery,
+				wlh == DISPATCH_WLH_ANON ? 
NULL : wlh, (uint64_t)*nevents); + _dispatch_perfmon_start_impl(true); + } else { + _dispatch_trace_runtime_event(worker_event_delivery, + &_dispatch_mgr_q, (uint64_t)*nevents); + ddi.ddi_wlh = DISPATCH_WLH_ANON; + } + _dispatch_deferred_items_set(&ddi); + _dispatch_event_loop_merge(events, *nevents); + + if (is_manager) { + _dispatch_trace_runtime_event(worker_unpark, &_dispatch_mgr_q, 0); + _dispatch_mgr_queue_drain(); + _dispatch_event_loop_drain_anon_timers(); + _dispatch_wlh_worker_thread_reset(); + } else if (ddi.ddi_stashed_dou._do) { + _dispatch_debug("wlh[%p]: draining deferred item %p", ddi.ddi_wlh, + ddi.ddi_stashed_dou._do); + if (ddi.ddi_wlh == DISPATCH_WLH_ANON) { + dispatch_assert(ddi.ddi_nevents == 0); + _dispatch_deferred_items_set(NULL); + _dispatch_trace_runtime_event(worker_unpark, ddi.ddi_stashed_rq, 0); + _dispatch_root_queue_drain_deferred_item(&ddi + DISPATCH_PERF_MON_ARGS); + } else { + _dispatch_trace_runtime_event(worker_unpark, wlh, 0); + _dispatch_root_queue_drain_deferred_wlh(&ddi + DISPATCH_PERF_MON_ARGS); + } + } + + _dispatch_deferred_items_set(NULL); + if (!is_manager && !ddi.ddi_stashed_dou._do) { + _dispatch_perfmon_end(perfmon_thread_event_no_steal); + } + _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); + _dispatch_clear_return_to_kernel(); + *nevents = ddi.ddi_nevents; + + _dispatch_trace_runtime_event(worker_park, NULL, 0); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) +{ + if (!dispatch_assume(events && nevents)) { + return; + } + if (*nevents == 0 || *events == NULL) { + // events for worker thread request have already been delivered earlier + // or got cancelled before point of no return concurrently + return; + } + _dispatch_adopt_wlh_anon(); + _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); + _dispatch_reset_wlh(); +} + +#if DISPATCH_USE_KEVENT_WORKLOOP +DISPATCH_NOINLINE +static void +_dispatch_workloop_worker_thread(uint64_t *workloop_id, + dispatch_kevent_t *events, int *nevents) +{ + if (!dispatch_assume(workloop_id && events && nevents)) { + return; + } + if (!dispatch_assume(*workloop_id != 0)) { + return _dispatch_kevent_worker_thread(events, nevents); + } + if (*nevents == 0 || *events == NULL) { + // events for worker thread request have already been delivered earlier + // or got cancelled before point of no return concurrently + return; + } + dispatch_wlh_t wlh = (dispatch_wlh_t)*workloop_id; + _dispatch_adopt_wlh(wlh); + _dispatch_wlh_worker_thread(wlh, *events, nevents); + _dispatch_preserve_wlh_storage_reference(wlh); +} +#endif // DISPATCH_USE_KEVENT_WORKLOOP +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#pragma mark - +#pragma mark dispatch_root_queue + +#if DISPATCH_USE_PTHREAD_POOL +static void *_dispatch_worker_thread(void *context); +#if defined(_WIN32) +static unsigned WINAPI _dispatch_worker_thread_thunk(LPVOID lpParameter); +#endif +#endif // DISPATCH_USE_PTHREAD_POOL + +#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG +#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) +static void +_dispatch_debug_root_queue(dispatch_queue_class_t dqu, const char *str) +{ + if (likely(dqu._dq)) { + _dispatch_object_debug(dqu._dq, "%s", str); + } else { + _dispatch_log("queue[NULL]: %s", str); + } +} +#else +#define _dispatch_root_queue_debug(...) +#define _dispatch_debug_root_queue(...) 
+#endif // DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG + +DISPATCH_NOINLINE +static void +_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) +{ + int remaining = n; + int r = ENOSYS; + + _dispatch_root_queues_init(); + _dispatch_debug_root_queue(dq, __func__); + _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n); + +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) +#endif + { + _dispatch_root_queue_debug("requesting new worker thread for global " + "queue: %p", dq); + r = _pthread_workqueue_addthreads(remaining, + _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); + (void)dispatch_assume_zero(r); + return; + } +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_POOL + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + if (likely(pqc->dpq_thread_mediator.do_vtable)) { + while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { + _dispatch_root_queue_debug("signaled sleeping worker for " + "global queue: %p", dq); + if (!--remaining) { + return; + } + } + } + + bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (overcommit) { + os_atomic_add2o(dq, dgq_pending, remaining, relaxed); + } else { + if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) { + _dispatch_root_queue_debug("worker thread request still pending for " + "global queue: %p", dq); + return; + } + } + + int can_request, t_count; + // seq_cst with atomic store to tail + t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered); + do { + can_request = t_count < floor ? 0 : t_count - floor; + if (remaining > can_request) { + _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", + remaining, can_request); + os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed); + remaining = can_request; + } + if (remaining == 0) { + _dispatch_root_queue_debug("pthread pool is full for root queue: " + "%p", dq); + return; + } + } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count, + t_count - remaining, &t_count, acquire)); + +#if !defined(_WIN32) + pthread_attr_t *attr = &pqc->dpq_thread_attr; + pthread_t tid, *pthr = &tid; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (unlikely(dq == &_dispatch_mgr_root_queue)) { + pthr = _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); // released in _dispatch_worker_thread + while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { + if (r != EAGAIN) { + (void)dispatch_assume_zero(r); + } + _dispatch_temporary_resource_shortage(); + } + } while (--remaining); +#else // defined(_WIN32) +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (unlikely(dq == &_dispatch_mgr_root_queue)) { + _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); // released in _dispatch_worker_thread +#if DISPATCH_DEBUG + unsigned dwStackSize = 0; +#else + unsigned dwStackSize = 64 * 1024; +#endif + uintptr_t hThread = 0; + while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { + if (errno != EAGAIN) { + (void)dispatch_assume(hThread); + } + _dispatch_temporary_resource_shortage(); + } + if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); + } + CloseHandle((HANDLE)hThread); + } while (--remaining); +#endif 
// defined(_WIN32) +#else + (void)floor; +#endif // DISPATCH_USE_PTHREAD_POOL +} + +DISPATCH_NOINLINE +void +_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) +{ + if (!_dispatch_queue_class_probe(dq)) { + return; + } +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_POOL + if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) +#endif + { + if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { + _dispatch_root_queue_debug("worker thread request still pending " + "for global queue: %p", dq); + return; + } + } +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE + return _dispatch_root_queue_poke_slow(dq, n, floor); +} + +#define DISPATCH_ROOT_QUEUE_MEDIATOR ((struct dispatch_object_s *)~0ul) + +enum { + DISPATCH_ROOT_QUEUE_DRAIN_WAIT, + DISPATCH_ROOT_QUEUE_DRAIN_READY, + DISPATCH_ROOT_QUEUE_DRAIN_ABORT, +}; + +static int +_dispatch_root_queue_mediator_is_gone(dispatch_queue_global_t dq) +{ + return os_atomic_load2o(dq, dq_items_head, relaxed) != + DISPATCH_ROOT_QUEUE_MEDIATOR; +} + +static int +_dispatch_root_queue_head_tail_quiesced(dispatch_queue_global_t dq) +{ + // Wait for queue head and tail to be both non-empty or both empty + struct dispatch_object_s *head, *tail; + head = os_atomic_load2o(dq, dq_items_head, relaxed); + tail = os_atomic_load2o(dq, dq_items_tail, relaxed); + if ((head == NULL) == (tail == NULL)) { + if (tail == NULL) { // + return DISPATCH_ROOT_QUEUE_DRAIN_ABORT; + } + return DISPATCH_ROOT_QUEUE_DRAIN_READY; + } + return DISPATCH_ROOT_QUEUE_DRAIN_WAIT; +} + +DISPATCH_NOINLINE +static bool +__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, + int (*predicate)(dispatch_queue_global_t dq)) +{ + unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; + int status = DISPATCH_ROOT_QUEUE_DRAIN_READY; + bool pending = false; + + do { + // Spin for a short while in case the contention is temporary -- e.g. + // when starting up after dispatch_apply, or when executing a few + // short continuations in a row. + if (_dispatch_contention_wait_until(status = predicate(dq))) { + goto out; + } + // Since we have serious contention, we need to back off. + if (!pending) { + // Mark this queue as pending to avoid requests for further threads + (void)os_atomic_inc2o(dq, dgq_pending, relaxed); + pending = true; + } + _dispatch_contention_usleep(sleep_time); + if (likely(status = predicate(dq))) goto out; + sleep_time *= 2; + } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); + + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. + _dispatch_debug("contention on global queue: %p", dq); +out: + if (pending) { + (void)os_atomic_dec2o(dq, dgq_pending, relaxed); + } + if (status == DISPATCH_ROOT_QUEUE_DRAIN_WAIT) { + _dispatch_root_queue_poke(dq, 1, 0); + } + return status == DISPATCH_ROOT_QUEUE_DRAIN_READY; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline struct dispatch_object_s * +_dispatch_root_queue_drain_one(dispatch_queue_global_t dq) +{ + struct dispatch_object_s *head, *next; + +start: + // The MEDIATOR value acts both as a "lock" and a signal + head = os_atomic_xchg2o(dq, dq_items_head, + DISPATCH_ROOT_QUEUE_MEDIATOR, relaxed); + + if (unlikely(head == NULL)) { + // The first xchg on the tail will tell the enqueueing thread that it + // is safe to blindly write out to the head pointer. 
A cmpxchg honors + // the algorithm. + if (unlikely(!os_atomic_cmpxchg2o(dq, dq_items_head, + DISPATCH_ROOT_QUEUE_MEDIATOR, NULL, relaxed))) { + goto start; + } + if (unlikely(dq->dq_items_tail)) { // + if (__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, + _dispatch_root_queue_head_tail_quiesced)) { + goto start; + } + } + _dispatch_root_queue_debug("no work on global queue: %p", dq); + return NULL; + } + + if (unlikely(head == DISPATCH_ROOT_QUEUE_MEDIATOR)) { + // This thread lost the race for ownership of the queue. + if (likely(__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, + _dispatch_root_queue_mediator_is_gone))) { + goto start; + } + return NULL; + } + + // Restore the head pointer to a sane value before returning. + // If 'next' is NULL, then this item _might_ be the last item. + next = head->do_next; + + if (unlikely(!next)) { + os_atomic_store2o(dq, dq_items_head, NULL, relaxed); + // 22708742: set tail to NULL with release, so that NULL write to head + // above doesn't clobber head from concurrent enqueuer + if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { + // both head and tail are NULL now + goto out; + } + // There must be a next item now. + next = os_mpsc_get_next(head, do_next); + } + + os_atomic_store2o(dq, dq_items_head, next, relaxed); + _dispatch_root_queue_poke(dq, 1, 0); +out: + return head; +} + +#if DISPATCH_USE_KEVENT_WORKQUEUE +static void +_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_global_t rq = ddi->ddi_stashed_rq; + dispatch_queue_t dq = ddi->ddi_stashed_dou._dq; + _dispatch_queue_set_current(rq); + + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + uint64_t dq_state; + + _dispatch_init_basepri_wlh(rq->dq_priority); + ddi->ddi_wlh_servicing = true; +retry: + dispatch_assert(ddi->ddi_wlh_needs_delete); + _dispatch_trace_item_pop(rq, dq); + + if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { + dx_invoke(dq, &dic, flags); +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // dx_invoke() will always return `dq` unlocked or locked by another + // thread, and either have consumed the +2 or transferred it to the + // other thread. + // +#endif + if (!ddi->ddi_wlh_needs_delete) { +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The fate of the workloop thread request has already been dealt + // with, which can happen for 4 reasons, for which we just want + // to go park and skip trying to unregister the thread request: + // - the workloop target has been changed + // - the workloop has been re-enqueued because of narrowing + // - the workloop has been re-enqueued on the manager queue + // - the workloop ownership has been handed off to a sync owner + // +#endif + goto park; + } +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The workloop has been drained to completion or suspended. + // dx_invoke() has cleared the enqueued bit before it returned. + // + // Since a dispatch_set_target_queue() could occur between the unlock + // and our reload of `dq_state` (rdar://32671286) we need to re-assess + // the workloop-ness of the queue. If it's not a workloop anymore, + // _dispatch_event_loop_leave_immediate() will have handled the kevent + // deletion already. + // + // Then, we check one last time that the queue is still not enqueued, + // in which case we attempt to quiesce it. 
+ // + // If we find it enqueued again, it means someone else has been + // enqueuing concurrently and has made a thread request that coalesced + // with ours, but since dx_invoke() cleared the enqueued bit, + // the other thread didn't realize that and added a +1 ref count. + // Take over that +1, and add our own to make the +2 this loop expects, + // and drain again. + // +#endif // DISPATCH_USE_KEVENT_WORKLOOP + dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 + goto park; + } + if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { + _dispatch_retain(dq); + _dispatch_trace_item_push(dq->do_targetq, dq); + goto retry; + } + } else { +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The workloop enters this function with a +2 refcount, however we + // couldn't acquire the lock due to suspension or discovering that + // the workloop was locked by a sync owner. + // + // We need to give up, and _dispatch_event_loop_leave_deferred() + // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to + // tell the kernel to stop driving this thread request. We leave + // a +1 with the thread request, and consume the extra +1 we have. + // +#endif + if (_dq_state_is_suspended(dq_state)) { + dispatch_assert(!_dq_state_is_enqueued(dq_state)); + _dispatch_release_2_no_dispose(dq); + } else { + dispatch_assert(_dq_state_is_enqueued(dq_state)); + dispatch_assert(_dq_state_drain_locked(dq_state)); + _dispatch_release_no_dispose(dq); + } + } + + _dispatch_event_loop_leave_deferred(ddi, dq_state); + +park: + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} + +static void +_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_global_t rq = ddi->ddi_stashed_rq; + _dispatch_queue_set_current(rq); + _dispatch_trace_runtime_event(worker_unpark, NULL, 0); + + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_push(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + _dispatch_init_basepri(rq->dq_priority); + + _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq); + + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} +#endif + +DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) +static void +_dispatch_root_queue_drain(dispatch_queue_global_t dq, + dispatch_priority_t pri, dispatch_invoke_flags_t flags) +{ +#if DISPATCH_DEBUG + dispatch_queue_t cq; + if (unlikely(cq = _dispatch_queue_get_current())) { + DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); + } +#endif + _dispatch_queue_set_current(dq); + _dispatch_init_basepri(pri); + _dispatch_adopt_wlh_anon(); + + struct dispatch_object_s *item; + bool reset = false; + dispatch_invoke_context_s dic = { }; +#if 
DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_push(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); + _dispatch_perfmon_start(); + while (likely(item = _dispatch_root_queue_drain_one(dq))) { + if (reset) _dispatch_wqthread_override_reset(); + _dispatch_continuation_pop_inline(item, &dic, flags, dq); + reset = _dispatch_reset_basepri_override(); + if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { + break; + } + } + + // overcommit or not. worker thread + if (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + _dispatch_perfmon_end(perfmon_thread_worker_oc); + } else { + _dispatch_perfmon_end(perfmon_thread_worker_non_oc); + } + +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_reset_wlh(); + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); +} + +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +static void +_dispatch_worker_thread2(pthread_priority_t pp) +{ + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_global_t dq; + + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); + + _dispatch_introspection_thread_add(); + _dispatch_trace_runtime_event(worker_unpark, dq, 0); + + int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + dispatch_assert(pending >= 0); + _dispatch_root_queue_drain(dq, dq->dq_priority, + DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + _dispatch_trace_runtime_event(worker_park, NULL, 0); +} +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE + +#if DISPATCH_USE_PTHREAD_POOL +static inline void +_dispatch_root_queue_init_pthread_pool(dispatch_queue_global_t dq, + int pool_size, dispatch_priority_t pri) +{ + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + int thread_pool_size = DISPATCH_WORKQ_MAX_PTHREAD_COUNT; + if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + thread_pool_size = (int32_t)dispatch_hw_config(active_cpus); + } + if (pool_size && pool_size < thread_pool_size) thread_pool_size = pool_size; + dq->dgq_thread_pool_size = thread_pool_size; + qos_class_t cls = _dispatch_qos_to_qos_class(_dispatch_priority_qos(pri) ?: + _dispatch_priority_fallback_qos(pri)); + if (cls) { +#if !defined(_WIN32) + pthread_attr_t *attr = &pqc->dpq_thread_attr; + int r = pthread_attr_init(attr); + dispatch_assume_zero(r); + r = pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED); + dispatch_assume_zero(r); +#endif // !defined(_WIN32) +#if HAVE_PTHREAD_WORKQUEUE_QOS + r = pthread_attr_set_qos_class_np(attr, cls, 0); + dispatch_assume_zero(r); +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } + _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; + pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); + _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); + _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void * +_dispatch_worker_thread(void *context) +{ + dispatch_queue_global_t dq = context; + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + + int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + if (unlikely(pending < 0)) { + DISPATCH_INTERNAL_CRASH(pending, 
"Pending thread request underflow"); + } + + if (pqc->dpq_observer_hooks.queue_will_execute) { + _dispatch_set_pthread_root_queue_observer_hooks( + &pqc->dpq_observer_hooks); + } + if (pqc->dpq_thread_configure) { + pqc->dpq_thread_configure(); + } + +#if !defined(_WIN32) + // workaround tweaks the kernel workqueue does for us + _dispatch_sigmask(); +#endif + _dispatch_introspection_thread_add(); + + const int64_t timeout = 5ull * NSEC_PER_SEC; + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = dq->dq_priority; + + // If the queue is neither + // - the manager + // - with a fallback set + // - with a requested QoS or QoS floor + // then infer the basepri from the current priority. + if ((pri & (DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_FLAG_FALLBACK | + DISPATCH_PRIORITY_FLAG_FLOOR | + DISPATCH_PRIORITY_REQUESTED_MASK)) == 0) { + pri &= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + pri |= _dispatch_priority_from_pp(pp); + } else { + pri |= _dispatch_priority_make_override(DISPATCH_QOS_SATURATED); + } + } + +#if DISPATCH_USE_INTERNAL_WORKQUEUE + bool monitored = ((pri & (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | + DISPATCH_PRIORITY_FLAG_MANAGER)) == 0); + if (monitored) _dispatch_workq_worker_register(dq); +#endif + + do { + _dispatch_trace_runtime_event(worker_unpark, dq, 0); + _dispatch_root_queue_drain(dq, pri, DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_reset_priority_and_voucher(pp, NULL); + _dispatch_trace_runtime_event(worker_park, NULL, 0); + } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, + dispatch_time(0, timeout)) == 0); + +#if DISPATCH_USE_INTERNAL_WORKQUEUE + if (monitored) _dispatch_workq_worker_unregister(dq); +#endif + (void)os_atomic_inc2o(dq, dgq_thread_pool_size, release); + _dispatch_root_queue_poke(dq, 1, 0); + _dispatch_release(dq); // retained in _dispatch_root_queue_poke_slow + return NULL; +} +#if defined(_WIN32) +static unsigned WINAPI +_dispatch_worker_thread_thunk(LPVOID lpParameter) +{ + _dispatch_worker_thread(lpParameter); + return 0; +} +#endif // defined(_WIN32) +#endif // DISPATCH_USE_PTHREAD_POOL + +DISPATCH_NOINLINE +void +_dispatch_root_queue_wakeup(dispatch_queue_global_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) +{ + if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + DISPATCH_INTERNAL_CRASH(dq->dq_priority, + "Don't try to wake up or override a root queue"); + } + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } +} + +DISPATCH_NOINLINE +void +_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, + dispatch_qos_t qos) +{ +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (unlikely(ddi && ddi->ddi_can_stash)) { + dispatch_object_t old_dou = ddi->ddi_stashed_dou; + dispatch_priority_t rq_overcommit; + rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + + if (likely(!old_dou._do || rq_overcommit)) { + dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; + dispatch_qos_t old_qos = ddi->ddi_stashed_qos; + ddi->ddi_stashed_rq = rq; + ddi->ddi_stashed_dou = dou; + ddi->ddi_stashed_qos = qos; + _dispatch_debug("deferring item %p, rq %p, qos %d", + dou._do, rq, qos); + if (rq_overcommit) { + ddi->ddi_can_stash = false; + } + if (likely(!old_dou._do)) { + return; + } + // push the previously stashed item + qos = old_qos; + rq = old_rq; + dou = old_dou; + } + } +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + if 
(_dispatch_root_queue_push_needs_override(rq, qos)) { + return _dispatch_root_queue_push_override(rq, dou, qos); + } +#else + (void)qos; +#endif + _dispatch_root_queue_push_inline(rq, dou, dou, 1); +} + +#pragma mark - +#pragma mark dispatch_pthread_root_queue +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + +static dispatch_queue_global_t +_dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + dispatch_queue_pthread_root_t dpq; + dispatch_queue_flags_t dqf = 0; + int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? + (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + dpq = _dispatch_queue_alloc(queue_pthread_root, dqf, + DISPATCH_QUEUE_WIDTH_POOL, 0)._dpq; + dpq->dq_label = label; + dpq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; + dpq->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dpq->do_ctxt = &dpq->dpq_ctxt; + + dispatch_pthread_root_queue_context_t pqc = &dpq->dpq_ctxt; + _dispatch_root_queue_init_pthread_pool(dpq->_as_dgq, pool_size, + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + +#if !defined(_WIN32) + if (attr) { + memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); + _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); + } else { + (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); + } + (void)dispatch_assume_zero(pthread_attr_setdetachstate( + &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); +#else // defined(_WIN32) + dispatch_assert(attr == NULL); +#endif // defined(_WIN32) + if (configure) { + pqc->dpq_thread_configure = _dispatch_Block_copy(configure); + } + if (observer_hooks) { + pqc->dpq_observer_hooks = *observer_hooks; + } + _dispatch_object_debug(dpq, "%s", __func__); + return _dispatch_trace_queue_create(dpq)._dgq; +} + +dispatch_queue_global_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure) +{ + return _dispatch_pthread_root_queue_create(label, flags, attr, configure, + NULL); +} + +#if DISPATCH_IOHID_SPI +dispatch_queue_global_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label, + unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure) +{ + if (!observer_hooks->queue_will_execute || + !observer_hooks->queue_did_execute) { + DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks"); + } + return _dispatch_pthread_root_queue_create(label, flags, attr, configure, + observer_hooks); +} + +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t dq) // rdar://problem/18033810 +{ + if (dq->dq_width != 1) { + DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dq_state_drain_locked_by_self(dq_state); +} +#endif + +dispatch_queue_global_t +dispatch_pthread_root_queue_copy_current(void) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (!dq) return NULL; + while (unlikely(dq->do_targetq)) { + dq = dq->do_targetq; + } + if (dx_type(dq) != DISPATCH_QUEUE_PTHREAD_ROOT_TYPE) { + return NULL; + } + _os_object_retain_with_resurrect(dq->_as_os_obj); + return upcast(dq)._dgq; +} + +void 
+_dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, + bool *allow_free) +{ + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_trace_queue_dispose(dq); + +#if !defined(_WIN32) + pthread_attr_destroy(&pqc->dpq_thread_attr); +#endif + _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); + if (pqc->dpq_thread_configure) { + Block_release(pqc->dpq_thread_configure); + } + dq->do_targetq = _dispatch_get_default_queue(false); + _dispatch_lane_class_dispose(dq, allow_free); +} + +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES +#pragma mark - +#pragma mark dispatch_runloop_queue + +DISPATCH_STATIC_GLOBAL(bool _dispatch_program_is_probably_callback_driven); + +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_main_q_handle_pred); + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) +{ +#if TARGET_OS_MAC + return MACH_PORT_VALID(handle); +#elif defined(__linux__) + return handle >= 0; +#elif defined(_WIN32) + return handle != INVALID_HANDLE_VALUE; +#else +#error "runloop support not implemented on this platform" +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_runloop_handle_t +_dispatch_runloop_queue_get_handle(dispatch_lane_t dq) +{ +#if TARGET_OS_MAC + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); +#elif defined(__linux__) + // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#elif defined(_WIN32) + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); +#else +#error "runloop support not implemented on this platform" +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_runloop_queue_set_handle(dispatch_lane_t dq, + dispatch_runloop_handle_t handle) +{ +#if TARGET_OS_MAC + dq->do_ctxt = (void *)(uintptr_t)handle; +#elif defined(__linux__) + // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL + dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#elif defined(_WIN32) + dq->do_ctxt = (void *)(uintptr_t)handle; +#else +#error "runloop support not implemented on this platform" +#endif +} + +static void +_dispatch_runloop_queue_handle_init(void *ctxt) +{ + dispatch_lane_t dq = (dispatch_lane_t)ctxt; + dispatch_runloop_handle_t handle; + + _dispatch_fork_becomes_unsafe(); + +#if TARGET_OS_MAC + mach_port_options_t opts = { + .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT, + }; + mach_port_context_t guard = (uintptr_t)dq; + kern_return_t kr; + mach_port_t mp; + + if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { + opts.flags |= MPO_QLIMIT; + opts.mpl.mpl_qlimit = 1; + } + + kr = mach_port_construct(mach_task_self(), &opts, guard, &mp); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + + handle = mp; +#elif defined(__linux__) + int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); + break; + } + } + handle = fd; +#elif defined(_WIN32) + handle = INVALID_HANDLE_VALUE; +#else +#error "runloop support not 
implemented on this platform" +#endif + _dispatch_runloop_queue_set_handle(dq, handle); + + _dispatch_program_is_probably_callback_driven = true; +} + +static void +_dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) +{ + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { + return; + } + dq->do_ctxt = NULL; +#if TARGET_OS_MAC + mach_port_t mp = (mach_port_t)handle; + mach_port_context_t guard = (uintptr_t)dq; + kern_return_t kr; + kr = mach_port_destruct(mach_task_self(), mp, -1, guard); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +#elif defined(__linux__) + int rc = close(handle); + (void)dispatch_assume_zero(rc); +#elif defined(_WIN32) + CloseHandle(handle); +#else +#error "runloop support not implemented on this platform" +#endif +} +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) + +#if DISPATCH_COCOA_COMPAT +static inline void +_dispatch_runloop_queue_class_poke(dispatch_lane_t dq) +{ + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { + return; + } + + _dispatch_trace_runtime_event(worker_request, dq, 1); +#if HAVE_MACH + mach_port_t mp = handle; + kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); + switch (kr) { + case MACH_SEND_TIMEOUT: + case MACH_SEND_TIMED_OUT: + case MACH_SEND_INVALID_DEST: + break; + default: + (void)dispatch_assume_zero(kr); + break; + } +#elif defined(__linux__) + int result; + do { + result = eventfd_write(handle, 1); + } while (result == -1 && errno == EINTR); + (void)dispatch_assume_zero(result); +#else +#error "runloop support not implemented on this platform" +#endif +} + +DISPATCH_NOINLINE +static void +_dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have + // a release barrier and that when runloop queues stop being thread-bound + // they have a non optional wake-up to start being a "normal" queue + // either in _dispatch_runloop_queue_xref_dispose, + // or in _dispatch_queue_cleanup2() for the main thread. 
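+	// Editorial clarification (added by the editor, not part of the
+	// original source): the note above means a dedicated MAKE_DIRTY fast
+	// path would buy nothing here, because the mach_msg() send performed
+	// by _dispatch_runloop_queue_class_poke() already publishes the
+	// enqueue with a release barrier, and a queue that stops being
+	// thread-bound always receives one more unconditional wakeup, so no
+	// enqueue can be lost. The atomic loop below only merges the
+	// requested QoS into dq_state.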
+ uint64_t old_state, new_state; + + if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(goto no_change); + } + }); + + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + if (qos > dq_qos) { + mach_port_t owner = _dq_state_drain_owner(new_state); + pthread_priority_t pp = _dispatch_qos_to_pp(qos); + _dispatch_thread_override_start(owner, pp, dq); + if (_dq_state_max_qos(old_state) > dq_qos) { + _dispatch_thread_override_end(owner, dq); + } + } +no_change: + _dispatch_runloop_queue_class_poke(dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } +} + +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) +{ + uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | + DISPATCH_QUEUE_RECEIVED_OVERRIDE; + old_state = os_atomic_and_orig2o(dq, dq_state, ~clear_bits, relaxed); + return _dq_state_max_qos(old_state); +} + +void +_dispatch_runloop_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + if (unlikely(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { + // + return _dispatch_lane_wakeup(dq, qos, flags); + } + + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + } + if (_dispatch_queue_class_probe(dq)) { + return _dispatch_runloop_queue_poke(dq, qos, flags); + } + + qos = _dispatch_runloop_queue_reset_max_qos(dq); + if (qos) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (_dispatch_queue_class_probe(dq)) { + _dispatch_runloop_queue_poke(dq, qos, flags); + } + _dispatch_thread_override_end(owner, dq); + return; + } + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_main_queue_update_priority_from_thread(void) +{ + dispatch_queue_main_t dq = &_dispatch_main_q; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + mach_port_t owner = _dq_state_drain_owner(dq_state); + + dispatch_priority_t main_pri = + _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); + dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); + dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); + dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); + + // the main thread QoS was adjusted by someone else, learn the new QoS + // and reinitialize _dispatch_main_q.dq_priority + dq->dq_priority = main_pri; + + if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { + // main thread is opted out of QoS and we had an override + return _dispatch_thread_override_end(owner, dq); + } + + if (old_qos < max_qos && max_qos <= main_qos) { + // main QoS was raised, and we had an override which is now useless + return _dispatch_thread_override_end(owner, dq); + } + + if (main_qos < max_qos && max_qos <= old_qos) { + // main thread QoS was lowered, and we actually need an override + pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); + return _dispatch_thread_override_start(owner, pp, dq); + } +} + +static void +_dispatch_main_queue_drain(dispatch_queue_main_t dq) +{ + dispatch_thread_frame_s dtf; + + if (!dq->dq_items_tail) { + return; + } + + 
_dispatch_perfmon_start_notrace(); + if (unlikely(!_dispatch_queue_is_thread_bound(dq))) { + DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" + " after dispatch_main()"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "_dispatch_main_queue_callback_4CF called" + " from the wrong thread"); + } + + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + + // hide the frame chaining when CFRunLoop + // drains the main runloop, as this should not be observable that way + _dispatch_adopt_wlh_anon(); + _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); + + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + dispatch_qos_t qos = _dispatch_priority_qos(pri); + voucher_t voucher = _voucher_copy(); + + if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) { + _dispatch_main_queue_update_priority_from_thread(); + } + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + + dispatch_invoke_context_s dic = { }; + struct dispatch_object_s *dc, *next_dc, *tail; + dc = os_mpsc_capture_snapshot(os_mpsc(dq, dq_items), &tail); + do { + next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); + _dispatch_continuation_pop_inline(dc, &dic, + DISPATCH_INVOKE_THREAD_BOUND, dq); + } while ((dc = next_dc)); + + dx_wakeup(dq->_as_dq, 0, 0); + _dispatch_voucher_debug("main queue restore", voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); + _dispatch_thread_frame_pop(&dtf); + _dispatch_reset_wlh(); + _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); +} + +static bool +_dispatch_runloop_queue_drain_one(dispatch_lane_t dq) +{ + if (!dq->dq_items_tail) { + return false; + } + _dispatch_perfmon_start_notrace(); + dispatch_thread_frame_s dtf; + bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse(); + _dispatch_thread_frame_push(&dtf, dq); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + voucher_t voucher = _voucher_copy(); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + + dispatch_invoke_context_s dic = { }; + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_get_head(dq); + next_dc = _dispatch_queue_pop_head(dq, dc); + _dispatch_continuation_pop_inline(dc, &dic, + DISPATCH_INVOKE_THREAD_BOUND, dq); + + if (!next_dc) { + dx_wakeup(dq, 0, 0); + } + + _dispatch_voucher_debug("runloop queue restore", voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); + _dispatch_thread_frame_pop(&dtf); + if (should_reset_wlh) _dispatch_reset_wlh(); + _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); + return next_dc; +} + +dispatch_queue_serial_t +_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) +{ + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_lane_t dq; + + if (unlikely(flags)) { + return DISPATCH_BAD_INPUT; + } + dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), + sizeof(struct dispatch_lane_s)); + _dispatch_queue_init(dq, DQF_THREAD_BOUND, 1, + DISPATCH_QUEUE_ROLE_BASE_ANON); + dq->do_targetq = 
_dispatch_get_default_queue(true); + dq->dq_label = label ? label : "runloop-queue"; // no-copy contract + if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + dq->dq_priority = _dispatch_priority_from_pp_strip_flags(pp); + } + _dispatch_runloop_queue_handle_init(dq); + _dispatch_queue_set_bound_thread(dq); + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_trace_queue_create(dq)._dl; +} + +void +_dispatch_runloop_queue_xref_dispose(dispatch_lane_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + + dispatch_qos_t qos = _dispatch_runloop_queue_reset_max_qos(dq); + _dispatch_queue_clear_bound_thread(dq); + dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); +} + +void +_dispatch_runloop_queue_dispose(dispatch_lane_t dq, bool *allow_free) +{ + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_trace_queue_dispose(dq); + _dispatch_runloop_queue_handle_dispose(dq); + _dispatch_lane_class_dispose(dq, allow_free); +} + +bool +_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) +{ + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); + } + dispatch_retain(dq); + bool r = _dispatch_runloop_queue_drain_one(upcast(dq)._dl); + dispatch_release(dq); + return r; +} + +void +_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) +{ + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); + } + _dispatch_runloop_queue_wakeup(upcast(dq)._dl, 0, false); +} + +#if TARGET_OS_MAC || defined(_WIN32) +dispatch_runloop_handle_t +_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) +{ + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); + } + return _dispatch_runloop_queue_get_handle(upcast(dq)._dl); +} +#endif + +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) +#pragma mark - +#pragma mark dispatch_main_queue +#if DISPATCH_COCOA_COMPAT || defined(_WIN32) + +dispatch_runloop_handle_t +_dispatch_get_main_queue_handle_4CF(void) +{ + dispatch_queue_main_t dq = &_dispatch_main_q; + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + return _dispatch_runloop_queue_get_handle(dq->_as_dl); +} + +#if TARGET_OS_MAC +dispatch_runloop_handle_t +_dispatch_get_main_queue_port_4CF(void) +{ + return _dispatch_get_main_queue_handle_4CF(); +} +#endif + +void +_dispatch_main_queue_callback_4CF( + void *ignored DISPATCH_UNUSED) +{ + // the main queue cannot be suspended and no-one looks at this bit + // so abuse it to avoid dirtying more memory + + if (_dispatch_main_q.dq_side_suspend_cnt) { + return; + } + _dispatch_main_q.dq_side_suspend_cnt = true; + _dispatch_main_queue_drain(&_dispatch_main_q); + _dispatch_main_q.dq_side_suspend_cnt = false; +} + +#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) + +DISPATCH_NOINLINE +void +_dispatch_main_queue_push(dispatch_queue_main_t dq, dispatch_object_t dou, + dispatch_qos_t qos) +{ + // Same as _dispatch_lane_push() but without the refcounting due to being + // a global object + if (_dispatch_queue_push_item(dq, dou)) { + return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } + + qos = _dispatch_queue_push_qos(dq, qos); + if (_dispatch_queue_need_override(dq, qos)) { + return dx_wakeup(dq, qos, 0); + } +} + +void +_dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ 
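+	// Editorial note (not part of the original source): while the main
+	// thread is driven by CFRunLoop or dispatch_main(), the main queue is
+	// thread-bound and must be poked through its runloop handle; after
+	// _dispatch_queue_cleanup2() clears DQF_THREAD_BOUND it behaves like
+	// an ordinary serial lane, hence the fallback to
+	// _dispatch_lane_wakeup() below.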
+#if DISPATCH_COCOA_COMPAT + if (_dispatch_queue_is_thread_bound(dq)) { + return _dispatch_runloop_queue_wakeup(dq->_as_dl, qos, flags); + } +#endif + return _dispatch_lane_wakeup(dq, qos, flags); +} + +#if !defined(_WIN32) +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_sigsuspend(void) +{ + static const sigset_t mask; + pthread_sigmask(SIG_SETMASK, &mask, NULL); + for (;;) { + sigsuspend(&mask); + } +} +#endif // !defined(_WIN32) + +DISPATCH_NORETURN +static void +_dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) +{ + // never returns, so burn bridges behind us + _dispatch_clear_stack(0); +#if defined(_WIN32) + for (;;) SuspendThread(GetCurrentThread()); +#else + _dispatch_sigsuspend(); +#endif +} + +void +dispatch_main(void) +{ + _dispatch_root_queues_init(); +#if HAVE_PTHREAD_MAIN_NP + if (pthread_main_np()) { +#endif + _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); + _dispatch_program_is_probably_callback_driven = true; + _dispatch_ktrace0(ARIADNE_ENTER_DISPATCH_MAIN_CODE); +#ifdef __linux__ + // On Linux, if the main thread calls pthread_exit, the process becomes a zombie. + // To avoid that, just before calling pthread_exit we register a TSD destructor + // that will call _dispatch_sig_thread -- thus capturing the main thread in sigsuspend. + // This relies on an implementation detail (currently true in glibc) that TSD destructors + // will be called in the order of creation to cause all the TSD cleanup functions to + // run before the thread becomes trapped in sigsuspend. + pthread_key_t dispatch_main_key; + pthread_key_create(&dispatch_main_key, _dispatch_sig_thread); + pthread_setspecific(dispatch_main_key, &dispatch_main_key); + _dispatch_sigmask(); +#endif +#if !defined(_WIN32) + pthread_exit(NULL); +#else + _endthreadex(0); +#endif // defined(_WIN32) + DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); +#if HAVE_PTHREAD_MAIN_NP + } + DISPATCH_CLIENT_CRASH(0, "dispatch_main() must be called on the main thread"); +#endif +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_cleanup2(void) +{ + dispatch_queue_main_t dq = &_dispatch_main_q; + uint64_t old_state, new_state; + + // Turning the main queue from a runloop queue into an ordinary serial queue + // is a 3 steps operation: + // 1. finish taking the main queue lock the usual way + // 2. clear the THREAD_BOUND flag + // 3. do a handoff + // + // If an enqueuer executes concurrently, he may do the wakeup the runloop + // way, because he still believes the queue to be thread-bound, but the + // dirty bit will force this codepath to notice the enqueue, and the usual + // lock transfer will do the proper wakeup. + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state & ~DISPATCH_QUEUE_DIRTY; + new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state += DISPATCH_QUEUE_IN_BARRIER; + }); + _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND); + _dispatch_lane_barrier_complete(dq, 0, 0); + + // overload the "probably" variable to mean that dispatch_main() or + // similar non-POSIX API was called + // this has to run before the DISPATCH_COCOA_COMPAT below + // See dispatch_main for call to _dispatch_sig_thread on linux. 
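+	// (Editorial addition: on Linux the TSD destructor registered in
+	// dispatch_main() already parks the exiting main thread in
+	// sigsuspend, so the detached signal thread below is only created on
+	// the other platforms.)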
+#ifndef __linux__ + if (_dispatch_program_is_probably_callback_driven) { + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_t tid; + int r = pthread_create(&tid, &attr, (void*)_dispatch_sig_thread, NULL); + if (unlikely(r)) { + DISPATCH_CLIENT_CRASH(r, "Unable to create signal thread"); + } + pthread_attr_destroy(&attr); + // this used to be here as a workaround for 6778970 + // but removing it had bincompat fallouts :'( + sleep(1); + } +#endif + +#if DISPATCH_COCOA_COMPAT + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + _dispatch_runloop_queue_handle_dispose(dq->_as_dl); +#endif +} + +static void DISPATCH_TSD_DTOR_CC +_dispatch_queue_cleanup(void *ctxt) +{ + if (ctxt == &_dispatch_main_q) { + return _dispatch_queue_cleanup2(); + } + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch queue is running"); +} + +static void DISPATCH_TSD_DTOR_CC +_dispatch_wlh_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + dispatch_queue_t wlh; + wlh = (dispatch_queue_t)((uintptr_t)ctxt & ~DISPATCH_WLH_STORAGE_REF); + _dispatch_queue_release_storage(wlh); +} + +DISPATCH_NORETURN +static void DISPATCH_TSD_DTOR_CC +_dispatch_deferred_items_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit with unhandled deferred items"); +} + +DISPATCH_NORETURN +static DISPATCH_TSD_DTOR_CC void +_dispatch_frame_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch frame is active"); +} + +DISPATCH_NORETURN +static void DISPATCH_TSD_DTOR_CC +_dispatch_context_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch context is set"); +} +#pragma mark - +#pragma mark dispatch_init + +static void +_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) +{ + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_INTERNAL_WORKQUEUE + size_t i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0, + _dispatch_root_queues[i].dq_priority); + } +#else + int wq_supported = _pthread_workqueue_supported(); + int r = ENOTSUP; + + if (!(wq_supported & WORKQ_FEATURE_MAINTENANCE)) { + DISPATCH_INTERNAL_CRASH(wq_supported, + "QoS Maintenance support required"); + } + +#if DISPATCH_USE_KEVENT_SETUP + struct pthread_workqueue_config cfg = { + .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, + .flags = 0, + .workq_cb = 0, + .kevent_cb = 0, + .workloop_cb = 0, + .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, +#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2 + .queue_label_offs = dispatch_queue_offsets.dqo_label, +#endif + }; +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" + if (unlikely(!_dispatch_kevent_workqueue_enabled)) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else + r = _pthread_workqueue_init(_dispatch_worker_thread2, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP + } 
else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else + r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + (pthread_workqueue_function_workloop_t) + _dispatch_workloop_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP +#endif // DISPATCH_USE_KEVENT_WORKLOOP +#if DISPATCH_USE_KEVENT_WORKQUEUE + } else if (wq_supported & WORKQ_FEATURE_KEVENT) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else + r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP +#endif + } else { + DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); + } +#pragma clang diagnostic pop + + if (r != 0) { + DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, + "Root queue initialization failed"); + } +#endif // DISPATCH_USE_INTERNAL_WORKQUEUE +} + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_root_queues_pred); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_root_queues_init(void) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +libdispatch_init(void) +{ + dispatch_assert(sizeof(struct dispatch_apply_s) <= + DISPATCH_CONTINUATION_SIZE); + + if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) { + _dispatch_mode |= DISPATCH_MODE_STRICT; + } +#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { + _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; + } else if (getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { + _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; + } +#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + + +#if DISPATCH_DEBUG || DISPATCH_PROFILE +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")) { + _dispatch_kevent_workqueue_enabled = false; + } +#endif +#endif + +#if HAVE_PTHREAD_WORKQUEUE_QOS + dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main()); + _dispatch_main_q.dq_priority = _dispatch_priority_make(qos, 0); +#if DISPATCH_DEBUG + if (!getenv("LIBDISPATCH_DISABLE_SET_QOS")) { + _dispatch_set_qos_class_enabled = 1; + } +#endif +#endif + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE + _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); +#else + _dispatch_thread_key_create(&dispatch_priority_key, NULL); + _dispatch_thread_key_create(&dispatch_r2k_key, NULL); + _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); + _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); + _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); + 
_dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _dispatch_thread_key_create(&dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_introspection_key , NULL); +#elif DISPATCH_PERF_MON + _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); +#endif + _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); + _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); +#endif + +#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 + _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); +#endif + + _dispatch_queue_set_current(&_dispatch_main_q); + _dispatch_queue_set_bound_thread(&_dispatch_main_q); + +#if DISPATCH_USE_PTHREAD_ATFORK + (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, + dispatch_atfork_parent, dispatch_atfork_child)); +#endif + _dispatch_hw_config_init(); + _dispatch_time_init(); + _dispatch_vtable_init(); + _os_object_init(); + _voucher_init(); + _dispatch_introspection_init(); +} + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) +#include +#endif +#if !defined(_WIN32) +#include +#endif + +#ifndef __ANDROID__ +#ifdef SYS_gettid +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t)syscall(SYS_gettid); +} +#elif defined(__FreeBSD__) +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t)pthread_getthreadid_np(); +} +#elif defined(_WIN32) +DISPATCH_ALWAYS_INLINE +static inline DWORD +gettid(void) +{ + return GetCurrentThreadId(); +} +#else +#error "SYS_gettid unavailable on this system" +#endif /* SYS_gettid */ +#endif /* ! __ANDROID__ */ + +#define _tsd_call_cleanup(k, f) do { \ + if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ + } while (0) + +#ifdef __ANDROID__ +static void (*_dispatch_thread_detach_callback)(void); + +void +_dispatch_install_thread_detach_callback(void (*cb)(void)) +{ + if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { + DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); + } +} +#endif + +#if defined(_WIN32) +static bool +_dispatch_process_is_exiting(void) +{ + // The goal here is to detect if the current thread is executing cleanup + // code (e.g. FLS destructors) as a result of calling ExitProcess(). Windows + // doesn't provide an official method of getting this information, so we + // take advantage of how ExitProcess() works internally. The first thing + // that it does (according to MSDN) is terminate every other thread in the + // process. Logically, it should not be possible to create more threads + // after this point, and Windows indeed enforces this. Try to create a + // lightweight suspended thread, and if access is denied, assume that this + // is because the process is exiting. + // + // We aren't worried about any race conditions here during process exit. + // Cleanup code is only run on the thread that already called ExitProcess(), + // and every other thread will have been forcibly terminated by the time + // that happens. Additionally, while CreateThread() could conceivably fail + // due to resource exhaustion, the process would already be in a bad state + // if that happens. 
This is only intended to prevent unwanted cleanup code + // from running, so the worst case is that a thread doesn't clean up after + // itself when the process is about to die anyway. + const size_t stack_size = 1; // As small as possible + HANDLE thread = CreateThread(NULL, stack_size, NULL, NULL, + CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, NULL); + if (thread) { + // Although Microsoft recommends against using TerminateThread, it's + // safe to use it here because we know that the thread is suspended and + // it has not executed any code due to a NULL lpStartAddress. There was + // a bug in Windows Server 2003 and Windows XP where the initial stack + // would not be freed, but libdispatch does not support them anyway. + TerminateThread(thread, 0); + CloseHandle(thread); + return false; + } + return GetLastError() == ERROR_ACCESS_DENIED; +} +#endif // defined(_WIN32) + + +void DISPATCH_TSD_DTOR_CC +_libdispatch_tsd_cleanup(void *ctx) +{ +#if defined(_WIN32) + // On Windows, exiting a process will still call FLS destructors for the + // thread that called ExitProcess(). pthreads-based platforms don't call key + // destructors on exit, so be consistent. + if (_dispatch_process_is_exiting()) { + return; + } +#endif // defined(_WIN32) + + struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_r2k_key, NULL); + + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); + _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); + _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); + _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); + _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _tsd_call_cleanup(dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_introspection_key, NULL); +#elif DISPATCH_PERF_MON + _tsd_call_cleanup(dispatch_bcounter_key, NULL); +#endif + _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); + _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); + _tsd_call_cleanup(dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); +#ifdef __ANDROID__ + if (_dispatch_thread_detach_callback) { + _dispatch_thread_detach_callback(); + } +#endif + tsd->tid = 0; +} + +DISPATCH_NOINLINE +void +libdispatch_tsd_init(void) +{ +#if !defined(_WIN32) + pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); +#else + FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); +#endif // defined(_WIN32) + __dispatch_tsd.tid = gettid(); +} +#endif + +DISPATCH_NOTHROW +void +_dispatch_queue_atfork_child(void) +{ + dispatch_queue_main_t main_q = &_dispatch_main_q; + void *crash = (void *)0x100; + size_t i; + + if (_dispatch_queue_is_thread_bound(main_q)) { + _dispatch_queue_set_bound_thread(main_q); + } + + if (!_dispatch_is_multithreaded_inline()) return; + + main_q->dq_items_head = crash; + main_q->dq_items_tail = crash; + + _dispatch_mgr_q.dq_items_head = crash; + _dispatch_mgr_q.dq_items_tail = crash; + + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_root_queues[i].dq_items_head = crash; + _dispatch_root_queues[i].dq_items_tail = crash; + } +} + +DISPATCH_NOINLINE +void +_dispatch_fork_becomes_unsafe_slow(void) +{ + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { + DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + } +} + 
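+// Editorial sketch (added by the editor, not part of this patch): how the
+// two fork-safety bits used above and below interact, assuming the flag
+// names defined in this file:
+//   MULTITHREADED already set, then PROHIBIT requested  -> client crash
+//   PROHIBIT already set, then a second thread appears  -> client crash
+//   neither bit set                                     -> fork() stays safe
+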
+DISPATCH_NOINLINE +void +_dispatch_prohibit_transition_to_multithreaded(bool prohibit) +{ + if (prohibit) { + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { + DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); + } + } else { + os_atomic_and(&_dispatch_unsafe_fork, + (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + } +} From 6eef9ce65f4393e0800f6ceb1134b26989f16335 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 1 Feb 2019 11:00:43 -0800 Subject: [PATCH 069/249] Merge pull request #436 from compnerd/dispatch-queues Improve dispatch queue support for Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.Md0Qny => unifdef.1gCYBC} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.Md0Qny => unifdef.1gCYBC} (100%) diff --git a/PATCHES b/PATCHES index 2b4e4ae4f..bc1574b4e 100644 --- a/PATCHES +++ b/PATCHES @@ -464,3 +464,4 @@ github commits starting with 29bdc2f from [e3ae79b] APPLIED rdar://54572081 [fb368f6] APPLIED rdar://54572081 [afa6cc3] APPLIED rdar://54572081 +[e6df818] APPLIED rdar://54572081 diff --git a/src/unifdef.Md0Qny b/src/unifdef.1gCYBC similarity index 100% rename from src/unifdef.Md0Qny rename to src/unifdef.1gCYBC From 9eb7a414a72bbc745d0125462643521ede3ecf65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Helge=20He=C3=9F?= Date: Mon, 14 Jan 2019 15:34:08 +0100 Subject: [PATCH 070/249] Pass along source context in `dispatchPrecondition` `dispatchPrecondition` is great, but it lacks file/line parameters ... Signed-off-by: Kim Topley --- src/swift/Dispatch.swift | 4 ++-- src/{unifdef.1gCYBC => unifdef.6iaen7} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename src/{unifdef.1gCYBC => unifdef.6iaen7} (100%) diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift index 0fd138d6a..2ba819223 100644 --- a/src/swift/Dispatch.swift +++ b/src/swift/Dispatch.swift @@ -38,10 +38,10 @@ public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { @_transparent @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) -public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate) { +public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate, file: StaticString = #file, line: UInt = #line) { // precondition is able to determine release-vs-debug asserts where the overlay // cannot, so formulating this into a call that we can call with precondition() - precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure") + precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure", file: file, line: line) } /// qos_class_t diff --git a/src/unifdef.1gCYBC b/src/unifdef.6iaen7 similarity index 100% rename from src/unifdef.1gCYBC rename to src/unifdef.6iaen7 From ff706513373a09441778973e62f2196e441fad37 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 1 Feb 2019 11:02:23 -0800 Subject: [PATCH 071/249] Merge pull request #437 from helje5/feature/dizpatchPrecondition-context Pass along source context in `dispatchPrecondition` Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.6iaen7 => unifdef.h0c0bA} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.6iaen7 => unifdef.h0c0bA} (100%) diff --git a/PATCHES b/PATCHES index bc1574b4e..230f692e7 100644 --- a/PATCHES +++ b/PATCHES @@ -465,3 +465,4 @@ github commits starting with 29bdc2f from [fb368f6] APPLIED rdar://54572081 [afa6cc3] APPLIED rdar://54572081 [e6df818] 
APPLIED rdar://54572081
+[7144ee3] APPLIED rdar://54572081
diff --git a/src/unifdef.6iaen7 b/src/unifdef.h0c0bA
similarity index 100%
rename from src/unifdef.6iaen7
rename to src/unifdef.h0c0bA

From e7f5361761bfc53eeb6ac298ec3425c0b463e5d5 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Thu, 17 Jan 2019 22:02:53 -0800
Subject: [PATCH 072/249] build: remove unnecessary `find_package`

Now that we link using the swift driver, we no longer need to explicitly
add in the path to the swift registrar. Remove the unnecessary
`find_package` to make it easier to build libdispatch.

Signed-off-by: Kim Topley
---
 CMakeLists.txt                         | 2 --
 src/{unifdef.h0c0bA => unifdef.CIOS7B} | 0
 2 files changed, 2 deletions(-)
 rename src/{unifdef.h0c0bA => unifdef.CIOS7B} (100%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 10d4b7de9..5a86eb80f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -55,8 +55,6 @@ if(ENABLE_SWIFT)
     message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift")
   endif()
 
-  find_package(Swift REQUIRED CONFIG)
-
   string(TOLOWER ${CMAKE_SYSTEM_NAME} swift_os)
   get_swift_host_arch(swift_arch)
 
diff --git a/src/unifdef.h0c0bA b/src/unifdef.CIOS7B
similarity index 100%
rename from src/unifdef.h0c0bA
rename to src/unifdef.CIOS7B

From 8f9f8ec677d45ad27dad0dad5737ca83e080c63b Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Fri, 1 Feb 2019 11:03:00 -0800
Subject: [PATCH 073/249] Merge pull request #438 from compnerd/swift-less

build: remove unnecessary `find_package`

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.CIOS7B => unifdef.whaJl2} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.CIOS7B => unifdef.whaJl2} (100%)

diff --git a/PATCHES b/PATCHES
index 230f692e7..c204f4d7f 100644
--- a/PATCHES
+++ b/PATCHES
@@ -466,3 +466,4 @@ github commits starting with 29bdc2f from
 [afa6cc3] APPLIED rdar://54572081
 [e6df818] APPLIED rdar://54572081
 [7144ee3] APPLIED rdar://54572081
+[60ffcc2] APPLIED rdar://54572081
diff --git a/src/unifdef.CIOS7B b/src/unifdef.whaJl2
similarity index 100%
rename from src/unifdef.CIOS7B
rename to src/unifdef.whaJl2

From a9100ccab96d0dfc92189bc53335adc494662093 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 22 Jan 2019 14:31:47 -0800
Subject: [PATCH 074/249] build: add executable suffix for `-fuse-ld=`

The `-fuse-ld=` parameter requires the executable suffix for this to
work on Windows. This allows us to cross-compile for Android on
Windows.

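Editorial illustration (not part of the original message): with the host
executable suffix appended, the link flag this module emits changes
roughly from `-fuse-ld=lld` to `-fuse-ld=lld.exe` on a Windows host,
which is what lets the driver locate the linker binary there.
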
Signed-off-by: Kim Topley --- cmake/modules/DispatchUtilities.cmake | 8 ++++++-- src/{unifdef.whaJl2 => unifdef.T79kyk} | 0 2 files changed, 6 insertions(+), 2 deletions(-) rename src/{unifdef.whaJl2 => unifdef.T79kyk} (100%) diff --git a/cmake/modules/DispatchUtilities.cmake b/cmake/modules/DispatchUtilities.cmake index 15d8cd771..fea3622ec 100644 --- a/cmake/modules/DispatchUtilities.cmake +++ b/cmake/modules/DispatchUtilities.cmake @@ -1,15 +1,19 @@ function(dispatch_set_linker target) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(CMAKE_HOST_EXECUTABLE_SUFFIX .exe) + endif() + if(USE_GOLD_LINKER) set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS - -fuse-ld=gold) + -fuse-ld=gold${CMAKE_HOST_EXECUTABLE_SUFFIX}) endif() if(USE_LLD_LINKER) set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS - -fuse-ld=lld) + -fuse-ld=lld${CMAKE_HOST_EXECUTABLE_SUFFIX}) endif() endfunction() diff --git a/src/unifdef.whaJl2 b/src/unifdef.T79kyk similarity index 100% rename from src/unifdef.whaJl2 rename to src/unifdef.T79kyk From 3d8bc11a548c77e991f5a1387e39ca5896747039 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 1 Feb 2019 11:04:15 -0800 Subject: [PATCH 075/249] Merge pull request #439 from compnerd/android-wins build: add executable suffix for `-fuse-ld=` Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.T79kyk => unifdef.7bOs0t} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.T79kyk => unifdef.7bOs0t} (100%) diff --git a/PATCHES b/PATCHES index c204f4d7f..f7310fad1 100644 --- a/PATCHES +++ b/PATCHES @@ -467,3 +467,4 @@ github commits starting with 29bdc2f from [e6df818] APPLIED rdar://54572081 [7144ee3] APPLIED rdar://54572081 [60ffcc2] APPLIED rdar://54572081 +[618b070] APPLIED rdar://54572081 diff --git a/src/unifdef.T79kyk b/src/unifdef.7bOs0t similarity index 100% rename from src/unifdef.T79kyk rename to src/unifdef.7bOs0t From bdcfeb05d5cd103bfddf89908f044e3b0141e64d Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 1 Feb 2019 13:56:43 -0800 Subject: [PATCH 076/249] Merge pull request #440 from adierking/windowstests tests: port the test harness to Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.7bOs0t => unifdef.j8jZfa} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.7bOs0t => unifdef.j8jZfa} (100%) diff --git a/PATCHES b/PATCHES index f7310fad1..22384c5cd 100644 --- a/PATCHES +++ b/PATCHES @@ -468,3 +468,4 @@ github commits starting with 29bdc2f from [7144ee3] APPLIED rdar://54572081 [60ffcc2] APPLIED rdar://54572081 [618b070] APPLIED rdar://54572081 +[dde5892] APPLIED rdar://54572081 diff --git a/src/unifdef.7bOs0t b/src/unifdef.j8jZfa similarity index 100% rename from src/unifdef.7bOs0t rename to src/unifdef.j8jZfa From 1d1e2be6667757c4fc2dc111fe17fe2becb0524f Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Sat, 2 Feb 2019 15:24:13 -0800 Subject: [PATCH 077/249] dispatch: fix logging to a file on Windows Port the dispatch_debug test to Windows and fix a bug that it found related to logfile initialization. _dispatch_logv_init()'s Windows implementation uses _fdopen() and fprintf() to write the log header. However, this gives ownership of the file to stdio, so fclose() closes the descriptor and causes future writes to trigger CRT assertion failures. Avoid this by using snprintf() and _write() instead. 
Signed-off-by: Kim Topley --- src/init.c | 16 ++++++++++------ src/{unifdef.j8jZfa => unifdef.Bs53V8} | 0 2 files changed, 10 insertions(+), 6 deletions(-) rename src/{unifdef.j8jZfa => unifdef.Bs53V8} (100%) diff --git a/src/init.c b/src/init.c index 10e24dd38..8a9598f7d 100644 --- a/src/init.c +++ b/src/init.c @@ -1111,15 +1111,19 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) dispatch_log_basetime = _dispatch_uptime(); #endif #if defined(_WIN32) - FILE *pLogFile = _fdopen(dispatch_logfile, "w"); - char szProgramName[MAX_PATH + 1] = {0}; GetModuleFileNameA(NULL, szProgramName, MAX_PATH); - fprintf(pLogFile, "=== log file opened for %s[%lu] at " - "%ld.%06u ===\n", szProgramName, GetCurrentProcessId(), - tv.tv_sec, (int)tv.tv_usec); - fclose(pLogFile); + char szMessage[512]; + int len = snprintf(szMessage, sizeof(szMessage), + "=== log file opened for %s[%lu] at %ld.%06u ===", + szProgramName, GetCurrentProcessId(), tv.tv_sec, + (int)tv.tv_usec); + if (len > 0) { + len = MIN(len, sizeof(szMessage) - 1); + _write(dispatch_logfile, szMessage, len); + _write(dispatch_logfile, "\n", 1); + } #else dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), diff --git a/src/unifdef.j8jZfa b/src/unifdef.Bs53V8 similarity index 100% rename from src/unifdef.j8jZfa rename to src/unifdef.Bs53V8 From 3975291be6158528b356df0c36a38d148972cba9 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 5 Feb 2019 15:33:56 -0800 Subject: [PATCH 078/249] Merge pull request #441 from adierking/winlog dispatch: fix logging to a file on Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.Bs53V8 => unifdef.a1Z4l5} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.Bs53V8 => unifdef.a1Z4l5} (100%) diff --git a/PATCHES b/PATCHES index 22384c5cd..5a700bafb 100644 --- a/PATCHES +++ b/PATCHES @@ -469,3 +469,4 @@ github commits starting with 29bdc2f from [60ffcc2] APPLIED rdar://54572081 [618b070] APPLIED rdar://54572081 [dde5892] APPLIED rdar://54572081 +[81c9bf6] APPLIED rdar://54572081 diff --git a/src/unifdef.Bs53V8 b/src/unifdef.a1Z4l5 similarity index 100% rename from src/unifdef.Bs53V8 rename to src/unifdef.a1Z4l5 From 459cbef11fd6a5ff2469f4bf005af3ab3e95e728 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Grzegorz=20Mi=C5=82o=C5=9B?= Date: Sat, 2 Feb 2019 15:35:03 -0800 Subject: [PATCH 079/249] Retry sem_wait on signal interrupts Signed-off-by: Kim Topley --- src/shims/lock.c | 5 ++++- src/{unifdef.a1Z4l5 => unifdef.77AuvW} | 0 2 files changed, 4 insertions(+), 1 deletion(-) rename src/{unifdef.a1Z4l5 => unifdef.77AuvW} (100%) diff --git a/src/shims/lock.c b/src/shims/lock.c index f0e493796..8892efed6 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -193,7 +193,10 @@ _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) void _dispatch_sema4_wait(_dispatch_sema4_t *sema) { - int ret = sem_wait(sema); + int ret = 0; + do { + ret = sem_wait(sema); + } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } diff --git a/src/unifdef.a1Z4l5 b/src/unifdef.77AuvW similarity index 100% rename from src/unifdef.a1Z4l5 rename to src/unifdef.77AuvW From 17659f9984df580e706e4235de8baeb364e59f99 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 5 Feb 2019 15:34:33 -0800 Subject: [PATCH 080/249] Merge pull request #442 from gmilos/SR-9863-sem-wait-signal-interrupt-fix Retry sem_wait on signal interrupts Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.77AuvW => unifdef.dWuIeA} | 0 2 files changed, 1 insertion(+) 
rename src/{unifdef.77AuvW => unifdef.dWuIeA} (100%) diff --git a/PATCHES b/PATCHES index 5a700bafb..342f705c8 100644 --- a/PATCHES +++ b/PATCHES @@ -470,3 +470,4 @@ github commits starting with 29bdc2f from [618b070] APPLIED rdar://54572081 [dde5892] APPLIED rdar://54572081 [81c9bf6] APPLIED rdar://54572081 +[4b85ca6] APPLIED rdar://54572081 diff --git a/src/unifdef.77AuvW b/src/unifdef.dWuIeA similarity index 100% rename from src/unifdef.77AuvW rename to src/unifdef.dWuIeA From 3c888b73602adc17cecda4f27edd33eb0cc9e54f Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Fri, 8 Feb 2019 13:02:31 -0800 Subject: [PATCH 081/249] Use numericCast() to get the correct type when creating a dispatch_block_flags_t. rdar://problem/44957560 Signed-off-by: Kim Topley --- src/swift/Block.swift | 12 ++---------- src/{unifdef.dWuIeA => unifdef.PMKCEw} | 0 2 files changed, 2 insertions(+), 10 deletions(-) rename src/{unifdef.dWuIeA => unifdef.PMKCEw} (100%) diff --git a/src/swift/Block.swift b/src/swift/Block.swift index f1c2f08df..1849907d6 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -40,11 +40,7 @@ public class DispatchWorkItem { internal var _block: _DispatchBlock public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { -#if os(Windows) && arch(x86_64) - let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) -#else - let flags = dispatch_block_flags_t(UInt(flags.rawValue)) -#endif + let flags: dispatch_block_flags_t = numericCast(flags.rawValue) _block = dispatch_block_create_with_qos_class(flags, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } @@ -52,11 +48,7 @@ public class DispatchWorkItem { // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. 
 	internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) {
-#if os(Windows) && arch(x86_64)
-		let flags = dispatch_block_flags_t(UInt32(flags.rawValue))
-#else
-		let flags = dispatch_block_flags_t(UInt(flags.rawValue))
-#endif
+		let flags: dispatch_block_flags_t = numericCast(flags.rawValue)
 		_block = _swift_dispatch_block_create_noescape(flags, noescapeBlock)
 	}
 
diff --git a/src/unifdef.dWuIeA b/src/unifdef.PMKCEw
similarity index 100%
rename from src/unifdef.dWuIeA
rename to src/unifdef.PMKCEw

From 9ebfacb34880f636f88c3a236fac7e01d3a45d85 Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Mon, 11 Feb 2019 07:03:30 -0800
Subject: [PATCH 082/249] Merge pull request #444 from
 ktopley-apple/ktopley-numericCast-for-dispatch-block-flags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use numericCast() to get the correct type when creating a dispatch_bl…

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.PMKCEw => unifdef.qYjbRu} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.PMKCEw => unifdef.qYjbRu} (100%)

diff --git a/PATCHES b/PATCHES
index 342f705c8..c7979babc 100644
--- a/PATCHES
+++ b/PATCHES
@@ -471,3 +471,4 @@ github commits starting with 29bdc2f from
 [dde5892] APPLIED rdar://54572081
 [81c9bf6] APPLIED rdar://54572081
 [4b85ca6] APPLIED rdar://54572081
+[ff3bf51] APPLIED rdar://54572081
diff --git a/src/unifdef.PMKCEw b/src/unifdef.qYjbRu
similarity index 100%
rename from src/unifdef.PMKCEw
rename to src/unifdef.qYjbRu

From dbb013fd6cfdcf3eac59b2a37c040f3b1589438c Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Wed, 13 Feb 2019 08:15:24 -0800
Subject: [PATCH 083/249] Revert "Use numericCast() to get the correct type
 when creating a dispatch_block_flags_t."

This (partially) reverts commit b048490a717e08b27b835750f0f6f631f0e8af0d.
Unfortunately, the value produced by numericCast() cannot be used to
construct the dispatch_block_flags_t type. Trying to change to an
explicit construction also fails. This repairs the Windows build for the
time being. Use `numericCast` for the other targets.

Signed-off-by: Kim Topley
---
 src/swift/Block.swift                  | 8 ++++++++
 src/{unifdef.qYjbRu => unifdef.b11S2D} | 0
 2 files changed, 8 insertions(+)
 rename src/{unifdef.qYjbRu => unifdef.b11S2D} (100%)

diff --git a/src/swift/Block.swift b/src/swift/Block.swift
index 1849907d6..71d998ba6 100644
--- a/src/swift/Block.swift
+++ b/src/swift/Block.swift
@@ -40,7 +40,11 @@ public class DispatchWorkItem {
 	internal var _block: _DispatchBlock
 
 	public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) {
+#if os(Windows) && (arch(arm64) || arch(x86_64))
+		let flags = dispatch_block_flags_t(UInt32(flags.rawValue))
+#else
 		let flags: dispatch_block_flags_t = numericCast(flags.rawValue)
+#endif
 		_block = dispatch_block_create_with_qos_class(flags,
 			qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block)
 	}
@@ -48,7 +52,11 @@ public class DispatchWorkItem {
 	// Used by DispatchQueue.synchronously to provide a path through
 	// dispatch_block_t, as we know the lifetime of the block in question.
internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { +#if os(Windows) && (arch(arm64) || arch(x86_64)) + let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) +#else let flags: dispatch_block_flags_t = numericCast(flags.rawValue) +#endif _block = _swift_dispatch_block_create_noescape(flags, noescapeBlock) } diff --git a/src/unifdef.qYjbRu b/src/unifdef.b11S2D similarity index 100% rename from src/unifdef.qYjbRu rename to src/unifdef.b11S2D From 01aa2f38bd749ef627e59d702aa58ea4541a4c83 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 13 Feb 2019 10:35:20 -0800 Subject: [PATCH 084/249] Merge pull request #446 from compnerd/sometimes-code-is-ugly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert "Use numericCast() to get the correct type when creating a dis… Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.b11S2D => unifdef.QpRS5Q} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.b11S2D => unifdef.QpRS5Q} (100%) diff --git a/PATCHES b/PATCHES index c7979babc..5ba5c91a3 100644 --- a/PATCHES +++ b/PATCHES @@ -472,3 +472,4 @@ github commits starting with 29bdc2f from [81c9bf6] APPLIED rdar://54572081 [4b85ca6] APPLIED rdar://54572081 [ff3bf51] APPLIED rdar://54572081 +[bc00e13] APPLIED rdar://54572081 diff --git a/src/unifdef.b11S2D b/src/unifdef.QpRS5Q similarity index 100% rename from src/unifdef.b11S2D rename to src/unifdef.QpRS5Q From 9c1b616581bbfb81c86d0cdba0fbd9aeca9dc7ed Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Fri, 22 Feb 2019 09:02:15 -0800 Subject: [PATCH 085/249] libdispatch-1121 merge fixes. Signed-off-by: Kim Topley --- private/private.h | 1 - private/workloop_private.h | 4 +- src/event/event_epoll.c | 25 +++++---- src/event/event_internal.h | 2 +- src/event/workqueue.c | 8 +-- src/init.c | 16 ++++-- src/inline_internal.h | 2 +- src/internal.h | 1 + src/io.c | 4 +- src/queue.c | 13 +++-- src/semaphore.c | 2 +- src/shims/lock.c | 70 +++++++++++++++++--------- src/shims/priority.h | 7 ++- src/shims/yield.h | 10 +++- src/source.c | 4 +- src/{unifdef.QpRS5Q => unifdef.EdlZEO} | 20 ++++++-- src/voucher.c | 2 + 17 files changed, 132 insertions(+), 59 deletions(-) rename src/{unifdef.QpRS5Q => unifdef.EdlZEO} (99%) diff --git a/private/private.h b/private/private.h index 795f06459..9692c2af8 100644 --- a/private/private.h +++ b/private/private.h @@ -227,7 +227,6 @@ API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); -#endif #if TARGET_OS_MAC API_AVAILABLE(macos(10.13.2), ios(11.2), tvos(11.2), watchos(4.2)) diff --git a/private/workloop_private.h b/private/workloop_private.h index c06b498db..89e857a57 100644 --- a/private/workloop_private.h +++ b/private/workloop_private.h @@ -73,7 +73,7 @@ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_workloop_set_qos_class_floor(dispatch_workloop_t workloop, - qos_class_t qos, int relpri, dispatch_workloop_param_flags_t flags); + dispatch_qos_class_t qos, int relpri, dispatch_workloop_param_flags_t flags); /*! 
* @function dispatch_workloop_set_scheduler_priority @@ -172,7 +172,7 @@ API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_qos_class_floor", DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_workloop_set_qos_class(dispatch_workloop_t workloop, - qos_class_t qos, dispatch_workloop_param_flags_t flags); + dispatch_qos_class_t qos, dispatch_workloop_param_flags_t flags); API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NOTHROW diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index a5c71c710..7c746c0f2 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -260,7 +260,6 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) uint32_t events; events = _dispatch_unote_required_events(du); - du._du->du_priority = pri; dmb = _dispatch_unote_muxnote_bucket(du); dmn = _dispatch_unote_muxnote_find(dmb, du); @@ -429,8 +428,9 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, } void -_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, - dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows) +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) { dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); uint64_t target = range.delay + _dispatch_time_now_cached(clock, nows); @@ -438,7 +438,8 @@ _dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, } void -_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth, uint32_t tidx) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx) { _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX); } @@ -510,7 +511,7 @@ _dispatch_event_merge_signal(dispatch_muxnote_t dmn) // consumed by dux_merge_evt() _dispatch_retain_unote_owner(du); dispatch_assert(!dux_needs_rearm(du._du)); - os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed) + os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed); dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0); } } else { @@ -564,8 +565,8 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } @@ -577,8 +578,8 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } @@ -628,10 +629,14 @@ _dispatch_event_loop_drain(uint32_t flags) _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); break; - case DISPATCH_EPOLL_CLOCK_MACH: + case DISPATCH_EPOLL_CLOCK_UPTIME: _dispatch_event_merge_timer(DISPATCH_CLOCK_UPTIME); break; + case DISPATCH_EPOLL_CLOCK_MONOTONIC: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MONOTONIC); + break; + default: dmn = ev[i].data.ptr; switch (dmn->dmn_filter) { diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 
8a3ae22b4..d0bdca8fb 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -382,9 +382,9 @@ extern const dispatch_source_type_s _dispatch_mach_type_send; extern const dispatch_source_type_s _dispatch_mach_type_recv; extern const dispatch_source_type_s _dispatch_mach_type_reply; extern const dispatch_source_type_s _dispatch_xpc_type_sigterm; -extern const dispatch_source_type_s _dispatch_source_type_timer_with_clock; #define DISPATCH_MACH_TYPE_WAITER ((const dispatch_source_type_s *)-2) #endif +extern const dispatch_source_type_s _dispatch_source_type_timer_with_clock; #pragma mark - #pragma mark deferred items diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 326c3d936..dc020f32a 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -218,7 +218,7 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) int32_t floor = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS; _dispatch_debug("workq: %s has no runnable workers; poking with floor %d", dq->dq_label, floor); - _dispatch_global_queue_poke(dq, 1, floor); + _dispatch_root_queue_poke(dq, 1, floor); global_runnable += 1; // account for poke in global estimate } else if (mon->num_runnable < mon->target_runnable && global_runnable < global_soft_max) { @@ -231,7 +231,7 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) floor = MAX(floor, floor2); _dispatch_debug("workq: %s under utilization target; poking with floor %d", dq->dq_label, floor); - _dispatch_global_queue_poke(dq, 1, floor); + _dispatch_root_queue_poke(dq, 1, floor); global_runnable += 1; // account for poke in global estimate } } @@ -245,7 +245,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) int i, target_runnable = (int)dispatch_hw_config(active_cpus); foreach_qos_bucket_reverse(i) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = _dispatch_get_root_queue(i, false); + mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), false); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; @@ -253,7 +253,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) // Create monitoring timer that will periodically run on dispatch_mgr_q dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, - 0, 0, &_dispatch_mgr_q); + 0, 0, _dispatch_mgr_q._as_dq); dispatch_source_set_timer(ds, dispatch_time(DISPATCH_TIME_NOW, 0), NSEC_PER_SEC, 0); dispatch_source_set_event_handler_f(ds, _dispatch_workq_monitor_pools); diff --git a/src/init.c b/src/init.c index 8a9598f7d..d5db67b61 100644 --- a/src/init.c +++ b/src/init.c @@ -431,6 +431,12 @@ _dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa) if (dqa < _dispatch_queue_attrs || dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) { +#ifndef __APPLE__ + if (memcmp(dqa, &_dispatch_queue_attrs[0], + sizeof(struct dispatch_queue_attr_s)) == 0) { + dqa = (dispatch_queue_attr_t)&_dispatch_queue_attrs[0]; + } else +#endif // __APPLE__ DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } @@ -974,9 +980,11 @@ _dispatch_bug_kevent_client(const char *msg, const char *filter, dc = du._dr->ds_handler[DS_EVENT_HANDLER]; if (dc) func = _dispatch_continuation_get_function_symbol(dc); break; +#if HAVE_MACH case DISPATCH_MACH_CHANNEL_TYPE: func = du._dmrr->dmrr_handler_func; break; +#endif // HAVE_MACH } filter = dux_type(du._du)->dst_kind; } @@ -984,18 +992,18 @@ _dispatch_bug_kevent_client(const char *msg, const char *filter, 
if (operation && err) { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch client: %s %s: \"%s\" - 0x%x " - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, operation, strerror(err), err, udata, filter, ident, ident, func); } else if (operation) { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch client: %s %s" - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, operation, udata, filter, ident, ident, func); } else { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch: %s: \"%s\" - 0x%x" - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, strerror(err), err, udata, filter, ident, ident, func); } } @@ -1013,9 +1021,11 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du) dc = du._dr->ds_handler[DS_EVENT_HANDLER]; if (dc) func = _dispatch_continuation_get_function_symbol(dc); break; +#if HAVE_MACH case DISPATCH_MACH_CHANNEL_TYPE: func = du._dmrr->dmrr_handler_func; break; +#endif // MACH } _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", "BUG in libdispatch client: %s, monitored resource vanished before " diff --git a/src/inline_internal.h b/src/inline_internal.h index f91e2fe7d..317e4afbe 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -2081,7 +2081,7 @@ _dispatch_set_basepri(dispatch_priority_t dq_dbp) _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); return old_dbp; #else - (void)dbp; + (void)dq_dbp; return 0; #endif } diff --git a/src/internal.h b/src/internal.h index 9f3d6118c..775fd7249 100644 --- a/src/internal.h +++ b/src/internal.h @@ -327,6 +327,7 @@ upcast(dispatch_object_t dou) #include #include #endif +#include #if __has_include() #include diff --git a/src/io.c b/src/io.c index 4631f32af..a807c683b 100644 --- a/src/io.c +++ b/src/io.c @@ -2267,8 +2267,8 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) } #else #error "_dispatch_operation_advise not implemented on this platform" -#endif -#endif +#endif // defined(F_RDADVISE) +#endif // defined(_WIN32) } static int diff --git a/src/queue.c b/src/queue.c index 7a6a0b3a9..6668b1e6a 100644 --- a/src/queue.c +++ b/src/queue.c @@ -204,10 +204,12 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#if HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); static void _dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#endif // HAVE_PTHREAD_WORKQUEUE_QOS const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(ASYNC_REDIRECT, @@ -898,7 +900,6 @@ dispatch_async(dispatch_queue_t dq, dispatch_block_t work) qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); } -#endif #pragma mark - #pragma mark _dispatch_sync_invoke / _dispatch_sync_complete @@ -3558,6 +3559,7 @@ _dispatch_queue_drain_should_narrow_slow(uint64_t now, dispatch_assert_zero(rv); } } +#endif // 
TARGET_OS_MAC pthread_attr_destroy(&attr); } @@ -3602,10 +3604,12 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) dwl->dwl_timer_heap = NULL; } +#if TARGET_OS_MAC if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) { (void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl)); } +#endif // TARGET_OS_MAC _dispatch_workloop_attributes_dispose(dwl); _dispatch_queue_dispose(dwl, allow_free); } @@ -3660,11 +3664,13 @@ _dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, new_state |= qos_bits; }); +#if DISPATCH_USE_KEVENT_WORKQUEUE dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); if (likely(ddi)) { ddi->ddi_wlh_needs_update = true; _dispatch_return_to_kernel(); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE return true; } @@ -5188,6 +5194,7 @@ _dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) }); } +#if DISPATCH_USE_KEVENT_WORKQUEUE DISPATCH_ALWAYS_INLINE static inline bool _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) @@ -5200,6 +5207,7 @@ _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) }); return _dq_state_is_dirty(old_state); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_mgr_queue_drain(void) @@ -5812,7 +5820,6 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) return head; } -#if DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO) @@ -7332,7 +7339,6 @@ libdispatch_tsd_init(void) #endif // defined(_WIN32) __dispatch_tsd.tid = gettid(); } -#endif DISPATCH_NOTHROW void @@ -7370,6 +7376,7 @@ _dispatch_fork_becomes_unsafe_slow(void) DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); } } +#endif // TARGET_OS_MAC DISPATCH_NOINLINE void diff --git a/src/semaphore.c b/src/semaphore.c index aba285189..222d872ec 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -198,7 +198,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dg), dg); offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, - "count = %"PRIu32", gen = %"PRIu32", waiters = %d, notifs = %d }", + "count = %u, gen = %d, waiters = %d, notifs = %d }", _dg_state_value(dg_state), _dg_state_gen(dg_state), (bool)(dg_state & DISPATCH_GROUP_HAS_WAITERS), (bool)(dg_state & DISPATCH_GROUP_HAS_NOTIFS)); diff --git a/src/shims/lock.c b/src/shims/lock.c index 8892efed6..a51fbf821 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -408,48 +408,69 @@ _dispatch_futex(uint32_t *uaddr, int op, uint32_t val, return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); } +// returns 0, ETIMEDOUT, EFAULT, EINTR, EWOULDBLOCK +DISPATCH_ALWAYS_INLINE +static inline int +_futex_blocking_op(uint32_t *uaddr, int futex_op, uint32_t val, + const struct timespec *timeout, int flags) +{ + for (;;) { + int rc = _dispatch_futex(uaddr, futex_op, val, timeout, NULL, 0, flags); + if (!rc) { + return 0; + } + switch (errno) { + case EINTR: + /* + * if we have a timeout, we need to return for the caller to + * recompute the new deadline, else just go back to wait. 
+ */ + if (timeout == 0) { + continue; + } + /* FALLTHROUGH */ + case ETIMEDOUT: + case EFAULT: + case EWOULDBLOCK: + return errno; + default: + DISPATCH_INTERNAL_CRASH(errno, "_futex_op() failed"); + } + } +} + static int _dispatch_futex_wait(uint32_t *uaddr, uint32_t val, const struct timespec *timeout, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags), - case 0: case EWOULDBLOCK: case ETIMEDOUT: return err; - default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed"); - ); + return _futex_blocking_op(uaddr, FUTEX_WAIT, val, timeout, opflags); } static void _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) { - int rc; - _dlock_syscall_switch(err, - rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); - ); + int rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, + opflags); + if (rc >= 0 || errno == ENOENT) return; + DISPATCH_INTERNAL_CRASH(errno, "_dlock_wake() failed"); } static void _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout, - NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); - ); + int err = _futex_blocking_op(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, + timeout, opflags); + if (err == 0) return; + DISPATCH_CLIENT_CRASH(err, "futex_lock_pi() failed"); } static void _dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); - ); + int rc = _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags); + if (rc == 0) return; + DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); } #endif @@ -478,10 +499,11 @@ _dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, (nsecs = _dispatch_timeout(timeout)) != 0); return rc; #elif HAVE_FUTEX + (void)flags; if (nsecs != DISPATCH_TIME_FOREVER) { struct timespec ts = { - .tv_sec = (__typeof__(ts.tv_sec))(nsec / NSEC_PER_SEC), - .tv_nsec = (__typeof__(ts.tv_nsec))(nsec % NSEC_PER_SEC), + .tv_sec = (__typeof__(ts.tv_sec))(nsecs / NSEC_PER_SEC), + .tv_nsec = (__typeof__(ts.tv_nsec))(nsecs % NSEC_PER_SEC), }; return _dispatch_futex_wait(address, value, &ts, FUTEX_PRIVATE_FLAG); } diff --git a/src/shims/priority.h b/src/shims/priority.h index df26f848e..3a79c5efb 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -70,6 +70,10 @@ typedef unsigned long pthread_priority_t; #endif // HAVE_PTHREAD_QOS_H +#if !defined(POLICY_RR) && defined(SCHED_RR) +#define POLICY_RR SCHED_RR +#endif // !defined(POLICY_RR) && defined(SCHED_RR) + typedef uint32_t dispatch_qos_t; typedef uint32_t dispatch_priority_t; @@ -85,7 +89,8 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) #define DISPATCH_QOS_NBUCKETS (DISPATCH_QOS_MAX - DISPATCH_QOS_MIN + 1) -#define DISPATCH_QOS_BUCKET(qos) ((qos) - DISPATCH_QOS_MIN) +#define DISPATCH_QOS_BUCKET(qos) ((int)((qos) - DISPATCH_QOS_MIN)) +#define DISPATCH_QOS_FOR_BUCKET(bucket) ((dispatch_qos_t)((uint32_t)bucket + DISPATCH_QOS_MIN)) #define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) #define DISPATCH_PRIORITY_RELPRI_SHIFT 0 diff --git a/src/shims/yield.h 
b/src/shims/yield.h index 4a1a0effe..0f1db8747 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -145,9 +145,15 @@ void *_dispatch_wait_for_enqueuer(void **ptr); DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) +#elif HAVE_PTHREAD_YIELD_NP +#define _dispatch_preemption_yield(n) { (void)n; pthread_yield_np(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield_np(); } +#elif defined(_WIN32) +#define _dispatch_preemption_yield(n) { (void)n; sched_yield(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } #else -#define _dispatch_preemption_yield(n) pthread_yield_np() -#define _dispatch_preemption_yield_to(th, n) pthread_yield_np() +#define _dispatch_preemption_yield(n) { (void)n; pthread_yield(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield(); } #endif // HAVE_MACH #pragma mark - diff --git a/src/source.c b/src/source.c index daa637a89..c2fe427b7 100644 --- a/src/source.c +++ b/src/source.c @@ -147,8 +147,8 @@ dispatch_source_get_handle(dispatch_source_t ds) uintptr_t dispatch_source_get_data(dispatch_source_t ds) { -#if DISPATCH_USE_MEMORYSTATUS dispatch_source_refs_t dr = ds->ds_refs; +#if DISPATCH_USE_MEMORYSTATUS if (dr->du_vmpressure_override) { return NOTE_VM_PRESSURE; } @@ -418,6 +418,7 @@ dispatch_source_set_registration_handler_f(dispatch_source_t ds, #pragma mark - #pragma mark dispatch_source_invoke +#if TARGET_OS_MAC bool _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) { @@ -429,6 +430,7 @@ _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) } return _dispatch_unote_needs_rearm(ds->ds_refs); } +#endif // TARGET_OS_MAC static void _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, diff --git a/src/unifdef.QpRS5Q b/src/unifdef.EdlZEO similarity index 99% rename from src/unifdef.QpRS5Q rename to src/unifdef.EdlZEO index 0c9fcddba..0128bf122 100644 --- a/src/unifdef.QpRS5Q +++ b/src/unifdef.EdlZEO @@ -204,10 +204,12 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#if HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); static void _dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#endif // HAVE_PTHREAD_WORKQUEUE_QOS const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(ASYNC_REDIRECT, @@ -894,7 +896,6 @@ dispatch_async(dispatch_queue_t dq, dispatch_block_t work) qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); } -#endif #pragma mark - #pragma mark _dispatch_sync_invoke / _dispatch_sync_complete @@ -3983,6 +3984,7 @@ _dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) } } +#if TARGET_OS_MAC DISPATCH_ALWAYS_INLINE static bool _dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) @@ -3992,6 +3994,7 @@ _dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) DISPATCH_WORKLOOP_ATTR_HAS_POLICY | DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); } +#endif // TARGET_OS_MAC void dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, 
int priority, @@ -4079,6 +4082,7 @@ _dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, } #endif +#if TARGET_OS_MAC static void _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, pthread_attr_t *attr) @@ -4098,6 +4102,7 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); } +#endif // TARGET_OS_MAC static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), @@ -4133,10 +4138,13 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); } +#if HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, (unsigned long)dwla->dwla_cpupercent.refillms); } +#endif // HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP +#if TARGET_OS_MAC if (_dispatch_workloop_has_kernel_attributes(dwl)) { int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); switch (rv) { @@ -4151,6 +4159,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) dispatch_assert_zero(rv); } } +#endif // TARGET_OS_MAC pthread_attr_destroy(&attr); } @@ -4195,10 +4204,12 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) dwl->dwl_timer_heap = NULL; } +#if TARGET_OS_MAC if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) { (void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl)); } +#endif // TARGET_OS_MAC _dispatch_workloop_attributes_dispose(dwl); _dispatch_queue_dispose(dwl, allow_free); } @@ -4253,11 +4264,13 @@ _dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, new_state |= qos_bits; }); +#if DISPATCH_USE_KEVENT_WORKQUEUE dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); if (likely(ddi)) { ddi->ddi_wlh_needs_update = true; _dispatch_return_to_kernel(); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE return true; } @@ -5781,6 +5794,7 @@ _dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) }); } +#if DISPATCH_USE_KEVENT_WORKQUEUE DISPATCH_ALWAYS_INLINE static inline bool _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) @@ -5793,6 +5807,7 @@ _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) }); return _dq_state_is_dirty(old_state); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_mgr_queue_drain(void) @@ -6405,7 +6420,6 @@ out: return head; } -#if DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO) @@ -7925,7 +7939,6 @@ libdispatch_tsd_init(void) #endif // defined(_WIN32) __dispatch_tsd.tid = gettid(); } -#endif DISPATCH_NOTHROW void @@ -7963,6 +7976,7 @@ _dispatch_fork_becomes_unsafe_slow(void) DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); } } +#endif // TARGET_OS_MAC DISPATCH_NOINLINE void diff --git a/src/voucher.c b/src/voucher.c index 1ba87032f..f06089d64 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -1870,6 +1870,7 @@ _voucher_dispose(voucher_t voucher) (void)voucher; } +#if __has_include() voucher_t voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) { @@ -1913,6 +1914,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf (void)persona_info; return -1; } +#endif // __has_include() void 
_voucher_activity_debug_channel_init(void) From aad26e9003ef217b2262ab08e5e590a283e5868c Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Wed, 13 Mar 2019 15:07:19 -0700 Subject: [PATCH 086/249] Make workloop scheduling depend on TARGET_OS_MAC Signed-off-by: Kim Topley --- src/queue_internal.h | 2 ++ src/{unifdef.EdlZEO => unifdef.d8V0yG} | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) rename src/{unifdef.EdlZEO => unifdef.d8V0yG} (99%) diff --git a/src/queue_internal.h b/src/queue_internal.h index 713677301..90b57b685 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -467,7 +467,9 @@ typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; typedef struct dispatch_workloop_attr_s { uint32_t dwla_flags; dispatch_priority_t dwla_pri; +#if TARGET_OS_MAC struct sched_param dwla_sched; +#endif // TARGET_OS_MAC int dwla_policy; struct { uint8_t percent; diff --git a/src/unifdef.EdlZEO b/src/unifdef.d8V0yG similarity index 99% rename from src/unifdef.EdlZEO rename to src/unifdef.d8V0yG index 0128bf122..4129826ab 100644 --- a/src/unifdef.EdlZEO +++ b/src/unifdef.d8V0yG @@ -3994,7 +3994,6 @@ _dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) DISPATCH_WORKLOOP_ATTR_HAS_POLICY | DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); } -#endif // TARGET_OS_MAC void dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, @@ -4018,6 +4017,7 @@ dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } } +#endif // TARGET_OS_MAC void dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, @@ -4036,12 +4036,16 @@ dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; } +#if TARGET_OS_MAC if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { dwl->dwl_attr->dwla_policy = POLICY_RR; dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } else { dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } +#else // TARGET_OS_MAC + (void)flags; +#endif // TARGET_OS_MAC } void @@ -4102,7 +4106,6 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); } -#endif // TARGET_OS_MAC static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), @@ -4115,6 +4118,7 @@ static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, .dgq_thread_pool_size = 1, }; +#endif // TARGET_OS_MAC static void _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) @@ -4126,6 +4130,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; } +#if TARGET_OS_MAC if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { pthread_attr_setschedparam(&attr, &dwla->dwla_sched); // _dispatch_async_and_wait_should_always_async detects when a queue @@ -4138,6 +4143,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); } +#endif // TARGET_OS_MAC #if HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, From 
fdefd3d2e82b047f755a04ea472730a349f4306a Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 18 Mar 2019 14:48:22 -0700 Subject: [PATCH 087/249] Merge pull request #447 from apple/kwt-test-darwin-libdispatch-1121-merge-master Merge darwin/libdispatch-1121 to master Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.d8V0yG => unifdef.0qQHE2} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.d8V0yG => unifdef.0qQHE2} (100%) diff --git a/PATCHES b/PATCHES index 5ba5c91a3..5693ca8a6 100644 --- a/PATCHES +++ b/PATCHES @@ -473,3 +473,4 @@ github commits starting with 29bdc2f from [4b85ca6] APPLIED rdar://54572081 [ff3bf51] APPLIED rdar://54572081 [bc00e13] APPLIED rdar://54572081 +[d44acc0] APPLIED rdar://54572081 diff --git a/src/unifdef.d8V0yG b/src/unifdef.0qQHE2 similarity index 100% rename from src/unifdef.d8V0yG rename to src/unifdef.0qQHE2 From d07d536a40395431a40a5c1db49cdedea09b3e8a Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 16:05:31 -0700 Subject: [PATCH 088/249] shims: correct indirection on Windows The `_dispatch_sema4_t` type is a pointer to the handle of the semaphore, so we need to indirect through the pointer before using it. This site was incorrectly using it directly. Signed-off-by: Kim Topley --- src/shims/lock.c | 2 +- src/{unifdef.0qQHE2 => unifdef.sIXsKL} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.0qQHE2 => unifdef.sIXsKL} (100%) diff --git a/src/shims/lock.c b/src/shims/lock.c index a51fbf821..e0358d261 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -308,7 +308,7 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(sema, msec); + wait_result = WaitForSingleObject(*sema, msec); _pop_timer_resolution(resolution); return wait_result == WAIT_TIMEOUT; } diff --git a/src/unifdef.0qQHE2 b/src/unifdef.sIXsKL similarity index 100% rename from src/unifdef.0qQHE2 rename to src/unifdef.sIXsKL From b2832762fa539aac687109aa9b37d76fb3ba316a Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:36:58 -0700 Subject: [PATCH 089/249] event: plumb partial timer support for Windows This sets up the event loop for Windows to run and schedule timers. This allows us to start running some of the timer sources. 
Signed-off-by: Kim Topley --- src/event/event_windows.c | 178 +++++++++++++++++++++++-- src/{unifdef.sIXsKL => unifdef.9jgBfV} | 0 2 files changed, 170 insertions(+), 8 deletions(-) rename src/{unifdef.sIXsKL => unifdef.9jgBfV} (100%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 2fe968071..f662f5d15 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -21,6 +21,14 @@ #include "internal.h" #if DISPATCH_EVENT_BACKEND_WINDOWS +static HANDLE hPort = NULL; +enum _dispatch_windows_port { + DISPATCH_PORT_POKE = 0, + DISPATCH_PORT_TIMER_CLOCK_WALL, + DISPATCH_PORT_TIMER_CLOCK_UPTIME, + DISPATCH_PORT_TIMER_CLOCK_MONOTONIC, +}; + #pragma mark dispatch_unote_t bool @@ -48,34 +56,188 @@ _dispatch_unote_unregister(dispatch_unote_t du DISPATCH_UNUSED, #pragma mark timers +typedef struct _dispatch_windows_timeout_s { + PTP_TIMER pTimer; + enum _dispatch_windows_port ullIdent; + bool bArmed; +} *dispatch_windows_timeout_t; + +#define DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(clock) \ + [DISPATCH_CLOCK_##clock] = { \ + .pTimer = NULL, \ + .ullIdent = DISPATCH_PORT_TIMER_CLOCK_##clock, \ + .bArmed = FALSE, \ + } + +static struct _dispatch_windows_timeout_s _dispatch_windows_timeout[] = { + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(WALL), + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(UPTIME), + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(MONOTONIC), +}; + +static void +_dispatch_event_merge_timer(dispatch_clock_t clock) +{ + uint32_t tidx = DISPATCH_TIMER_INDEX(clock, 0); + + _dispatch_windows_timeout[clock].bArmed = FALSE; + + _dispatch_timers_heap_dirty(_dispatch_timers_heap, tidx); + _dispatch_timers_heap[tidx].dth_needs_program = true; + _dispatch_timers_heap[tidx].dth_armed = false; +} + +static void CALLBACK +_dispatch_timer_callback(PTP_CALLBACK_INSTANCE Instance, PVOID Context, + PTP_TIMER Timer) +{ + BOOL bSuccess; + + bSuccess = PostQueuedCompletionStatus(hPort, 0, (ULONG_PTR)Context, + NULL); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } +} + void -_dispatch_event_loop_timer_arm(uint32_t tidx DISPATCH_UNUSED, - dispatch_timer_delay_s range DISPATCH_UNUSED, +_dispatch_event_loop_timer_arm(uint32_t tidx, + dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows DISPATCH_UNUSED) { - WIN_PORT_ERROR(); + dispatch_windows_timeout_t timer; + FILETIME ftDueTime; + LARGE_INTEGER liTime; + + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_WALL: + timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL]; + + WIN_PORT_ERROR(); + __assume(0); + + case DISPATCH_CLOCK_UPTIME: + case DISPATCH_CLOCK_MONOTONIC: + timer = &_dispatch_windows_timeout[DISPATCH_TIMER_CLOCK(tidx)]; + liTime.QuadPart = -((range.delay + 99) / 100); + break; + } + + if (timer->pTimer == NULL) { + timer->pTimer = CreateThreadpoolTimer(_dispatch_timer_callback, + (LPVOID)timer->ullIdent, NULL); + if (timer->pTimer == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "CreateThreadpoolTimer"); + } + } + + ftDueTime.dwHighDateTime = liTime.HighPart; + ftDueTime.dwLowDateTime = liTime.LowPart; + + SetThreadpoolTimer(timer->pTimer, &ftDueTime, /*msPeriod=*/0, + /*msWindowLength=*/0); + timer->bArmed = TRUE; } void -_dispatch_event_loop_timer_delete(uint32_t tidx DISPATCH_UNUSED) +_dispatch_event_loop_timer_delete(uint32_t tidx) { - WIN_PORT_ERROR(); + dispatch_windows_timeout_t timer; + + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_WALL: + timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL]; + + WIN_PORT_ERROR(); + 
__assume(0); + + case DISPATCH_CLOCK_UPTIME: + case DISPATCH_CLOCK_MONOTONIC: + timer = &_dispatch_windows_timeout[DISPATCH_TIMER_CLOCK(tidx)]; + break; + } + + SetThreadpoolTimer(timer->pTimer, NULL, /*msPeriod=*/0, + /*msWindowLength=*/0); + timer->bArmed = FALSE; } #pragma mark dispatch_loop +static void +_dispatch_windows_port_init(void *context DISPATCH_UNUSED) +{ + hPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1); + if (hPort == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "CreateIoCompletionPort"); + } + +#if DISPATCH_USE_MGR_THREAD + _dispatch_trace_item_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); + dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif +} + void _dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED, uint64_t dq_state DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED) { - WIN_PORT_ERROR(); + static dispatch_once_t _dispatch_windows_port_init_pred; + BOOL bSuccess; + + dispatch_once_f(&_dispatch_windows_port_init_pred, NULL, + _dispatch_windows_port_init); + bSuccess = PostQueuedCompletionStatus(hPort, 0, DISPATCH_PORT_POKE, + NULL); + (void)dispatch_assume(bSuccess); } DISPATCH_NOINLINE void -_dispatch_event_loop_drain(uint32_t flags DISPATCH_UNUSED) +_dispatch_event_loop_drain(uint32_t flags) { - WIN_PORT_ERROR(); + DWORD dwNumberOfBytesTransferred; + ULONG_PTR ulCompletionKey; + LPOVERLAPPED pOV; + BOOL bSuccess; + + pOV = (LPOVERLAPPED)&pOV; + bSuccess = GetQueuedCompletionStatus(hPort, &dwNumberOfBytesTransferred, + &ulCompletionKey, &pOV, + (flags & KEVENT_FLAG_IMMEDIATE) ? 0 : INFINITE); + while (bSuccess) { + switch (ulCompletionKey) { + case DISPATCH_PORT_POKE: + break; + + case DISPATCH_PORT_TIMER_CLOCK_WALL: + _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); + break; + + case DISPATCH_PORT_TIMER_CLOCK_UPTIME: + _dispatch_event_merge_timer(DISPATCH_CLOCK_UPTIME); + break; + + case DISPATCH_PORT_TIMER_CLOCK_MONOTONIC: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MONOTONIC); + break; + + default: + DISPATCH_INTERNAL_CRASH(ulCompletionKey, + "unsupported completion key"); + } + + bSuccess = GetQueuedCompletionStatus(hPort, + &dwNumberOfBytesTransferred, &ulCompletionKey, &pOV, 0); + } + + if (bSuccess == FALSE && pOV != NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "GetQueuedCompletionStatus"); + } } void diff --git a/src/unifdef.sIXsKL b/src/unifdef.9jgBfV similarity index 100% rename from src/unifdef.sIXsKL rename to src/unifdef.9jgBfV From 45d641d48af58ce7a6feb189663943006437109b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:40:26 -0700 Subject: [PATCH 090/249] remove last few __LP64__ usage This cleans up the __LP64__ usage to allow libdispatch to work correctly on LLP64 targets (like Windows x64). Signed-off-by: Kim Topley --- src/allocator_internal.h | 4 ++-- src/benchmark.c | 4 ++-- src/inline_internal.h | 2 +- src/queue.c | 17 +++++++++++++++-- src/queue_internal.h | 2 +- src/{unifdef.9jgBfV => unifdef.KWVIEP} | 17 +++++++++++++++-- 6 files changed, 36 insertions(+), 10 deletions(-) rename src/{unifdef.9jgBfV => unifdef.KWVIEP} (99%) diff --git a/src/allocator_internal.h b/src/allocator_internal.h index ead653595..2b5a6061b 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -97,7 +97,7 @@ // Use the largest type your platform is comfortable doing atomic ops with. 
// TODO: rdar://11477843 typedef unsigned long bitmap_t; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 #define BYTES_PER_BITMAP 8 #else #define BYTES_PER_BITMAP 4 @@ -147,7 +147,7 @@ typedef unsigned long bitmap_t; #define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x)) -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 #define SIZEOF_HEADER 16 #else #define SIZEOF_HEADER 8 diff --git a/src/benchmark.c b/src/benchmark.c index b47504386..2b1edb76a 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -41,7 +41,7 @@ _dispatch_benchmark_init(void *context) register size_t cnt = bdata->count; size_t i = 0; uint64_t start, delta; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 __uint128_t lcost; #else long double lcost; @@ -93,7 +93,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, }; static dispatch_once_t pred; uint64_t ns, start, delta; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 __uint128_t conversion, big_denom; #else long double conversion, big_denom; diff --git a/src/inline_internal.h b/src/inline_internal.h index 317e4afbe..ba6dbed47 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -1326,7 +1326,7 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) if (unlikely(!_dq_state_is_base_wlh(old_state) || !_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_manager(old_state))) { -#if !__LP64__ +#if DISPATCH_SIZEOF_PTR == 4 old_state >>= 32; #endif DISPATCH_INTERNAL_CRASH(old_state, "Invalid wlh state"); diff --git a/src/queue.c b/src/queue.c index 6668b1e6a..9a353bd69 100644 --- a/src/queue.c +++ b/src/queue.c @@ -2576,6 +2576,19 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } +DISPATCH_ALWAYS_INLINE +static void +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; +#if DISPATCH_SIZEOF_PTR == 4 + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); +} + DISPATCH_ALWAYS_INLINE static void _dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) @@ -3576,7 +3589,7 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, "Release of a locked workloop"); } -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 dq_state >>= 32; #endif DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, @@ -3976,7 +3989,7 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, }); if (unlikely(_dq_state_is_suspended(old_state))) { -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 old_state >>= 32; #endif DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); diff --git a/src/queue_internal.h b/src/queue_internal.h index 90b57b685..34e22e296 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -964,7 +964,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); // If dc_flags is less than 0x1000, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. 
-#if __LP64__ +#if DISPATCH_SIZEOF_PTR == 8 #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ const void *do_vtable; \ diff --git a/src/unifdef.9jgBfV b/src/unifdef.KWVIEP similarity index 99% rename from src/unifdef.9jgBfV rename to src/unifdef.KWVIEP index 4129826ab..27bc3df46 100644 --- a/src/unifdef.9jgBfV +++ b/src/unifdef.KWVIEP @@ -2572,6 +2572,19 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } +DISPATCH_ALWAYS_INLINE +static void +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; +#if DISPATCH_SIZEOF_PTR == 4 + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); +} + DISPATCH_ALWAYS_INLINE static void _dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) @@ -4182,7 +4195,7 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, "Release of a locked workloop"); } -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 dq_state >>= 32; #endif DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, @@ -4582,7 +4595,7 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, }); if (unlikely(_dq_state_is_suspended(old_state))) { -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 old_state >>= 32; #endif DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); From e8703c9d4b1e3e02fd221476fe28e1bb5880d8fa Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:44:29 -0700 Subject: [PATCH 091/249] simplify preprocessor checks Linux, FreeBSD already define DISPATCH_COCOA_COMPAT, and Windows was defining that in a couple of places in an unstructured haphazard manner. Define it similarly and clean up the other sites. 
Signed-off-by: Kim Topley --- private/private.h | 6 +++--- src/{unifdef.KWVIEP => unifdef.l3TKJZ} | 0 2 files changed, 3 insertions(+), 3 deletions(-) rename src/{unifdef.KWVIEP => unifdef.l3TKJZ} (100%) diff --git a/private/private.h b/private/private.h index 9692c2af8..1e5a4c24d 100644 --- a/private/private.h +++ b/private/private.h @@ -178,13 +178,13 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); #if TARGET_OS_MAC #define DISPATCH_COCOA_COMPAT 1 -#elif defined(__linux__) || defined(__FreeBSD__) +#elif defined(__linux__) || defined(__FreeBSD__) || defined(_WIN32) #define DISPATCH_COCOA_COMPAT 1 #else #define DISPATCH_COCOA_COMPAT 0 #endif -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) +#if DISPATCH_COCOA_COMPAT #define DISPATCH_CF_SPI_VERSION 20160712 @@ -259,7 +259,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); -#endif /* DISPATCH_COCOA_COMPAT || defined(_WIN32) */ +#endif /* DISPATCH_COCOA_COMPAT */ API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW diff --git a/src/unifdef.KWVIEP b/src/unifdef.l3TKJZ similarity index 100% rename from src/unifdef.KWVIEP rename to src/unifdef.l3TKJZ From c34242a69cafa82379e49b4e14cc7a8c462b2bff Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:45:37 -0700 Subject: [PATCH 092/249] windows: correct use of WaitOnAddress Signed-off-by: Kim Topley --- src/shims/lock.c | 2 +- src/{unifdef.l3TKJZ => unifdef.yXVRSL} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.l3TKJZ => unifdef.yXVRSL} (100%) diff --git a/src/shims/lock.c b/src/shims/lock.c index e0358d261..ccc4e1cbc 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -509,7 +509,7 @@ _dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, } return _dispatch_futex_wait(address, value, NULL, FUTEX_PRIVATE_FLAG); #elif defined(_WIN32) - WaitOnAddress(address, (PVOID)(uintptr_t)value, sizeof(value), INFINITE); + WaitOnAddress(address, &value, sizeof(value), INFINITE); #else #error _dispatch_wait_on_address unimplemented for this platform #endif diff --git a/src/unifdef.l3TKJZ b/src/unifdef.yXVRSL similarity index 100% rename from src/unifdef.l3TKJZ rename to src/unifdef.yXVRSL From 7a9f955b6b16b86e5548d1c5ffd91b9a43a1934d Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:48:38 -0700 Subject: [PATCH 093/249] queue: implement queue handles for Windows Signed-off-by: Kim Topley --- src/queue.c | 18 +++++++++++++++--- src/{unifdef.yXVRSL => unifdef.FwW4K7} | 18 +++++++++++++++--- 2 files changed, 30 insertions(+), 6 deletions(-) rename src/{unifdef.yXVRSL => unifdef.FwW4K7} (99%) diff --git a/src/queue.c b/src/queue.c index 9a353bd69..bfa75ad41 100644 --- a/src/queue.c +++ b/src/queue.c @@ -6358,7 +6358,7 @@ _dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) #elif defined(__linux__) return handle >= 0; #elif defined(_WIN32) - return handle != INVALID_HANDLE_VALUE; + return handle != NULL; #else #error "runloop support not implemented on this platform" #endif @@ -6447,7 +6447,13 @@ _dispatch_runloop_queue_handle_init(void *ctxt) } handle = fd; #elif defined(_WIN32) - handle = INVALID_HANDLE_VALUE; + HANDLE hEvent; + hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, + /*bInitialState=*/FALSE, NULL); + if (hEvent == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + handle = hEvent; #else #error "runloop support not implemented on this platform" 
#endif @@ -6475,7 +6481,9 @@ _dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) int rc = close(handle); (void)dispatch_assume_zero(rc); #elif defined(_WIN32) - CloseHandle(handle); + BOOL bSuccess; + bSuccess = CloseHandle(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif @@ -6510,6 +6518,10 @@ _dispatch_runloop_queue_class_poke(dispatch_lane_t dq) result = eventfd_write(handle, 1); } while (result == -1 && errno == EINTR); (void)dispatch_assume_zero(result); +#elif defined(_WIN32) + BOOL bSuccess; + bSuccess = SetEvent(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif diff --git a/src/unifdef.yXVRSL b/src/unifdef.FwW4K7 similarity index 99% rename from src/unifdef.yXVRSL rename to src/unifdef.FwW4K7 index 27bc3df46..fe62dd7bf 100644 --- a/src/unifdef.yXVRSL +++ b/src/unifdef.FwW4K7 @@ -6964,7 +6964,7 @@ _dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) #elif defined(__linux__) return handle >= 0; #elif defined(_WIN32) - return handle != INVALID_HANDLE_VALUE; + return handle != NULL; #else #error "runloop support not implemented on this platform" #endif @@ -7053,7 +7053,13 @@ _dispatch_runloop_queue_handle_init(void *ctxt) } handle = fd; #elif defined(_WIN32) - handle = INVALID_HANDLE_VALUE; + HANDLE hEvent; + hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, + /*bInitialState=*/FALSE, NULL); + if (hEvent == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + handle = hEvent; #else #error "runloop support not implemented on this platform" #endif @@ -7081,7 +7087,9 @@ _dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) int rc = close(handle); (void)dispatch_assume_zero(rc); #elif defined(_WIN32) - CloseHandle(handle); + BOOL bSuccess; + bSuccess = CloseHandle(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif @@ -7116,6 +7124,10 @@ _dispatch_runloop_queue_class_poke(dispatch_lane_t dq) result = eventfd_write(handle, 1); } while (result == -1 && errno == EINTR); (void)dispatch_assume_zero(result); +#elif defined(_WIN32) + BOOL bSuccess; + bSuccess = SetEvent(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif From 431aad9c0718dc8133eb8b565c6f8ec5fec7d7e1 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:50:04 -0700 Subject: [PATCH 094/249] correct debug print format specifiers Signed-off-by: Kim Topley --- src/io.c | 4 ++-- src/{unifdef.FwW4K7 => unifdef.x6WQcv} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename src/{unifdef.FwW4K7 => unifdef.x6WQcv} (100%) diff --git a/src/io.c b/src/io.c index a807c683b..f5be3a6fc 100644 --- a/src/io.c +++ b/src/io.c @@ -147,7 +147,7 @@ enum { #endif // DISPATCH_IO_DEBUG #define _dispatch_fd_debug(msg, fd, ...) \ - _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) + _dispatch_io_log("fd[0x%" PRIx64 "]: " msg, fd, ##__VA_ARGS__) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) #define _dispatch_io_channel_debug(msg, channel, ...) 
\ @@ -1380,7 +1380,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // On fds lock queue dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); - _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); + _dispatch_fd_entry_debug("create: fd %" PRId64, fd_entry, fd); fd_entry->fd = fd; LIST_INSERT_HEAD(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( diff --git a/src/unifdef.FwW4K7 b/src/unifdef.x6WQcv similarity index 100% rename from src/unifdef.FwW4K7 rename to src/unifdef.x6WQcv From 693b697add4079d677dca801b935f2a2d4eb972a Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:50:54 -0700 Subject: [PATCH 095/249] build: allow enabling assertions Signed-off-by: Kim Topley --- CMakeLists.txt | 2 ++ src/{unifdef.x6WQcv => unifdef.9TLn7R} | 0 2 files changed, 2 insertions(+) rename src/{unifdef.x6WQcv => unifdef.9TLn7R} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a86eb80f..462be4e04 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,8 @@ if(NOT ENABLE_SWIFT) set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") endif() +option(DISPATCH_ENABLE_ASSERTS "enable debug assertions" FALSE) + option(ENABLE_DTRACE "enable dtrace support" "") option(ENABLE_TESTING "build libdispatch tests" ON) diff --git a/src/unifdef.x6WQcv b/src/unifdef.9TLn7R similarity index 100% rename from src/unifdef.x6WQcv rename to src/unifdef.9TLn7R From 6a3a8780b08e3b0f5a8adc0c4db21bee773fe203 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:51:42 -0700 Subject: [PATCH 096/249] TSD: correct FlsAlloc check FlsAlloc returns FLS_OUT_OF_INDEXES in an error scenario, everything else is a valid result. Correct the assertion. Signed-off-by: Kim Topley --- src/shims/tsd.h | 3 ++- src/{unifdef.9TLn7R => unifdef.FCywgs} | 0 2 files changed, 2 insertions(+), 1 deletion(-) rename src/{unifdef.9TLn7R => unifdef.FCywgs} (100%) diff --git a/src/shims/tsd.h b/src/shims/tsd.h index f44d7c863..446c4d796 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -112,7 +112,8 @@ DISPATCH_TSD_INLINE static inline void _dispatch_thread_key_create(DWORD *k, void (DISPATCH_TSD_DTOR_CC *d)(void *)) { - dispatch_assert_zero((*k = FlsAlloc(d))); + *k = FlsAlloc(d); + dispatch_assert(*k != FLS_OUT_OF_INDEXES); } extern DWORD __dispatch_tsd_key; diff --git a/src/unifdef.9TLn7R b/src/unifdef.FCywgs similarity index 100% rename from src/unifdef.9TLn7R rename to src/unifdef.FCywgs From 9da4d23362aac8d91e11e41208c8cfc81f9e5581 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 17:53:46 -0700 Subject: [PATCH 097/249] shims: correct _dispatch_get_nanoseconds on Windows The NT time representation uses 1/1/1601 as epoch as opposed to Unix which uses 1/1/1970. Account for the offset in the calculation. This corrects the calculation of now. 
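For reference, the bias is the number of seconds separating the two epochs: 369 years including 89 leap days, i.e. ((1970 - 1601) * 365 + 89) * 86400 = 11644473600. A minimal standalone sketch of the conversion (illustrative, not part of the patch; assumes only <windows.h> and <stdint.h>):

    #include <stdint.h>
    #include <windows.h>

    // Convert the current NT FILETIME (100 ns ticks since 1601-01-01 UTC)
    // into nanoseconds since the Unix epoch (1970-01-01 UTC).
    static uint64_t unix_nanoseconds_now(void)
    {
        const uint64_t bias_ns = 11644473600ULL * 1000000000ULL;
        FILETIME ft;
        ULARGE_INTEGER li;
        GetSystemTimeAsFileTime(&ft);
        li.LowPart = ft.dwLowDateTime;
        li.HighPart = ft.dwHighDateTime;
        return li.QuadPart * 100ULL - bias_ns;
    }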
Signed-off-by: Kim Topley
---
 src/shims/time.h                       | 3 ++-
 src/{unifdef.FCywgs => unifdef.eNWOmm} | 0
 2 files changed, 2 insertions(+), 1 deletion(-)
 rename src/{unifdef.FCywgs => unifdef.eNWOmm} (100%)

diff --git a/src/shims/time.h b/src/shims/time.h
index 063d52397..2b5a6e940 100644
--- a/src/shims/time.h
+++ b/src/shims/time.h
@@ -108,13 +108,14 @@ _dispatch_get_nanoseconds(void)
 	dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts));
 	return _dispatch_timespec_to_nano(ts);
 #elif defined(_WIN32)
+	static const uint64_t kNTToUNIXBiasAdjustment = 11644473600 * NSEC_PER_SEC;
 	// FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
 	FILETIME ft;
 	ULARGE_INTEGER li;
 	GetSystemTimeAsFileTime(&ft);
 	li.LowPart = ft.dwLowDateTime;
 	li.HighPart = ft.dwHighDateTime;
-	return li.QuadPart * 100ull;
+	return li.QuadPart * 100ull - kNTToUNIXBiasAdjustment;
 #else
 	struct timeval tv;
 	dispatch_assert_zero(gettimeofday(&tv, NULL));
diff --git a/src/unifdef.FCywgs b/src/unifdef.eNWOmm
similarity index 100%
rename from src/unifdef.FCywgs
rename to src/unifdef.eNWOmm

From 0e4894bdcb561fae687c6f1ace6d4e42ff910888 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 18 Mar 2019 17:55:22 -0700
Subject: [PATCH 098/249] event: support CLOCK_WALL on Windows

This is needed for XCTest to run its test suite.

Signed-off-by: Kim Topley
---
 src/event/event_windows.c              | 12 +++++-------
 src/{unifdef.eNWOmm => unifdef.hzLX9X} | 0
 2 files changed, 5 insertions(+), 7 deletions(-)
 rename src/{unifdef.eNWOmm => unifdef.hzLX9X} (100%)

diff --git a/src/event/event_windows.c b/src/event/event_windows.c
index f662f5d15..7a3f5ae96 100644
--- a/src/event/event_windows.c
+++ b/src/event/event_windows.c
@@ -104,7 +104,7 @@ _dispatch_timer_callback(PTP_CALLBACK_INSTANCE Instance, PVOID Context,
 void
 _dispatch_event_loop_timer_arm(uint32_t tidx,
 		dispatch_timer_delay_s range,
-		dispatch_clock_now_cache_t nows DISPATCH_UNUSED)
+		dispatch_clock_now_cache_t nows)
 {
@@ -113,9 +113,9 @@ _dispatch_event_loop_timer_arm(uint32_t tidx,
 	switch (DISPATCH_TIMER_CLOCK(tidx)) {
 	case DISPATCH_CLOCK_WALL:
 		timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL];
-
-		WIN_PORT_ERROR();
-		__assume(0);
+		liTime.QuadPart = range.delay +
+				_dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
+
 		break;
 	case DISPATCH_CLOCK_UPTIME:
 	case DISPATCH_CLOCK_MONOTONIC:
@@ -149,9 +149,7 @@ _dispatch_event_loop_timer_delete(uint32_t tidx)
 	switch (DISPATCH_TIMER_CLOCK(tidx)) {
 	case DISPATCH_CLOCK_WALL:
 		timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL];
-
-		WIN_PORT_ERROR();
-		__assume(0);
+
 		break;
 	case DISPATCH_CLOCK_UPTIME:
 	case DISPATCH_CLOCK_MONOTONIC:
diff --git a/src/unifdef.eNWOmm b/src/unifdef.hzLX9X
similarity index 100%
rename from src/unifdef.eNWOmm
rename to src/unifdef.hzLX9X

From 4985807f6102c6b5b9437d27c0ddb5ed2bed0a94 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 18 Mar 2019 17:55:52 -0700
Subject: [PATCH 099/249] time: use GetSystemTimePreciseAsFileTime on Windows

Use the `GetSystemTimePreciseAsFileTime` rather than
`GetSystemTimeAsFileTime` as we may otherwise get a cached value from
the last context switch.
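A standalone way to observe the difference (illustrative sketch only; `GetSystemTimePreciseAsFileTime` requires Windows 8 or later):

    #include <stdio.h>
    #include <windows.h>

    int main(void)
    {
        // The coarse clock typically advances once per scheduler tick
        // (~15.6 ms); the precise variant resolves to well under 1 us.
        FILETIME ft;
        ULARGE_INTEGER a, b;
        GetSystemTimeAsFileTime(&ft);
        a.LowPart = ft.dwLowDateTime; a.HighPart = ft.dwHighDateTime;
        GetSystemTimePreciseAsFileTime(&ft);
        b.LowPart = ft.dwLowDateTime; b.HighPart = ft.dwHighDateTime;
        printf("coarse  = %llu\nprecise = %llu (100 ns units)\n",
                (unsigned long long)a.QuadPart, (unsigned long long)b.QuadPart);
        return 0;
    }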
Signed-off-by: Kim Topley
---
 src/shims/time.h                       | 2 +-
 src/{unifdef.hzLX9X => unifdef.czudg7} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename src/{unifdef.hzLX9X => unifdef.czudg7} (100%)

diff --git a/src/shims/time.h b/src/shims/time.h
index 2b5a6e940..3502a8481 100644
--- a/src/shims/time.h
+++ b/src/shims/time.h
@@ -112,7 +112,7 @@ _dispatch_get_nanoseconds(void)
 	// FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC).
 	FILETIME ft;
 	ULARGE_INTEGER li;
-	GetSystemTimeAsFileTime(&ft);
+	GetSystemTimePreciseAsFileTime(&ft);
 	li.LowPart = ft.dwLowDateTime;
 	li.HighPart = ft.dwHighDateTime;
 	return li.QuadPart * 100ull - kNTToUNIXBiasAdjustment;
diff --git a/src/unifdef.hzLX9X b/src/unifdef.czudg7
similarity index 100%
rename from src/unifdef.hzLX9X
rename to src/unifdef.czudg7

From 3f2ffe02e0e49273d7812fe8b52396f0720d70f3 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 18 Mar 2019 17:58:30 -0700
Subject: [PATCH 100/249] lock: adopt the POSIX behaviour on Windows

Ideally we would do the lazy semaphore approach that Mach does.
Unfortunately, the queue wasn't getting drained in that case. A
follow-up should investigate and fix the lazy allocation.

Signed-off-by: Kim Topley
---
 src/shims/lock.c                       | 5 ++---
 src/shims/lock.h                       | 6 +++---
 src/{unifdef.czudg7 => unifdef.UIEEfR} | 0
 3 files changed, 5 insertions(+), 6 deletions(-)
 rename src/{unifdef.czudg7 => unifdef.UIEEfR} (100%)

diff --git a/src/shims/lock.c b/src/shims/lock.c
index ccc4e1cbc..35d77414b 100644
--- a/src/shims/lock.c
+++ b/src/shims/lock.c
@@ -260,8 +260,7 @@ _pop_timer_resolution(DWORD ms)
 	if (ms) timeEndPeriod(ms);
 }

-void
-_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED)
+void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
 {
 	HANDLE tmp;

@@ -271,7 +270,7 @@ _dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED)
 		_dispatch_temporary_resource_shortage();
 	}

-	if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
+	if (!os_atomic_cmpxchg(sema, 0, tmp, relaxed)) {
 		CloseHandle(tmp);
 	}
 }
diff --git a/src/shims/lock.h b/src/shims/lock.h
index f32ca5057..a05dd1152 100644
--- a/src/shims/lock.h
+++ b/src/shims/lock.h
@@ -213,9 +213,9 @@ typedef HANDLE _dispatch_sema4_t;
 #define _DSEMA4_POLICY_LIFO 0
 #define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)

-#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0)
-#define _dispatch_sema4_is_created(sema) (*(sema) != 0)
-void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);
+void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy);
+#define _dispatch_sema4_is_created(sema) ((void)sema, 1)
+#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy)

 #else
 #error "port has to implement _dispatch_sema4_t"
diff --git a/src/unifdef.czudg7 b/src/unifdef.UIEEfR
similarity index 100%
rename from src/unifdef.czudg7
rename to src/unifdef.UIEEfR

From a191e33fd42fc2c7de56fbb3460552cafa496f59 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 18 Mar 2019 17:59:29 -0700
Subject: [PATCH 101/249] queue: expose `_dispatch_get_main_queue_port_4CF`

This was being re-implemented in CF by means of a macro. Just expose
the function instead.
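A hypothetical CF-side call site (sketch only; `wakeup_main_runloop` is an invented name) showing the macro replaced by a plain call:

    #include "private.h"  /* declares _dispatch_get_main_queue_port_4CF() */

    static void wakeup_main_runloop(void)
    {
        // The symbol is exported on every port now, so no per-platform
        // macro is needed to reach the main queue handle.
        dispatch_runloop_handle_t h = _dispatch_get_main_queue_port_4CF();
        (void)h;  /* e.g. hand it to the platform's run-loop wait primitive */
    }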
Signed-off-by: Kim Topley --- private/private.h | 3 --- src/queue.c | 2 -- src/{unifdef.UIEEfR => unifdef.CtdWrJ} | 2 -- 3 files changed, 7 deletions(-) rename src/{unifdef.UIEEfR => unifdef.CtdWrJ} (99%) diff --git a/private/private.h b/private/private.h index 1e5a4c24d..b40a36c0f 100644 --- a/private/private.h +++ b/private/private.h @@ -198,12 +198,10 @@ typedef void *dispatch_runloop_handle_t; #error "runloop support not implemented on this platform" #endif -#if TARGET_OS_MAC API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); -#endif API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW @@ -222,7 +220,6 @@ dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); -#if TARGET_OS_MAC || defined(_WIN32) API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t diff --git a/src/queue.c b/src/queue.c index bfa75ad41..6118ffb47 100644 --- a/src/queue.c +++ b/src/queue.c @@ -6832,13 +6832,11 @@ _dispatch_get_main_queue_handle_4CF(void) return _dispatch_runloop_queue_get_handle(dq->_as_dl); } -#if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void) { return _dispatch_get_main_queue_handle_4CF(); } -#endif void _dispatch_main_queue_callback_4CF( diff --git a/src/unifdef.UIEEfR b/src/unifdef.CtdWrJ similarity index 99% rename from src/unifdef.UIEEfR rename to src/unifdef.CtdWrJ index fe62dd7bf..29d8ac67c 100644 --- a/src/unifdef.UIEEfR +++ b/src/unifdef.CtdWrJ @@ -7438,13 +7438,11 @@ _dispatch_get_main_queue_handle_4CF(void) return _dispatch_runloop_queue_get_handle(dq->_as_dl); } -#if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void) { return _dispatch_get_main_queue_handle_4CF(); } -#endif void _dispatch_main_queue_callback_4CF( From 8fcf7df0302bd58800c2b41473f28ba9bb5d5cf6 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Mon, 18 Mar 2019 18:17:58 -0700 Subject: [PATCH 102/249] Optimize Windows contention performance The dispatch_cascade test is very slow on Windows right now (often taking 4-5 minutes to execute on my machine and sometimes locking it up entirely) because contention is not being handled correctly. `_dispatch_contention_usleep()` is expected to put the thread to sleep, but on Windows it spins on `QueryPerformanceCounter()`. This is causing a huge amount of starvation in the dispatch_cascade test. Implement it using `Sleep()`, and accordingly adjust `DISPATCH_CONTENTION_USLEEP_START` to be 1ms on Windows. Additionally, `_dispatch_contention_spins()` is currently implemented using the `rand_s()` function. This is slow (and experimentally seems to be slower than not randomizing the spin count at all!) because `rand_s()` guarantees cryptographic security, which is unnecessary for dispatch's use case. Replace it with a basic linear congruential generator (from K&R) since there isn't any other `rand_r()` equivalent. Based on the average wall clock times reported by bsdtestharness, this is around 35% faster on my PC (i7-8700K). These changes bring the runtime of the dispatch_cascade test down to around 1-2s at most for me. (It's even faster than this if stdout isn't a console window because that slows down the histogram display.) 
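A standalone sketch of the generator this patch adopts (the bounds here are illustrative; libdispatch computes its own DISPATCH_CONTENTION_SPINS_MIN/MAX):

    #include <stdio.h>

    #define SPINS_MIN 32    /* illustrative, not libdispatch's values */
    #define SPINS_MAX 1023  /* must be one less than a power of two */

    // K&R linear congruential generator: one multiply-add per draw and no
    // cryptographic guarantees -- all a randomized spin count needs.
    static unsigned int next_spins(unsigned int *seed)
    {
        unsigned int next = *seed;
        *seed = next * 1103515245 + 12345;
        // Use the high bits: the low bits of a power-of-two LCG have a
        // short period.
        return ((next >> 24) & SPINS_MAX) | SPINS_MIN;
    }

    int main(void)
    {
        unsigned int seed = 1;
        for (int i = 0; i < 4; i++) printf("%u spins\n", next_spins(&seed));
        return 0;
    }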
Signed-off-by: Kim Topley --- src/internal.h | 3 --- src/shims/yield.h | 32 +++++++++++--------------- src/{unifdef.CtdWrJ => unifdef.gmOBfK} | 0 3 files changed, 14 insertions(+), 21 deletions(-) rename src/{unifdef.CtdWrJ => unifdef.gmOBfK} (100%) diff --git a/src/internal.h b/src/internal.h index 775fd7249..990bfbe4a 100644 --- a/src/internal.h +++ b/src/internal.h @@ -315,9 +315,6 @@ upcast(dispatch_object_t dou) #include #include #include -#if defined(_WIN32) -#define _CRT_RAND_S -#endif #include #include #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) diff --git a/src/shims/yield.h b/src/shims/yield.h index 0f1db8747..a9cf5d6e0 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -99,10 +99,15 @@ void *_dispatch_wait_for_enqueuer(void **ptr); ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ (DISPATCH_CONTENTION_SPINS_MIN)) / 2) #elif defined(_WIN32) -#define _dispatch_contention_spins() ({ \ - unsigned int _value; \ - rand_s(&_value); \ - (_value & DISPATCH_CONTENTION_SPINS_MAX) | DISPATCH_CONTENTION_SPINS_MIN; }) +// Use randomness to prevent threads from resonating at the same frequency and +// permanently contending. Windows doesn't provide rand_r(), so use a simple +// LCG. (msvcrt has rand_s(), but its security guarantees aren't optimal here.) +#define _dispatch_contention_spins() ({ \ + static os_atomic(unsigned int) _seed = 1; \ + unsigned int _next = os_atomic_load(&_seed, relaxed); \ + os_atomic_store(&_seed, _next * 1103515245 + 12345, relaxed); \ + ((_next >> 24) & (DISPATCH_CONTENTION_SPINS_MAX)) | \ + (DISPATCH_CONTENTION_SPINS_MIN); }) #else // Use randomness to prevent threads from resonating at the same // frequency and permanently contending. @@ -160,8 +165,12 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #pragma mark _dispatch_contention_usleep #ifndef DISPATCH_CONTENTION_USLEEP_START +#if defined(_WIN32) +#define DISPATCH_CONTENTION_USLEEP_START 1000 // Must be >= 1ms for Sleep() +#else #define DISPATCH_CONTENTION_USLEEP_START 500 #endif +#endif #ifndef DISPATCH_CONTENTION_USLEEP_MAX #define DISPATCH_CONTENTION_USLEEP_MAX 100000 #endif @@ -176,20 +185,7 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #endif #else #if defined(_WIN32) -DISPATCH_INLINE void -_dispatch_contention_usleep(uint64_t useconds) { - static BOOL bQPFExecuted = FALSE; - static LARGE_INTEGER liFreq; - LARGE_INTEGER liStart, liNow; - - if (!bQPFExecuted) - bQPFExecuted = QueryPerformanceFrequency(&liFreq); - - QueryPerformanceCounter(&liStart); - do { - QueryPerformanceCounter(&liNow); - } while ((liNow.QuadPart - liStart.QuadPart) / (float)liFreq.QuadPart * 1000 * 1000 < useconds); -} +#define _dispatch_contention_usleep(u) Sleep((u) / 1000) #else #define _dispatch_contention_usleep(u) usleep((u)) #endif diff --git a/src/unifdef.CtdWrJ b/src/unifdef.gmOBfK similarity index 100% rename from src/unifdef.CtdWrJ rename to src/unifdef.gmOBfK From 833974663549d6d70a8f59ad3a22ab360dd37e93 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 18:28:15 -0700 Subject: [PATCH 103/249] shims: flesh out generic_sys_queue.h further The 1121 merge introduced additional usage of sys/queue.h. Flesh out the shims header further to repair the build on Windows. 
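A usage sketch for the intrusive-list macros the shim gains below (standalone apart from the shim header; `struct item` is invented for illustration):

    #include <stdio.h>
    #include "shims/generic_sys_queue.h"

    struct item {
        int value;
        LIST_ENTRY(item) link;  // embeds the list linkage in the element
    };
    LIST_HEAD(item_list, item);

    int main(void)
    {
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item_list head = { NULL };  // an empty list
        LIST_INSERT_HEAD(&head, &a, link);
        LIST_INSERT_HEAD(&head, &b, link);  // list is now: b, a
        struct item *it;
        LIST_FOREACH(it, &head, link) printf("%d\n", it->value);
        return 0;
    }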
Signed-off-by: Kim Topley
---
 src/shims/generic_sys_queue.h          | 42 ++++++++++++++++++++++++++
 src/{unifdef.gmOBfK => unifdef.yco40O} |  0
 2 files changed, 42 insertions(+)
 rename src/{unifdef.gmOBfK => unifdef.yco40O} (100%)

diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h
index 1d9a18d0f..c6c658706 100644
--- a/src/shims/generic_sys_queue.h
+++ b/src/shims/generic_sys_queue.h
@@ -89,4 +89,46 @@
 	} \
 } while(0)

+#define TAILQ_HEAD_INITIALIZER(head) \
+	{ NULL, (head).tq_first }
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+		if (!TAILQ_EMPTY(head2)) { \
+			(head1)->tq_last = (head2)->tq_first; \
+			(head1)->tq_first->field.te_prev = (head1)->tq_last; \
+			(head1)->tq_last = (head2)->tq_last; \
+			TAILQ_INIT((head2)); \
+		} \
+	} while (0)
+
+#define LIST_HEAD(name, type) struct name { \
+		struct type *lh_first; \
+	}
+
+#define LIST_ENTRY(type) struct { \
+		struct type *le_next; \
+		struct type *le_prev; \
+	}
+
+#define LIST_FIRST(head) ((head)->lh_first)
+
+#define LIST_FOREACH(var, head, field) \
+	for ((var) = LIST_FIRST((head)); \
+			(var); \
+			(var) = LIST_NEXT((var), field))
+
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_REMOVE(elm, field) do { \
+		if (LIST_NEXT((elm), field) != NULL) \
+			LIST_NEXT((elm), field)->field.le_prev = (elm)->field.le_prev; \
+	} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+		if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
+			LIST_FIRST((head))->field.le_prev = LIST_NEXT((elm), field); \
+		LIST_FIRST((head)) = (elm); \
+		(elm)->field.le_prev = LIST_FIRST((head)); \
+	} while (0)
+
 #endif // __DISPATCH_SHIMS_SYS_QUEUE__
diff --git a/src/unifdef.gmOBfK b/src/unifdef.yco40O
similarity index 100%
rename from src/unifdef.gmOBfK
rename to src/unifdef.yco40O

From 2d7c429faa3c950f4fa9cadddcee48556cd2a2dc Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 18 Mar 2019 18:36:35 -0700
Subject: [PATCH 104/249] shims: correct `_dispatch_uptime` for Windows

This should use `QueryUnbiasedInterruptTime`. `QueryPerformanceCounter`
is roughly in units of the TSC frequency. We would need to use
`QueryPerformanceFrequency` to convert that into nanoseconds.
Additionally, `QueryPerformanceCounter` includes time spent during
suspend, which is not what is desired. Switch to
`QueryUnbiasedInterruptTime` to get something close to `CLOCK_MONOTONIC`
on Linux.

Signed-off-by: Kim Topley
---
 src/shims/time.h                       | 7 ++++---
 src/{unifdef.yco40O => unifdef.uYXNEi} | 0
 2 files changed, 4 insertions(+), 3 deletions(-)
 rename src/{unifdef.yco40O => unifdef.uYXNEi} (100%)

diff --git a/src/shims/time.h b/src/shims/time.h
index 3502a8481..8fae5a2f2 100644
--- a/src/shims/time.h
+++ b/src/shims/time.h
@@ -149,9 +149,10 @@ _dispatch_uptime(void)
 	struct timespec ts;
 	dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts));
 	return _dispatch_timespec_to_nano(ts);
-#elif TARGET_OS_WIN32
-	LARGE_INTEGER now;
-	return QueryPerformanceCounter(&now) ?
now.QuadPart : 0; +#elif defined(_WIN32) + ULONGLONG ullUnbiasedTime; + QueryUnbiasedInterruptTime(&ullUnbiasedTime); + return ullUnbiasedTime * 100; #else #error platform needs to implement _dispatch_uptime() #endif diff --git a/src/unifdef.yco40O b/src/unifdef.uYXNEi similarity index 100% rename from src/unifdef.yco40O rename to src/unifdef.uYXNEi From e2d01f850e357a79687997cb2e8bed590320f91a Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 19:43:08 -0700 Subject: [PATCH 105/249] benchmark: disable 128-bit math on Windows Windows does not support 128-bit division, disable the support for now. One option is to statically link compiler-rt to provide `__udivti3`. Signed-off-by: Kim Topley --- src/benchmark.c | 4 ++-- src/{unifdef.uYXNEi => unifdef.g2Y5qV} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename src/{unifdef.uYXNEi => unifdef.g2Y5qV} (100%) diff --git a/src/benchmark.c b/src/benchmark.c index 2b1edb76a..15e9f5535 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -41,7 +41,7 @@ _dispatch_benchmark_init(void *context) register size_t cnt = bdata->count; size_t i = 0; uint64_t start, delta; -#if DISPATCH_SIZEOF_PTR == 8 +#if DISPATCH_SIZEOF_PTR == 8 && !defined(_WIN32) __uint128_t lcost; #else long double lcost; @@ -93,7 +93,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, }; static dispatch_once_t pred; uint64_t ns, start, delta; -#if DISPATCH_SIZEOF_PTR == 8 +#if DISPATCH_SIZEOF_PTR == 8 && !defined(_WIN32) __uint128_t conversion, big_denom; #else long double conversion, big_denom; diff --git a/src/unifdef.uYXNEi b/src/unifdef.g2Y5qV similarity index 100% rename from src/unifdef.uYXNEi rename to src/unifdef.g2Y5qV From eb989df03bc6f29d6f7f3c3f17946530443baedf Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 18 Mar 2019 19:46:27 -0700 Subject: [PATCH 106/249] resolve 1121 merge conflicts This addresses the updates that were not applied to the Windows port but were applied to the Linux port and the macOS port. 
Signed-off-by: Kim Topley --- src/event/event_windows.c | 26 +++++++++++++++----------- src/event/workqueue.c | 2 -- src/io.c | 6 +++--- src/queue.c | 7 ++++++- src/shims/generic_win_stubs.h | 2 ++ src/shims/lock.c | 6 ++++-- src/shims/yield.h | 4 ++-- src/{unifdef.g2Y5qV => unifdef.283QS8} | 8 +++++++- 8 files changed, 39 insertions(+), 22 deletions(-) rename src/{unifdef.g2Y5qV => unifdef.283QS8} (99%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 7a3f5ae96..1e3fae7db 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -32,23 +32,20 @@ enum _dispatch_windows_port { #pragma mark dispatch_unote_t bool -_dispatch_unote_register(dispatch_unote_t du DISPATCH_UNUSED, - dispatch_wlh_t wlh DISPATCH_UNUSED, - dispatch_priority_t pri DISPATCH_UNUSED) +_dispatch_unote_register_muxed(dispatch_unote_t du DISPATCH_UNUSED) { WIN_PORT_ERROR(); return false; } void -_dispatch_unote_resume(dispatch_unote_t du DISPATCH_UNUSED) +_dispatch_unote_resume_muxed(dispatch_unote_t du DISPATCH_UNUSED) { WIN_PORT_ERROR(); } bool -_dispatch_unote_unregister(dispatch_unote_t du DISPATCH_UNUSED, - uint32_t flags DISPATCH_UNUSED) +_dispatch_unote_unregister_muxed(dispatch_unote_t du DISPATCH_UNUSED) { WIN_PORT_ERROR(); return false; @@ -102,8 +99,8 @@ _dispatch_timer_callback(PTP_CALLBACK_INSTANCE Instance, PVOID Context, } void -_dispatch_event_loop_timer_arm(uint32_t tidx, - dispatch_timer_delay_s range, +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx, dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows) { dispatch_windows_timeout_t timer; @@ -142,7 +139,8 @@ _dispatch_event_loop_timer_arm(uint32_t tidx, } void -_dispatch_event_loop_timer_delete(uint32_t tidx) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx) { dispatch_windows_timeout_t timer; @@ -238,6 +236,12 @@ _dispatch_event_loop_drain(uint32_t flags) } } +void +_dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + void _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) @@ -269,9 +273,9 @@ _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) #endif void -_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +_dispatch_event_loop_leave_immediate(uint64_t dq_state) { - (void)wlh; (void)dq_state; + (void)dq_state; } #endif // DISPATCH_EVENT_BACKEND_WINDOWS diff --git a/src/event/workqueue.c b/src/event/workqueue.c index dc020f32a..28f167517 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -97,7 +97,6 @@ _dispatch_workq_worker_register(dispatch_queue_global_t root_q) _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); #else (void)root_q; - (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } @@ -124,7 +123,6 @@ _dispatch_workq_worker_unregister(dispatch_queue_global_t root_q) _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); #else (void)root_q; - (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } diff --git a/src/io.c b/src/io.c index f5be3a6fc..9c0fff83d 100644 --- a/src/io.c +++ b/src/io.c @@ -1394,11 +1394,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) int result = ioctlsocket((SOCKET)fd, (long)FIONBIO, &value); (void)dispatch_assume_zero(result); _dispatch_stream_init(fd_entry, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); + _dispatch_get_default_queue(false)); } else { 
dispatch_suspend(fd_entry->barrier_queue); - dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, - _dispatch_io_devs_lockq_init); + dispatch_once_f(&_dispatch_io_init_pred, NULL, + _dispatch_io_queues_init); dispatch_async(_dispatch_io_devs_lockq, ^{ _dispatch_disk_init(fd_entry, 0); dispatch_resume(fd_entry->barrier_queue); diff --git a/src/queue.c b/src/queue.c index 6118ffb47..a34d7f4d3 100644 --- a/src/queue.c +++ b/src/queue.c @@ -3574,6 +3574,7 @@ _dispatch_queue_drain_should_narrow_slow(uint64_t now, } #endif // TARGET_OS_MAC pthread_attr_destroy(&attr); +#endif // defined(_POSIX_THREADS) } void @@ -5569,7 +5570,9 @@ static void _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) { int remaining = n; +#if !defined(_WIN32) int r = ENOSYS; +#endif _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); @@ -5667,9 +5670,11 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) } _dispatch_temporary_resource_shortage(); } +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); } +#endif CloseHandle((HANDLE)hThread); } while (--remaining); #endif // defined(_WIN32) @@ -6904,7 +6909,7 @@ _dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) // never returns, so burn bridges behind us _dispatch_clear_stack(0); #if defined(_WIN32) - for (;;) SuspendThread(GetCurrentThread()); + Sleep(INFINITE); #else _dispatch_sigsuspend(); #endif diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h index c983cdcee..1ce41f7ec 100644 --- a/src/shims/generic_win_stubs.h +++ b/src/shims/generic_win_stubs.h @@ -34,4 +34,6 @@ typedef __typeof__(_Generic((__SIZE_TYPE__)0, \ #define WIN_PORT_ERROR() \ _RPTF1(_CRT_ASSERT, "WIN_PORT_ERROR in %s", __FUNCTION__) +#define strcasecmp _stricmp + #endif diff --git a/src/shims/lock.c b/src/shims/lock.c index 35d77414b..e96408981 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -508,7 +508,7 @@ _dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, } return _dispatch_futex_wait(address, value, NULL, FUTEX_PRIVATE_FLAG); #elif defined(_WIN32) - WaitOnAddress(address, &value, sizeof(value), INFINITE); + return WaitOnAddress(address, &value, sizeof(value), INFINITE) == TRUE; #else #error _dispatch_wait_on_address unimplemented for this platform #endif @@ -649,7 +649,9 @@ _dispatch_once_wait(dispatch_once_gate_t dgo) { dispatch_lock self = _dispatch_lock_value_for_self(); uintptr_t old_v, new_v; +#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX dispatch_lock *lock = &dgo->dgo_gate.dgl_lock; +#endif uint32_t timeout = 1; for (;;) { @@ -678,7 +680,7 @@ _dispatch_once_wait(dispatch_once_gate_t dgo) _dispatch_futex_wait(lock, (dispatch_lock)new_v, NULL, FUTEX_PRIVATE_FLAG); #else - _dispatch_thread_switch(new_v, flags, timeout++); + _dispatch_thread_switch(new_v, 0, timeout++); #endif (void)timeout; } diff --git a/src/shims/yield.h b/src/shims/yield.h index a9cf5d6e0..382ad42c9 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -154,8 +154,8 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #define _dispatch_preemption_yield(n) { (void)n; pthread_yield_np(); } #define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield_np(); } #elif defined(_WIN32) -#define _dispatch_preemption_yield(n) { (void)n; sched_yield(); } -#define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } +#define _dispatch_preemption_yield(n) { 
(void)n; Sleep(0); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; Sleep(0); } #else #define _dispatch_preemption_yield(n) { (void)n; pthread_yield(); } #define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield(); } diff --git a/src/unifdef.g2Y5qV b/src/unifdef.283QS8 similarity index 99% rename from src/unifdef.g2Y5qV rename to src/unifdef.283QS8 index 29d8ac67c..0ce4e283f 100644 --- a/src/unifdef.g2Y5qV +++ b/src/unifdef.283QS8 @@ -4136,6 +4136,7 @@ static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue static void _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) { +#if defined(_POSIX_THREADS) dispatch_workloop_attr_t dwla = dwl->dwl_attr; pthread_attr_t attr; @@ -4180,6 +4181,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) } #endif // TARGET_OS_MAC pthread_attr_destroy(&attr); +#endif // defined(_POSIX_THREADS) } void @@ -6175,7 +6177,9 @@ static void _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) { int remaining = n; +#if !defined(_WIN32) int r = ENOSYS; +#endif _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); @@ -6273,9 +6277,11 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) } _dispatch_temporary_resource_shortage(); } +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); } +#endif CloseHandle((HANDLE)hThread); } while (--remaining); #endif // defined(_WIN32) @@ -7510,7 +7516,7 @@ _dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) // never returns, so burn bridges behind us _dispatch_clear_stack(0); #if defined(_WIN32) - for (;;) SuspendThread(GetCurrentThread()); + Sleep(INFINITE); #else _dispatch_sigsuspend(); #endif From c54cbcaf92d6115f96e3a61e4e8875a3d3a59f7c Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 20 Mar 2019 07:16:54 -0700 Subject: [PATCH 107/249] Merge pull request #453 from compnerd/windows-improvements Windows improvements Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.283QS8 => unifdef.80ILBh} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.283QS8 => unifdef.80ILBh} (100%) diff --git a/PATCHES b/PATCHES index 5693ca8a6..cb894ef6b 100644 --- a/PATCHES +++ b/PATCHES @@ -474,3 +474,4 @@ github commits starting with 29bdc2f from [ff3bf51] APPLIED rdar://54572081 [bc00e13] APPLIED rdar://54572081 [d44acc0] APPLIED rdar://54572081 +[4659503] APPLIED rdar://54572081 diff --git a/src/unifdef.283QS8 b/src/unifdef.80ILBh similarity index 100% rename from src/unifdef.283QS8 rename to src/unifdef.80ILBh From 97e5dc47880f1498c35c2ec3739161dbecd45e1b Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Tue, 19 Mar 2019 12:02:40 +0100 Subject: [PATCH 108/249] Fixed printf format in _dispatch_semaphore_debug() dsema_value is of type long, and dsema_orig of type intptr_t. 
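A standalone illustration of the rule being applied here (the format directives must match the argument types on LLP64 Windows as well as on LP64 platforms):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        long value = -1;    // like dsema_value
        intptr_t orig = 1;  // like dsema_orig
        // %ld pairs with long; PRIdPTR expands to whatever directive
        // intptr_t needs on the current platform. PRId64 is only correct
        // where these types happen to be 64 bits wide.
        printf("value = %ld, orig = %" PRIdPTR "\n", value, orig);
        return 0;
    }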
Signed-off-by: Kim Topley --- src/semaphore.c | 2 +- src/{unifdef.80ILBh => unifdef.lbqRrC} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.80ILBh => unifdef.lbqRrC} (100%) diff --git a/src/semaphore.c b/src/semaphore.c index 222d872ec..69da75f97 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -76,7 +76,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, - "value = %" PRId64 ", orig = %" PRId64 " }", dsema->dsema_value, dsema->dsema_orig); + "value = %ld, orig = %" PRIdPTR " }", dsema->dsema_value, dsema->dsema_orig); return offset; } diff --git a/src/unifdef.80ILBh b/src/unifdef.lbqRrC similarity index 100% rename from src/unifdef.80ILBh rename to src/unifdef.lbqRrC From d6a0bba64a164236e29d2a4432e50d4bf766fbd5 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 20 Mar 2019 07:18:04 -0700 Subject: [PATCH 109/249] Merge pull request #448 from triplef/fix-printf-ptr Fixed printf format macro for intptr_t arguments. Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.lbqRrC => unifdef.mScwXz} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.lbqRrC => unifdef.mScwXz} (100%) diff --git a/PATCHES b/PATCHES index cb894ef6b..ea0882af3 100644 --- a/PATCHES +++ b/PATCHES @@ -475,3 +475,4 @@ github commits starting with 29bdc2f from [bc00e13] APPLIED rdar://54572081 [d44acc0] APPLIED rdar://54572081 [4659503] APPLIED rdar://54572081 +[60fdf80] APPLIED rdar://54572081 diff --git a/src/unifdef.lbqRrC b/src/unifdef.mScwXz similarity index 100% rename from src/unifdef.lbqRrC rename to src/unifdef.mScwXz From e5308a4da199781525a22065c37f0ff9fea74b84 Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Tue, 19 Mar 2019 18:21:05 +0100 Subject: [PATCH 110/249] Added missing private headers to cmake install. These are included by private.h and should be installed alongside. Signed-off-by: Kim Topley --- private/CMakeLists.txt | 2 ++ src/{unifdef.mScwXz => unifdef.NeTZJq} | 0 2 files changed, 2 insertions(+) rename src/{unifdef.mScwXz => unifdef.NeTZJq} (100%) diff --git a/private/CMakeLists.txt b/private/CMakeLists.txt index a2ee9bdd9..f77a92d41 100644 --- a/private/CMakeLists.txt +++ b/private/CMakeLists.txt @@ -14,6 +14,8 @@ if (INSTALL_PRIVATE_HEADERS) private.h queue_private.h source_private.h + time_private.h + workloop_private.h DESTINATION "${INSTALL_DISPATCH_HEADERS_DIR}") endif() diff --git a/src/unifdef.mScwXz b/src/unifdef.NeTZJq similarity index 100% rename from src/unifdef.mScwXz rename to src/unifdef.NeTZJq From e17e1be8b4d8f5ea9be35e57060e9217f9a669a2 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 20 Mar 2019 07:18:56 -0700 Subject: [PATCH 111/249] Merge pull request #460 from triplef/fix-cmake-private-install Added missing private headers to cmake install. 
Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.NeTZJq => unifdef.dS34ML} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.NeTZJq => unifdef.dS34ML} (100%) diff --git a/PATCHES b/PATCHES index ea0882af3..e31aad2d9 100644 --- a/PATCHES +++ b/PATCHES @@ -476,3 +476,4 @@ github commits starting with 29bdc2f from [d44acc0] APPLIED rdar://54572081 [4659503] APPLIED rdar://54572081 [60fdf80] APPLIED rdar://54572081 +[7a74af4] APPLIED rdar://54572081 diff --git a/src/unifdef.NeTZJq b/src/unifdef.dS34ML similarity index 100% rename from src/unifdef.NeTZJq rename to src/unifdef.dS34ML From b7162835a4b50f0f16cc3ca2c02cbe6df1f5695b Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 20 Mar 2019 14:32:48 -0700 Subject: [PATCH 112/249] Merge pull request #457 from triplef/fix-signatures Fixed conflicting method signature for _dispatch_install_thread_detach_callback() Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.dS34ML => unifdef.2qxfve} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.dS34ML => unifdef.2qxfve} (100%) diff --git a/PATCHES b/PATCHES index e31aad2d9..9588ea79d 100644 --- a/PATCHES +++ b/PATCHES @@ -477,3 +477,4 @@ github commits starting with 29bdc2f from [4659503] APPLIED rdar://54572081 [60fdf80] APPLIED rdar://54572081 [7a74af4] APPLIED rdar://54572081 +[f20349f] APPLIED rdar://54572081 diff --git a/src/unifdef.dS34ML b/src/unifdef.2qxfve similarity index 100% rename from src/unifdef.dS34ML rename to src/unifdef.2qxfve From d14663f48c703fd9037a3f66c7283125615ffd75 Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Wed, 20 Mar 2019 16:38:22 +0100 Subject: [PATCH 113/249] Fix printf format macro for `dsema_value` Signed-off-by: Kim Topley --- src/semaphore.c | 2 +- src/{unifdef.2qxfve => unifdef.tWFV9f} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.2qxfve => unifdef.tWFV9f} (100%) diff --git a/src/semaphore.c b/src/semaphore.c index 69da75f97..b706b0b88 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -76,7 +76,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, - "value = %ld, orig = %" PRIdPTR " }", dsema->dsema_value, dsema->dsema_orig); + "value = %" PRIdPTR ", orig = %" PRIdPTR " }", dsema->dsema_value, dsema->dsema_orig); return offset; } diff --git a/src/unifdef.2qxfve b/src/unifdef.tWFV9f similarity index 100% rename from src/unifdef.2qxfve rename to src/unifdef.tWFV9f From 986610e590376331bb9411dbbfa54fc13c99cd3d Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 20 Mar 2019 14:58:28 -0700 Subject: [PATCH 114/249] Merge pull request #463 from triplef/fix-printf-dsma-value Fix printf format macro for `dsema_value` Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.tWFV9f => unifdef.1cCBsM} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.tWFV9f => unifdef.1cCBsM} (100%) diff --git a/PATCHES b/PATCHES index 9588ea79d..5270b009c 100644 --- a/PATCHES +++ b/PATCHES @@ -478,3 +478,4 @@ github commits starting with 29bdc2f from [60fdf80] APPLIED rdar://54572081 [7a74af4] APPLIED rdar://54572081 [f20349f] APPLIED rdar://54572081 +[ef9364c] APPLIED rdar://54572081 diff --git a/src/unifdef.tWFV9f b/src/unifdef.1cCBsM similarity index 100% rename from src/unifdef.tWFV9f rename to src/unifdef.1cCBsM From 536b9d09c309b40f539c31137e650c3129338291 Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Wed, 20 Mar 2019 16:33:22 +0100 Subject: [PATCH 115/249] Use sched_yield() as fallback for 
preemption yield. Signed-off-by: Kim Topley --- src/shims/yield.h | 4 ++-- src/{unifdef.1cCBsM => unifdef.JTJvgv} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename src/{unifdef.1cCBsM => unifdef.JTJvgv} (100%) diff --git a/src/shims/yield.h b/src/shims/yield.h index 382ad42c9..53eb80065 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -157,8 +157,8 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #define _dispatch_preemption_yield(n) { (void)n; Sleep(0); } #define _dispatch_preemption_yield_to(th, n) { (void)n; Sleep(0); } #else -#define _dispatch_preemption_yield(n) { (void)n; pthread_yield(); } -#define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield(); } +#define _dispatch_preemption_yield(n) { (void)n; sched_yield(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } #endif // HAVE_MACH #pragma mark - diff --git a/src/unifdef.1cCBsM b/src/unifdef.JTJvgv similarity index 100% rename from src/unifdef.1cCBsM rename to src/unifdef.JTJvgv From 436104dbcdc5ea115405f21a0cc00eb67e3cdb3f Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 21 Mar 2019 06:33:05 -0700 Subject: [PATCH 116/249] Merge pull request #459 from triplef/fix-sched-yield-android Use sched_yield() as fallback for preemption yield Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.JTJvgv => unifdef.cCrDkv} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.JTJvgv => unifdef.cCrDkv} (100%) diff --git a/PATCHES b/PATCHES index 5270b009c..489cf9579 100644 --- a/PATCHES +++ b/PATCHES @@ -479,3 +479,4 @@ github commits starting with 29bdc2f from [7a74af4] APPLIED rdar://54572081 [f20349f] APPLIED rdar://54572081 [ef9364c] APPLIED rdar://54572081 +[9d485ca] APPLIED rdar://54572081 diff --git a/src/unifdef.JTJvgv b/src/unifdef.cCrDkv similarity index 100% rename from src/unifdef.JTJvgv rename to src/unifdef.cCrDkv From 2fc6b18d1f2d5553287c330037e933ed6231abee Mon Sep 17 00:00:00 2001 From: Ron Olson Date: Tue, 19 Mar 2019 12:26:28 -0500 Subject: [PATCH 117/249] Added underscores to gettid(void) to remove name clash with system-provided gettid() in /usr/include/bits/unistd_ext.h that is not static Signed-off-by: Kim Topley --- src/queue.c | 8 ++++---- src/{unifdef.cCrDkv => unifdef.oD6HOK} | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) rename src/{unifdef.cCrDkv => unifdef.oD6HOK} (99%) diff --git a/src/queue.c b/src/queue.c index a34d7f4d3..8e31be056 100644 --- a/src/queue.c +++ b/src/queue.c @@ -7236,21 +7236,21 @@ libdispatch_init(void) #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +__gettid(void) { return (pid_t)syscall(SYS_gettid); } #elif defined(__FreeBSD__) DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +__gettid(void) { return (pid_t)pthread_getthreadid_np(); } #elif defined(_WIN32) DISPATCH_ALWAYS_INLINE static inline DWORD -gettid(void) +__gettid(void) { return GetCurrentThreadId(); } @@ -7365,7 +7365,7 @@ libdispatch_tsd_init(void) #else FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); #endif // defined(_WIN32) - __dispatch_tsd.tid = gettid(); + __dispatch_tsd.tid = __gettid(); } DISPATCH_NOTHROW diff --git a/src/unifdef.cCrDkv b/src/unifdef.oD6HOK similarity index 99% rename from src/unifdef.cCrDkv rename to src/unifdef.oD6HOK index 0ce4e283f..1e961c4ec 100644 --- a/src/unifdef.cCrDkv +++ b/src/unifdef.oD6HOK @@ -7843,21 +7843,21 @@ libdispatch_init(void) #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +__gettid(void) { return (pid_t)syscall(SYS_gettid); } #elif 
defined(__FreeBSD__) DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +__gettid(void) { return (pid_t)pthread_getthreadid_np(); } #elif defined(_WIN32) DISPATCH_ALWAYS_INLINE static inline DWORD -gettid(void) +__gettid(void) { return GetCurrentThreadId(); } @@ -7972,7 +7972,7 @@ libdispatch_tsd_init(void) #else FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); #endif // defined(_WIN32) - __dispatch_tsd.tid = gettid(); + __dispatch_tsd.tid = __gettid(); } DISPATCH_NOTHROW From f2bbd6b404d4be05b907726ab3fa22d04dffb906 Mon Sep 17 00:00:00 2001 From: Ron Olson Date: Thu, 21 Mar 2019 09:17:17 -0500 Subject: [PATCH 118/249] Changed __gettid() to _gettid() to be consistent with existing naming conventions Signed-off-by: Kim Topley --- src/queue.c | 8 ++++---- src/{unifdef.oD6HOK => unifdef.Wwwa0y} | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) rename src/{unifdef.oD6HOK => unifdef.Wwwa0y} (99%) diff --git a/src/queue.c b/src/queue.c index 8e31be056..1ef5f093e 100644 --- a/src/queue.c +++ b/src/queue.c @@ -7236,21 +7236,21 @@ libdispatch_init(void) #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t -__gettid(void) +_gettid(void) { return (pid_t)syscall(SYS_gettid); } #elif defined(__FreeBSD__) DISPATCH_ALWAYS_INLINE static inline pid_t -__gettid(void) +_gettid(void) { return (pid_t)pthread_getthreadid_np(); } #elif defined(_WIN32) DISPATCH_ALWAYS_INLINE static inline DWORD -__gettid(void) +_gettid(void) { return GetCurrentThreadId(); } @@ -7365,7 +7365,7 @@ libdispatch_tsd_init(void) #else FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); #endif // defined(_WIN32) - __dispatch_tsd.tid = __gettid(); + __dispatch_tsd.tid = _gettid(); } DISPATCH_NOTHROW diff --git a/src/unifdef.oD6HOK b/src/unifdef.Wwwa0y similarity index 99% rename from src/unifdef.oD6HOK rename to src/unifdef.Wwwa0y index 1e961c4ec..360b3155a 100644 --- a/src/unifdef.oD6HOK +++ b/src/unifdef.Wwwa0y @@ -7843,21 +7843,21 @@ libdispatch_init(void) #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t -__gettid(void) +_gettid(void) { return (pid_t)syscall(SYS_gettid); } #elif defined(__FreeBSD__) DISPATCH_ALWAYS_INLINE static inline pid_t -__gettid(void) +_gettid(void) { return (pid_t)pthread_getthreadid_np(); } #elif defined(_WIN32) DISPATCH_ALWAYS_INLINE static inline DWORD -__gettid(void) +_gettid(void) { return GetCurrentThreadId(); } @@ -7972,7 +7972,7 @@ libdispatch_tsd_init(void) #else FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); #endif // defined(_WIN32) - __dispatch_tsd.tid = __gettid(); + __dispatch_tsd.tid = _gettid(); } DISPATCH_NOTHROW From 96c28463cc318df4efa6ee4d041deaedade2c205 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 21 Mar 2019 10:23:45 -0700 Subject: [PATCH 119/249] Merge pull request #461 from tachoknight/master Added underscores to gettid(void) to remove name clash Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.Wwwa0y => unifdef.tYpjuE} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.Wwwa0y => unifdef.tYpjuE} (100%) diff --git a/PATCHES b/PATCHES index 489cf9579..08e83f1be 100644 --- a/PATCHES +++ b/PATCHES @@ -480,3 +480,4 @@ github commits starting with 29bdc2f from [f20349f] APPLIED rdar://54572081 [ef9364c] APPLIED rdar://54572081 [9d485ca] APPLIED rdar://54572081 +[cbd70d1] APPLIED rdar://54572081 diff --git a/src/unifdef.Wwwa0y b/src/unifdef.tYpjuE similarity index 100% rename from src/unifdef.Wwwa0y rename to src/unifdef.tYpjuE From 947047bb17035ca12575723a0a0788752f7fc29e Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Wed, 
20 Mar 2019 16:30:21 +0100 Subject: [PATCH 120/249] Added missing macros in generic_sys_queue.h Signed-off-by: Kim Topley --- src/shims/generic_sys_queue.h | 11 +++++++++++ src/{unifdef.tYpjuE => unifdef.bPdVuT} | 0 2 files changed, 11 insertions(+) rename src/{unifdef.tYpjuE => unifdef.bPdVuT} (100%) diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h index c6c658706..fd4ac1d48 100644 --- a/src/shims/generic_sys_queue.h +++ b/src/shims/generic_sys_queue.h @@ -110,6 +110,8 @@ struct type *le_prev; \ } +#define LIST_EMPTY(head) ((head)->lh_first == NULL) + #define LIST_FIRST(head) ((head)->lh_first) #define LIST_FOREACH(var, head, field) \ @@ -117,6 +119,15 @@ (var); \ (var) = LIST_NEXT((var), field)) +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST((head)); \ + (var) && ((tvar) = LIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define LIST_INIT(head) do { \ + LIST_FIRST((head)) = NULL; \ +} while (0) + #define LIST_NEXT(elm, field) ((elm)->field.le_next) #define LIST_REMOVE(elm, field) do { \ diff --git a/src/unifdef.tYpjuE b/src/unifdef.bPdVuT similarity index 100% rename from src/unifdef.tYpjuE rename to src/unifdef.bPdVuT From d71fa40da1447c58289c7c46e98fa3fdd011bea4 Mon Sep 17 00:00:00 2001 From: Frederik Seiffert Date: Wed, 20 Mar 2019 16:30:44 +0100 Subject: [PATCH 121/249] Use generic_sys_queue.h on Android. The Android implementation of sys/queue.h is missing definitions for TAILQ_CONCAT() and LIST_FOREACH_SAFE(). Signed-off-by: Kim Topley --- src/internal.h | 2 +- src/shims.h | 5 ++++- src/{unifdef.bPdVuT => unifdef.RYLv1F} | 0 3 files changed, 5 insertions(+), 2 deletions(-) rename src/{unifdef.bPdVuT => unifdef.RYLv1F} (100%) diff --git a/src/internal.h b/src/internal.h index 990bfbe4a..117c11adc 100644 --- a/src/internal.h +++ b/src/internal.h @@ -277,12 +277,12 @@ upcast(dispatch_object_t dou) #if defined(_WIN32) #include #else -#include #include #ifdef __ANDROID__ #include #else #include +#include #endif /* __ANDROID__ */ #include #include diff --git a/src/shims.h b/src/shims.h index 6cb159b62..9d18bec68 100644 --- a/src/shims.h +++ b/src/shims.h @@ -31,9 +31,12 @@ #include #else // defined(_WIN32) #include "shims/generic_win_stubs.h" -#include "shims/generic_sys_queue.h" #endif // defined(_WIN32) +#if defined(_WIN32) || defined(__ANDROID__) +#include "shims/generic_sys_queue.h" +#endif + #ifdef __ANDROID__ #include "shims/android_stubs.h" #endif // __ANDROID__ diff --git a/src/unifdef.bPdVuT b/src/unifdef.RYLv1F similarity index 100% rename from src/unifdef.bPdVuT rename to src/unifdef.RYLv1F From ba2e4f0cd1f3f73b71894eff32460fb9ac5cf667 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 21 Mar 2019 10:34:00 -0700 Subject: [PATCH 122/249] Merge pull request #458 from triplef/fix-missing-queue-defs Added missing queue macros for building on Android Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.RYLv1F => unifdef.W3wmwv} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.RYLv1F => unifdef.W3wmwv} (100%) diff --git a/PATCHES b/PATCHES index 08e83f1be..9bb542b2e 100644 --- a/PATCHES +++ b/PATCHES @@ -481,3 +481,4 @@ github commits starting with 29bdc2f from [ef9364c] APPLIED rdar://54572081 [9d485ca] APPLIED rdar://54572081 [cbd70d1] APPLIED rdar://54572081 +[6e1825a] APPLIED rdar://54572081 diff --git a/src/unifdef.RYLv1F b/src/unifdef.W3wmwv similarity index 100% rename from src/unifdef.RYLv1F rename to src/unifdef.W3wmwv From 828f7db55633ee97020c7d57d295484acd49775c Mon Sep 17 00:00:00 2001 From: 
Daniel Rodríguez Troitiño
Date: Tue, 5 Mar 2019 14:26:11 -0800
Subject: [PATCH 123/249] [cmake] Skip regex when CMAKE_STATIC_LIBRARY_PREFIX
 is empty.

On platforms like Windows, there is no prefix for static libraries.
Interpolating it into the regex would produce the regex "^", which does
not appear to be valid. Since the regex was only removing the prefix,
we can simply skip the operation and the result will be the same.

Do the same for CMAKE_STATIC_LIBRARY_SUFFIX as well: in that case
Windows does have a value, but an empty value would fail in the same
way when creating the regex.

Signed-off-by: Kim Topley
---
 cmake/modules/SwiftSupport.cmake       | 8 ++++++--
 src/{unifdef.W3wmwv => unifdef.C4lVJV} | 0
 2 files changed, 6 insertions(+), 2 deletions(-)
 rename src/{unifdef.W3wmwv => unifdef.C4lVJV} (100%)

diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake
index 60f8b45a3..adcf42fbc 100644
--- a/cmake/modules/SwiftSupport.cmake
+++ b/cmake/modules/SwiftSupport.cmake
@@ -153,8 +153,12 @@ function(add_swift_target target)
     add_library(${target}-static STATIC ${objs})
     add_dependencies(${target}-static ${AST_DEPENDS})
     get_filename_component(ast_output_bn ${AST_OUTPUT} NAME)
-    string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn})
-    string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn})
+    if(NOT CMAKE_STATIC_LIBRARY_PREFIX STREQUAL "")
+      string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn})
+    endif()
+    if(NOT CMAKE_STATIC_LIBRARY_SUFFIX STREQUAL "")
+      string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn})
+    endif()
     get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY)
     set_target_properties(${target}-static
                           PROPERTIES
diff --git a/src/unifdef.W3wmwv b/src/unifdef.C4lVJV
similarity index 100%
rename from src/unifdef.W3wmwv
rename to src/unifdef.C4lVJV

From f9b904f6959290dd7da542d8e428dc5b62cda390 Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Fri, 22 Mar 2019 08:17:13 -0700
Subject: [PATCH 124/249] Merge pull request #452 from drodriguez/fix-regex-windows

[cmake] Skip regex when CMAKE_STATIC_LIBRARY_PREFIX is empty.

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.C4lVJV => unifdef.T1e1x2} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.C4lVJV => unifdef.T1e1x2} (100%)

diff --git a/PATCHES b/PATCHES
index 9bb542b2e..9447ac556 100644
--- a/PATCHES
+++ b/PATCHES
@@ -482,3 +482,4 @@ github commits starting with 29bdc2f from
 [9d485ca] APPLIED rdar://54572081
 [cbd70d1] APPLIED rdar://54572081
 [6e1825a] APPLIED rdar://54572081
+[319bd33] APPLIED rdar://54572081
diff --git a/src/unifdef.C4lVJV b/src/unifdef.T1e1x2
similarity index 100%
rename from src/unifdef.C4lVJV
rename to src/unifdef.T1e1x2

From b106c4acc9a8a05940173dcbb25a5f688e616f60 Mon Sep 17 00:00:00 2001
From: Frederik Seiffert
Date: Fri, 22 Mar 2019 15:07:32 +0100
Subject: [PATCH 125/249] Use SYS_gettid on Android.
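A minimal sketch of the raw-syscall path this change enables (Linux and Android only; assumes <sys/syscall.h> defines SYS_gettid):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        // Fetch the kernel thread id directly, independent of whether the
        // libc (Bionic, glibc >= 2.30, ...) ships a gettid() wrapper.
        pid_t tid = (pid_t)syscall(SYS_gettid);
        printf("tid = %d\n", (int)tid);
        return 0;
    }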
Signed-off-by: Kim Topley
---
 src/queue.c | 2 --
 src/{unifdef.T1e1x2 => unifdef.H9ar29} | 2 --
 2 files changed, 4 deletions(-)
 rename src/{unifdef.T1e1x2 => unifdef.H9ar29} (99%)

diff --git a/src/queue.c b/src/queue.c
index 1ef5f093e..cdd73b3f1 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -7232,7 +7232,6 @@ libdispatch_init(void)
 #include
 #endif

-#ifndef __ANDROID__
 #ifdef SYS_gettid
 DISPATCH_ALWAYS_INLINE
 static inline pid_t
@@ -7257,7 +7256,6 @@ _gettid(void)
 #else
 #error "SYS_gettid unavailable on this system"
 #endif /* SYS_gettid */
-#endif /* ! __ANDROID__ */

 #define _tsd_call_cleanup(k, f) do { \
 	if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \
diff --git a/src/unifdef.T1e1x2 b/src/unifdef.H9ar29
similarity index 99%
rename from src/unifdef.T1e1x2
rename to src/unifdef.H9ar29
index 360b3155a..76ec87402 100644
--- a/src/unifdef.T1e1x2
+++ b/src/unifdef.H9ar29
@@ -7839,7 +7839,6 @@ libdispatch_init(void)
 #include
 #endif

-#ifndef __ANDROID__
 #ifdef SYS_gettid
 DISPATCH_ALWAYS_INLINE
 static inline pid_t
@@ -7864,7 +7863,6 @@ _gettid(void)
 #else
 #error "SYS_gettid unavailable on this system"
 #endif /* SYS_gettid */
-#endif /* ! __ANDROID__ */

 #define _tsd_call_cleanup(k, f) do { \
 	if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \

From 86ec7b66041ec2774740bf5934e20086a279b18d Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Fri, 22 Mar 2019 10:19:38 -0700
Subject: [PATCH 126/249] Merge pull request #465 from triplef/fix-gettid-android

Use SYS_gettid on Android

Signed-off-by: Kim Topley
---
 PATCHES | 1 +
 src/{unifdef.H9ar29 => unifdef.5zo64r} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.H9ar29 => unifdef.5zo64r} (100%)

diff --git a/PATCHES b/PATCHES
index 9447ac556..f0284a2b1 100644
--- a/PATCHES
+++ b/PATCHES
@@ -483,3 +483,4 @@ github commits starting with 29bdc2f from
 [cbd70d1] APPLIED rdar://54572081
 [6e1825a] APPLIED rdar://54572081
 [319bd33] APPLIED rdar://54572081
+[6c5b3ba] APPLIED rdar://54572081
diff --git a/src/unifdef.H9ar29 b/src/unifdef.5zo64r
similarity index 100%
rename from src/unifdef.H9ar29
rename to src/unifdef.5zo64r

From 120ff0230e5309d626eb86f1a2bf63eb13f0d4fb Mon Sep 17 00:00:00 2001
From: Kim Topley
Date: Fri, 22 Mar 2019 08:06:44 -0700
Subject: [PATCH 127/249] Workaround for Ubuntu 18.04 clang crashes in swift-corelibs-libdispatch

rdar://problem/49023449
Signed-off-by: Kim Topley
---
 src/init.c | 28 ++++++++++++++++++++++++++
 src/internal.h | 2 ++
 src/{unifdef.5zo64r => unifdef.SbQFU8} | 0
 3 files changed, 30 insertions(+)
 rename src/{unifdef.5zo64r => unifdef.SbQFU8} (100%)

diff --git a/src/init.c b/src/init.c
index d5db67b61..0068cd1dd 100644
--- a/src/init.c
+++ b/src/init.c
@@ -29,6 +29,14 @@
 #include "protocolServer.h"
 #endif

+#ifdef __linux__
+// The clang compiler in Ubuntu 18.04 has a bug that causes it to crash
+// when compiling _dispatch_bug_kevent_vanished(). As a workaround, use a
+// less capable version of this function on Linux until a fixed version
+// of the compiler is available.
+#define RDAR_49023449 1
+#endif // __linux__
+
 #pragma mark -
 #pragma mark dispatch_init

@@ -964,6 +972,7 @@ _dispatch_continuation_get_function_symbol(dispatch_continuation_t dc)
 	return dc->dc_func;
 }

+#if HAVE_MACH
 void
 _dispatch_bug_kevent_client(const char *msg, const char *filter,
 		const char *operation, int err, uint64_t ident, uint64_t udata,
@@ -1007,6 +1016,23 @@ _dispatch_bug_kevent_client(const char *msg, const char *filter,
 		msg, strerror(err), err, udata, filter, ident, ident, func);
 	}
 }
+#endif // HAVE_MACH
+
+#if RDAR_49023449
+
+// The clang compiler on Ubuntu 18.04 crashes when compiling the full version of
+// this function. This reduced version avoids the crash but logs less useful
+// information.
+void
+_dispatch_bug_kevent_vanished(dispatch_unote_t du)
+{
+	_dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished",
+			"BUG in libdispatch client: %s, monitored resource vanished before "
+			"the source cancel handler was invoked",
+			dux_type(du._du)->dst_kind);
+}
+
+#else // RDAR_49023449

 void
 _dispatch_bug_kevent_vanished(dispatch_unote_t du)
@@ -1036,6 +1062,8 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du)
 			du._du->du_ident, du._du->du_ident, func);
 }

+#endif // RDAR_49023449
+
 DISPATCH_NOINLINE DISPATCH_WEAK
 void
 _dispatch_bug_deprecated(const char *msg)
diff --git a/src/internal.h b/src/internal.h
index 117c11adc..0c40ac83d 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -487,10 +487,12 @@ void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr);

 struct dispatch_unote_class_s;

+#if HAVE_MACH
 DISPATCH_NOINLINE DISPATCH_COLD
 void _dispatch_bug_kevent_client(const char *msg, const char *filter,
 		const char *operation, int err, uint64_t ident, uint64_t udata,
 		struct dispatch_unote_class_s *du);
+#endif // HAVE_MACH

 DISPATCH_NOINLINE DISPATCH_COLD
 void _dispatch_bug_kevent_vanished(struct dispatch_unote_class_s *du);
diff --git a/src/unifdef.5zo64r b/src/unifdef.SbQFU8
similarity index 100%
rename from src/unifdef.5zo64r
rename to src/unifdef.SbQFU8

From d95bb27752de4dba001a8d14072d7f067c4f36d3 Mon Sep 17 00:00:00 2001
From: Mishal Shah
Date: Fri, 22 Mar 2019 11:45:03 -0700
Subject: [PATCH 128/249] Merge pull request #466 from apple/kwt-ubuntu1804-clang-workaround

Workaround for Ubuntu 18.04 clang crashes in swift-corelibs-libdispatch

Signed-off-by: Kim Topley
---
 PATCHES | 1 +
 src/{unifdef.SbQFU8 => unifdef.ooxldD} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.SbQFU8 => unifdef.ooxldD} (100%)

diff --git a/PATCHES b/PATCHES
index f0284a2b1..3573376ff 100644
--- a/PATCHES
+++ b/PATCHES
@@ -484,3 +484,4 @@ github commits starting with 29bdc2f from
 [6e1825a] APPLIED rdar://54572081
 [319bd33] APPLIED rdar://54572081
 [6c5b3ba] APPLIED rdar://54572081
+[7e7677b] APPLIED rdar://54572081
diff --git a/src/unifdef.SbQFU8 b/src/unifdef.ooxldD
similarity index 100%
rename from src/unifdef.SbQFU8
rename to src/unifdef.ooxldD

From 4d5f99d9b3bdc45bc7001b448b6dc5721bbc468c Mon Sep 17 00:00:00 2001
From: Kim Topley
Date: Mon, 1 Apr 2019 09:50:19 -0700
Subject: [PATCH 129/249] Fix typo in documentation for "deadline" parameter.
rdar://problem/48919714 Signed-off-by: Kim Topley --- src/swift/Queue.swift | 8 ++++---- src/{unifdef.ooxldD => unifdef.Ads4hc} | 0 2 files changed, 4 insertions(+), 4 deletions(-) rename src/{unifdef.ooxldD => unifdef.Ads4hc} (100%) diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 377e27fdd..11b68dc7a 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -371,7 +371,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter qos: the QoS at which the work item should be executed. /// Defaults to `DispatchQoS.unspecified`. @@ -402,7 +402,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter qos: the QoS at which the work item should be executed. /// Defaults to `DispatchQoS.unspecified`. @@ -433,7 +433,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(deadline:qos:flags:execute:)` @@ -448,7 +448,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(wallDeadline:qos:flags:execute:)` diff --git a/src/unifdef.ooxldD b/src/unifdef.Ads4hc similarity index 100% rename from src/unifdef.ooxldD rename to src/unifdef.Ads4hc From 093f8f46cff7f8fd249c5ad3078205cafc52cb2e Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 1 Apr 2019 14:56:53 -0700 Subject: [PATCH 130/249] Merge pull request #468 from apple/kwt-fix-doc-typo Fix typo in documentation for "deadline" parameter. 
Signed-off-by: Kim Topley
---
 PATCHES | 1 +
 src/{unifdef.Ads4hc => unifdef.6m3fXL} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.Ads4hc => unifdef.6m3fXL} (100%)

diff --git a/PATCHES b/PATCHES
index 3573376ff..e91938b4b 100644
--- a/PATCHES
+++ b/PATCHES
@@ -485,3 +485,4 @@ github commits starting with 29bdc2f from
 [319bd33] APPLIED rdar://54572081
 [6c5b3ba] APPLIED rdar://54572081
 [7e7677b] APPLIED rdar://54572081
+[9002f70] APPLIED rdar://54572081
diff --git a/src/unifdef.Ads4hc b/src/unifdef.6m3fXL
similarity index 100%
rename from src/unifdef.Ads4hc
rename to src/unifdef.6m3fXL

From 0f74ca64b7602e15f4f517de7f78ba7d15daba18 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 26 Mar 2019 09:23:58 -0700
Subject: [PATCH 131/249] build: address -Werror,-Wcovered-switch-default error

The switch is fully covered over the enumeration, and newer clang objects
to the `default` case in such a situation. Remove the `default` case,
which should have no functional change.

Signed-off-by: Kim Topley
---
 src/init.c | 2 --
 src/{unifdef.6m3fXL => unifdef.c7s3B7} | 0
 2 files changed, 2 deletions(-)
 rename src/{unifdef.6m3fXL => unifdef.c7s3B7} (100%)

diff --git a/src/init.c b/src/init.c
index 0068cd1dd..7367c77c4 100644
--- a/src/init.c
+++ b/src/init.c
@@ -541,8 +541,6 @@ dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa,
 	case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
 	case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
 		break;
-	default:
-		return (dispatch_queue_attr_t)dqa;
 	}
 	dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
 	dqai.dqai_autorelease_frequency = (uint16_t)frequency;
diff --git a/src/unifdef.6m3fXL b/src/unifdef.c7s3B7
similarity index 100%
rename from src/unifdef.6m3fXL
rename to src/unifdef.c7s3B7

From 8bee3ce0f39ae38f26850f4c3a976b0728744c8f Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 26 Mar 2019 09:36:04 -0700
Subject: [PATCH 132/249] build: switch `-Wswitch-enum` to `-Wswitch`

-Wswitch-enum requires all cases to be spelt out in a switch, even in the
presence of a `default` case. This, along with `-Wcovered-switch-default`,
means that `default` cannot be used in a switch over an enumerated type
and every case must be spelt out. Use `-Wswitch` instead, which allows
the use of `default` labels.
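A standalone illustration of the difference between the two flags (the
enum and function here are hypothetical, not from this codebase):

enum color { RED, GREEN, BLUE };

// Under -Wswitch-enum, clang warns that GREEN and BLUE are not listed
// even though the default label covers them; -Wswitch accepts this.
static int
classify(enum color c)
{
	switch (c) {
	case RED:
		return 1;
	default:
		return 0;
	}
}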
Signed-off-by: Kim Topley --- src/{unifdef.c7s3B7 => unifdef.yoQNGb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{unifdef.c7s3B7 => unifdef.yoQNGb} (100%) diff --git a/src/unifdef.c7s3B7 b/src/unifdef.yoQNGb similarity index 100% rename from src/unifdef.c7s3B7 rename to src/unifdef.yoQNGb From 71c94049c31982d1e5edadb57fd7baf1ff88135d Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 3 Apr 2019 12:53:53 -0700 Subject: [PATCH 133/249] Merge pull request #467 from compnerd/warnings Fix warnings (treated as errors) Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.yoQNGb => unifdef.OKMiGT} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.yoQNGb => unifdef.OKMiGT} (100%) diff --git a/PATCHES b/PATCHES index e91938b4b..edfbf81b1 100644 --- a/PATCHES +++ b/PATCHES @@ -486,3 +486,4 @@ github commits starting with 29bdc2f from [6c5b3ba] APPLIED rdar://54572081 [7e7677b] APPLIED rdar://54572081 [9002f70] APPLIED rdar://54572081 +[cc04868] APPLIED rdar://54572081 diff --git a/src/unifdef.yoQNGb b/src/unifdef.OKMiGT similarity index 100% rename from src/unifdef.yoQNGb rename to src/unifdef.OKMiGT From 8e960c23fa40486dcea590decdc2e1302b9bca19 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 8 Apr 2019 08:27:28 -0700 Subject: [PATCH 134/249] Merge pull request #470 from adierking/win_io_tests Port most remaining tests to Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.OKMiGT => unifdef.UM9aKQ} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.OKMiGT => unifdef.UM9aKQ} (100%) diff --git a/PATCHES b/PATCHES index edfbf81b1..e40760a23 100644 --- a/PATCHES +++ b/PATCHES @@ -487,3 +487,4 @@ github commits starting with 29bdc2f from [7e7677b] APPLIED rdar://54572081 [9002f70] APPLIED rdar://54572081 [cc04868] APPLIED rdar://54572081 +[dc0dd64] APPLIED rdar://54572081 diff --git a/src/unifdef.OKMiGT b/src/unifdef.UM9aKQ similarity index 100% rename from src/unifdef.OKMiGT rename to src/unifdef.UM9aKQ From b2656b2ec688c8d9eecb3392a1eb86aded169f83 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 5 Apr 2019 09:52:59 -0700 Subject: [PATCH 135/249] event: create a typedef for the unote ident type File handles and sockets on Windows are pointer sized. Ensure that we have enough space in the unote to track the source properly. 
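A small sketch of the truncation this guards against (the handle value is
hypothetical; on 64-bit Windows a HANDLE is pointer sized):

#include <stdint.h>

// Narrowing a pointer-sized handle to 32 bits silently drops the upper
// bits, e.g. 0x00007ff612345678 would become 0x12345678.
static uint32_t
narrow(uintptr_t handle)
{
	return (uint32_t)handle;
}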
Signed-off-by: Kim Topley --- src/event/event.c | 2 +- src/event/event_internal.h | 8 +++++++- src/{unifdef.UM9aKQ => unifdef.OWdqiy} | 0 3 files changed, 8 insertions(+), 2 deletions(-) rename src/{unifdef.UM9aKQ => unifdef.OWdqiy} (100%) diff --git a/src/event/event.c b/src/event/event.c index 8ea5d17dd..b908419d2 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -51,7 +51,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, } du->du_type = dst; du->du_can_be_wlh = dst->dst_per_trigger_qos; - du->du_ident = (uint32_t)handle; + du->du_ident = (dispatch_unote_ident_t)handle; du->du_filter = dst->dst_filter; du->du_fflags = (__typeof__(du->du_fflags))mask; if (dst->dst_flags & EV_UDATA_SPECIFIC) { diff --git a/src/event/event_internal.h b/src/event/event_internal.h index d0bdca8fb..1fb1de38a 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -123,11 +123,17 @@ _dispatch_timer_flags_from_clock(dispatch_clock_t clock) return (dispatch_unote_timer_flags_t)(clock << 2); } +#if defined(_WIN32) +typedef uintptr_t dispatch_unote_ident_t; +#else +typedef uint32_t dispatch_unote_ident_t; +#endif + #define DISPATCH_UNOTE_CLASS_HEADER() \ dispatch_source_type_t du_type; \ uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ os_atomic(dispatch_unote_state_t) du_state; \ - uint32_t du_ident; \ + dispatch_unote_ident_t du_ident; \ int8_t du_filter; \ uint8_t du_is_direct : 1; \ uint8_t du_is_timer : 1; \ diff --git a/src/unifdef.UM9aKQ b/src/unifdef.OWdqiy similarity index 100% rename from src/unifdef.UM9aKQ rename to src/unifdef.OWdqiy From e6b30d4849ca6ee8176cda51c9b0ce7267d4d471 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 8 Apr 2019 08:36:58 -0700 Subject: [PATCH 136/249] Merge pull request #469 from compnerd/size event: create a typedef for the unote ident type Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.OWdqiy => unifdef.Hg169u} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.OWdqiy => unifdef.Hg169u} (100%) diff --git a/PATCHES b/PATCHES index e40760a23..6d6d01b46 100644 --- a/PATCHES +++ b/PATCHES @@ -488,3 +488,4 @@ github commits starting with 29bdc2f from [9002f70] APPLIED rdar://54572081 [cc04868] APPLIED rdar://54572081 [dc0dd64] APPLIED rdar://54572081 +[a5f5a92] APPLIED rdar://54572081 diff --git a/src/unifdef.OWdqiy b/src/unifdef.Hg169u similarity index 100% rename from src/unifdef.OWdqiy rename to src/unifdef.Hg169u From bb63edb5a32c06eeb5949f2fd8a473c93ca5089d Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 10 Apr 2019 11:09:10 -0700 Subject: [PATCH 137/249] build: install swiftDispatch.dll to bin The DLLs are runtime components. The Windows target installs the runtime components into the bin directory, and the import libraries into the lib directory. This enables the runtime setup to be simplified on Windows. 
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 27 +++++++++++++++----------- src/{unifdef.Hg169u => unifdef.zysakI} | 0 2 files changed, 16 insertions(+), 11 deletions(-) rename src/{unifdef.Hg169u => unifdef.zysakI} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4da1b3f15..afa785b6c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -269,19 +269,24 @@ if(ENABLE_SWIFT) else() set(library_kind STATIC) endif() + set(swiftDispatch_OUTPUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_${library_kind}_LIBRARY_PREFIX}swiftDispatch${CMAKE_${library_kind}_LIBRARY_SUFFIX}) - install(FILES - ${swiftDispatch_OUTPUT_FILE} - DESTINATION - ${INSTALL_TARGET_DIR}) - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - if(BUILD_SHARED_LIBS) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} - DESTINATION - ${INSTALL_TARGET_DIR}) - endif() + + if(CMAKE_SYSTEM_NAME STREQUAL Windows AND BUILD_SHARED_LIBS) + install(FILES + ${swiftDispatch_OUTPUT_FILE} + DESTINATION + bin) + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} + DESTINATION + ${INSTALL_TARGET_DIR}) + else() + install(FILES + ${swiftDispatch_OUTPUT_FILE} + DESTINATION + ${INSTALL_TARGET_DIR}) endif() endif() diff --git a/src/unifdef.Hg169u b/src/unifdef.zysakI similarity index 100% rename from src/unifdef.Hg169u rename to src/unifdef.zysakI From 173e6edda6fbaeff6e2fae935c5b8b0b43e1430c Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 10 Apr 2019 13:12:51 -0700 Subject: [PATCH 138/249] Merge pull request #471 from compnerd/runtime-binaries build: install swiftDispatch.dll to bin Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.zysakI => unifdef.YbWazG} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.zysakI => unifdef.YbWazG} (100%) diff --git a/PATCHES b/PATCHES index 6d6d01b46..e710f29d0 100644 --- a/PATCHES +++ b/PATCHES @@ -489,3 +489,4 @@ github commits starting with 29bdc2f from [cc04868] APPLIED rdar://54572081 [dc0dd64] APPLIED rdar://54572081 [a5f5a92] APPLIED rdar://54572081 +[e5ba042] APPLIED rdar://54572081 diff --git a/src/unifdef.zysakI b/src/unifdef.YbWazG similarity index 100% rename from src/unifdef.zysakI rename to src/unifdef.YbWazG From 85c61dceb20ad3a33cb238ebf806044d8da3597b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 19 Apr 2019 15:07:42 -0700 Subject: [PATCH 139/249] shims: correct list macros This adjusts the `LIST_ENTRY` to define the structure correctly. This allows for the `LIST_REMOVE` and `LIST_INSERT_HEAD` macros to be defined properly and set the fields previous pointers as well when updating the list. This is needed to enable the use of the macros for list manipulations. Found while implementing file sources for Windows. 
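A minimal usage sketch of the corrected macros (the names are
illustrative, and it assumes the usual LIST_HEAD definition alongside
the shims below):

struct item {
	int value;
	LIST_ENTRY(item) link;
};

static LIST_HEAD(item_list, item) items;

// With le_prev as a pointer-to-pointer, LIST_REMOVE unlinks an element
// by updating *le_prev, which is either the previous element's le_next
// or the list head's lh_first.
static void
insert_then_remove(struct item *a)
{
	LIST_INIT(&items);
	LIST_INSERT_HEAD(&items, a, link);
	LIST_REMOVE(a, link);
}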
Signed-off-by: Kim Topley --- src/shims/generic_sys_queue.h | 7 ++++--- src/{unifdef.YbWazG => unifdef.Uqb0Kh} | 0 2 files changed, 4 insertions(+), 3 deletions(-) rename src/{unifdef.YbWazG => unifdef.Uqb0Kh} (100%) diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h index fd4ac1d48..301bf6760 100644 --- a/src/shims/generic_sys_queue.h +++ b/src/shims/generic_sys_queue.h @@ -107,7 +107,7 @@ #define LIST_ENTRY(type) struct { \ struct type *le_next; \ - struct type *le_prev; \ + struct type **le_prev; \ } #define LIST_EMPTY(head) ((head)->lh_first == NULL) @@ -133,13 +133,14 @@ #define LIST_REMOVE(elm, field) do { \ if (LIST_NEXT((elm), field) != NULL) \ LIST_NEXT((elm), field)->field.le_prev = (elm)->field.le_prev; \ + *(elm)->field.le_prev = LIST_NEXT((elm), field); \ } while (0) #define LIST_INSERT_HEAD(head, elm, field) do { \ if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ - LIST_FIRST((head))->field.le_prev = LIST_NEXT((elm), field); \ + LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field); \ LIST_FIRST((head)) = (elm); \ - (elm)->field.le_prev = LIST_FIRST((head)); \ + (elm)->field.le_prev = &LIST_FIRST((head)); \ } while (0) #endif // __DISPATCH_SHIMS_SYS_QUEUE__ diff --git a/src/unifdef.YbWazG b/src/unifdef.Uqb0Kh similarity index 100% rename from src/unifdef.YbWazG rename to src/unifdef.Uqb0Kh From 947b4dffc69a693aa616d082445a47319e933b51 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 25 Apr 2019 08:31:48 -0700 Subject: [PATCH 140/249] Merge pull request #472 from compnerd/prev-next-prev shims: correct list macros Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.Uqb0Kh => unifdef.yhDUzv} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.Uqb0Kh => unifdef.yhDUzv} (100%) diff --git a/PATCHES b/PATCHES index e710f29d0..e9d515e3a 100644 --- a/PATCHES +++ b/PATCHES @@ -490,3 +490,4 @@ github commits starting with 29bdc2f from [dc0dd64] APPLIED rdar://54572081 [a5f5a92] APPLIED rdar://54572081 [e5ba042] APPLIED rdar://54572081 +[a3bff44] APPLIED rdar://54572081 diff --git a/src/unifdef.Uqb0Kh b/src/unifdef.yhDUzv similarity index 100% rename from src/unifdef.Uqb0Kh rename to src/unifdef.yhDUzv From 27b1a9f5e7b8337d519bd1d0c105b6e318022d2d Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Fri, 3 May 2019 15:42:20 -0700 Subject: [PATCH 141/249] SR-9033: handle EPOLLHUP on Linux If epoll_wait() reports EPOLLHUP, we must respond immediately and unregister the event or else Dispatch will go into a spinloop. This makes dispatch_io_pipe_close (#476) pass on Linux with identical output to Darwin. 
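For background, standard epoll semantics (a sketch, not code from this
change): EPOLLHUP is reported even when it was never requested in the
event mask, so the drain loop must consume it explicitly.

#include <sys/epoll.h>

// If EPOLLHUP is reported and the fd is not removed from the epoll set,
// every subsequent epoll_wait() returns immediately with the same event,
// which is the spinloop this change prevents.
static void
handle_hangup(int epfd, const struct epoll_event *ev, int fd)
{
	if (ev->events & EPOLLHUP) {
		(void)epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL);
		/* ...then deliver EOF to the registered sources... */
	}
}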
Signed-off-by: Kim Topley --- src/event/event_epoll.c | 28 ++++++++++++++++++++++++++ src/{unifdef.yhDUzv => unifdef.I6EdjA} | 0 2 files changed, 28 insertions(+) rename src/{unifdef.yhDUzv => unifdef.I6EdjA} (100%) diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index 7c746c0f2..8210b923b 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -549,6 +549,20 @@ _dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) return (uintptr_t)n; } +static void +_dispatch_event_merge_hangup(dispatch_unote_t du) +{ + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state |= DU_STATE_NEEDS_DELETE; + du_state &= ~DU_STATE_ARMED; + _dispatch_unote_state_set(du, du_state); + uintptr_t data = 0; // EOF + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_DELETE|EV_DISPATCH, data, 0); +} + static void _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) { @@ -583,6 +597,20 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) } } + // SR-9033: EPOLLHUP is an unmaskable event which we must respond to + if (events & EPOLLHUP) { + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_hangup(du); + } + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_hangup(du); + } + epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); + return; + } + events = _dispatch_muxnote_armed_events(dmn); if (events) _dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD); } diff --git a/src/unifdef.yhDUzv b/src/unifdef.I6EdjA similarity index 100% rename from src/unifdef.yhDUzv rename to src/unifdef.I6EdjA From d9b009fe7e64490ff0a25f6d5933d4c53e2fc838 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 6 May 2019 14:05:38 -0700 Subject: [PATCH 142/249] Merge pull request #478 from gottesmm/sr-9033 Combined PR: SR-9033 handle EPOLLHUP Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.I6EdjA => unifdef.y6xoaF} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.I6EdjA => unifdef.y6xoaF} (100%) diff --git a/PATCHES b/PATCHES index e9d515e3a..00b00b3bf 100644 --- a/PATCHES +++ b/PATCHES @@ -491,3 +491,4 @@ github commits starting with 29bdc2f from [a5f5a92] APPLIED rdar://54572081 [e5ba042] APPLIED rdar://54572081 [a3bff44] APPLIED rdar://54572081 +[2e3d5c0] APPLIED rdar://54572081 diff --git a/src/unifdef.I6EdjA b/src/unifdef.y6xoaF similarity index 100% rename from src/unifdef.I6EdjA rename to src/unifdef.y6xoaF From 4fc6fdc5e5b3fa22e7f1510a306333cc6cf6ff43 Mon Sep 17 00:00:00 2001 From: Michael Roitzsch Date: Mon, 29 Apr 2019 10:27:05 +0200 Subject: [PATCH 143/249] explicitly ignore return value fixes compilation with _FORTIFY_SOURCE=2, which would otherwise fail with -Werror and -Wunused Signed-off-by: Kim Topley --- src/internal.h | 4 ++-- src/{unifdef.y6xoaF => unifdef.xjXGYT} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename src/{unifdef.y6xoaF => unifdef.xjXGYT} (100%) diff --git a/src/internal.h b/src/internal.h index 0c40ac83d..973f32271 100644 --- a/src/internal.h +++ b/src/internal.h @@ -1074,14 +1074,14 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, dispatch_assert(_length != -1); \ _msg = (char *)malloc((unsigned)_length + 1); \ dispatch_assert(_msg); \ - snprintf(_msg, (unsigned)_length + 
1, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ + (void)snprintf(_msg, (unsigned)_length + 1, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ _dispatch_assert_crash(_msg); \ free(_msg); \ } while (0) #else #define _dispatch_client_assert_fail(fmt, ...) do { \ char *_msg = NULL; \ - asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ + (void)asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ ##__VA_ARGS__); \ _dispatch_assert_crash(_msg); \ free(_msg); \ diff --git a/src/unifdef.y6xoaF b/src/unifdef.xjXGYT similarity index 100% rename from src/unifdef.y6xoaF rename to src/unifdef.xjXGYT From 309163b72e1f6f7c2fd86dd2e49f285237f4f5e6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 10 May 2019 09:13:03 -0700 Subject: [PATCH 144/249] Merge pull request #474 from mroi/patch-1 [SR-10559] libdispatch fails to build with _FORTIFY_SOURCE=2 Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.xjXGYT => unifdef.x8pm7c} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.xjXGYT => unifdef.x8pm7c} (100%) diff --git a/PATCHES b/PATCHES index 00b00b3bf..2a099c226 100644 --- a/PATCHES +++ b/PATCHES @@ -492,3 +492,4 @@ github commits starting with 29bdc2f from [e5ba042] APPLIED rdar://54572081 [a3bff44] APPLIED rdar://54572081 [2e3d5c0] APPLIED rdar://54572081 +[1482ec9] APPLIED rdar://54572081 diff --git a/src/unifdef.xjXGYT b/src/unifdef.x8pm7c similarity index 100% rename from src/unifdef.xjXGYT rename to src/unifdef.x8pm7c From ccef90b7ef65fdd35679ea87bb436344503a19b7 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 19 Apr 2019 15:43:11 -0700 Subject: [PATCH 145/249] dispatch: fix a couple of printf specifiers Convert a couple of specifiers to PRI* style to make them portable. Signed-off-by: Kim Topley --- src/init.c | 2 +- src/{unifdef.x8pm7c => unifdef.zeykj9} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.x8pm7c => unifdef.zeykj9} (100%) diff --git a/src/init.c b/src/init.c index 7367c77c4..adb32b966 100644 --- a/src/init.c +++ b/src/init.c @@ -1054,7 +1054,7 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du) _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", "BUG in libdispatch client: %s, monitored resource vanished before " "the source cancel handler was invoked " - "{ %p[%s], ident: %d / 0x%x, handler: %p }", + "{ %p[%s], ident: %" PRIdPTR " / 0x%" PRIxPTR ", handler: %p }", dux_type(du._du)->dst_kind, dou._dq, dou._dq->dq_label ? dou._dq->dq_label : "", du._du->du_ident, du._du->du_ident, func); diff --git a/src/unifdef.x8pm7c b/src/unifdef.zeykj9 similarity index 100% rename from src/unifdef.x8pm7c rename to src/unifdef.zeykj9 From 2559da479e67bd55f47227a973b6358c2e536f00 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 19 Apr 2019 15:47:39 -0700 Subject: [PATCH 146/249] windows: add initial cut of file sources This adds support for regular file types as an event source for dispatch. This leaves much to be desired (pipes, character devices), but improves the overall coverage. A number of additional tests now pass on Windows. 
Signed-off-by: Kim Topley --- src/event/event_windows.c | 209 ++++++++++++++++++++++++- src/{unifdef.zeykj9 => unifdef.6XaIiF} | 0 2 files changed, 201 insertions(+), 8 deletions(-) rename src/{unifdef.zeykj9 => unifdef.6XaIiF} (100%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 1e3fae7db..33a8bad73 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -27,28 +27,217 @@ enum _dispatch_windows_port { DISPATCH_PORT_TIMER_CLOCK_WALL, DISPATCH_PORT_TIMER_CLOCK_UPTIME, DISPATCH_PORT_TIMER_CLOCK_MONOTONIC, + DISPATCH_PORT_FILE_HANDLE, }; #pragma mark dispatch_unote_t +typedef struct dispatch_muxnote_s { + LIST_ENTRY(dispatch_muxnote_s) dmn_list; + dispatch_unote_ident_t dmn_ident; + int8_t dmn_filter; + enum _dispatch_muxnote_handle_type { + DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID, + DISPATCH_MUXNOTE_HANDLE_TYPE_FILE, + } dmn_handle_type; +} *dispatch_muxnote_t; + +static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) + _dispatch_sources[DSL_HASH_SIZE]; + +static SRWLOCK _dispatch_file_handles_lock = SRWLOCK_INIT; +static LIST_HEAD(, dispatch_unote_linkage_s) _dispatch_file_handles; + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_muxnote_bucket_s * +_dispatch_unote_muxnote_bucket(uint32_t ident) +{ + return &_dispatch_sources[DSL_HASH(ident)]; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_unote_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, + dispatch_unote_ident_t ident, int8_t filter) +{ + dispatch_muxnote_t dmn; + if (filter == EVFILT_WRITE) filter = EVFILT_READ; + LIST_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) { + break; + } + } + return dmn; +} + +static dispatch_muxnote_t +_dispatch_muxnote_create(dispatch_unote_t du) +{ + dispatch_muxnote_t dmn; + int8_t filter = du._du->du_filter; + HANDLE handle = (HANDLE)du._du->du_ident; + + dmn = _dispatch_calloc(1, sizeof(*dmn)); + if (dmn == NULL) { + DISPATCH_INTERNAL_CRASH(0, "_dispatch_calloc"); + } + dmn->dmn_ident = (dispatch_unote_ident_t)handle; + dmn->dmn_filter = filter; + + switch (filter) { + case EVFILT_SIGNAL: + WIN_PORT_ERROR(); + + case EVFILT_WRITE: + case EVFILT_READ: + switch (GetFileType(handle)) { + case FILE_TYPE_UNKNOWN: + // ensure that an invalid handle was not passed + (void)dispatch_assume(GetLastError() == NO_ERROR); + DISPATCH_INTERNAL_CRASH(0, "unknown handle type"); + + case FILE_TYPE_REMOTE: + DISPATCH_INTERNAL_CRASH(0, "unused handle type"); + + case FILE_TYPE_CHAR: + // The specified file is a character file, typically a + // LPT device or a console. + WIN_PORT_ERROR(); + + case FILE_TYPE_DISK: + // The specified file is a disk file + dmn->dmn_handle_type = + DISPATCH_MUXNOTE_HANDLE_TYPE_FILE; + break; + + case FILE_TYPE_PIPE: + // The specified file is a socket, a named pipe, or an + // anonymous pipe. 
+ WIN_PORT_ERROR(); + } + + break; + + default: + DISPATCH_INTERNAL_CRASH(0, "unexpected filter"); + } + + + return dmn; +} + +static void +_dispatch_muxnote_dispose(dispatch_muxnote_t dmn) +{ + free(dmn); +} + +DISPATCH_ALWAYS_INLINE +static BOOL +_dispatch_io_trigger(dispatch_muxnote_t dmn) +{ + BOOL bSuccess; + + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + bSuccess = PostQueuedCompletionStatus(hPort, 0, + (ULONG_PTR)DISPATCH_PORT_FILE_HANDLE, NULL); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + break; + } + + return bSuccess; +} + bool -_dispatch_unote_register_muxed(dispatch_unote_t du DISPATCH_UNUSED) +_dispatch_unote_register_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); - return false; + struct dispatch_muxnote_bucket_s *dmb; + dispatch_muxnote_t dmn; + + dmb = _dispatch_unote_muxnote_bucket(du._du->du_ident); + dmn = _dispatch_unote_muxnote_find(dmb, du._du->du_ident, + du._du->du_filter); + if (dmn) { + WIN_PORT_ERROR(); + } else { + dmn = _dispatch_muxnote_create(du); + if (dmn) { + if (_dispatch_io_trigger(dmn) == FALSE) { + _dispatch_muxnote_dispose(dmn); + dmn = NULL; + } else { + LIST_INSERT_HEAD(dmb, dmn, dmn_list); + } + } + } + + if (dmn) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + + AcquireSRWLockExclusive(&_dispatch_file_handles_lock); + LIST_INSERT_HEAD(&_dispatch_file_handles, dul, du_link); + ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + + dul->du_muxnote = dmn; + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, + DU_STATE_ARMED); + } + + return dmn != NULL; } void -_dispatch_unote_resume_muxed(dispatch_unote_t du DISPATCH_UNUSED) +_dispatch_unote_resume_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + dispatch_assert(_dispatch_unote_registered(du)); + _dispatch_io_trigger(dmn); } bool -_dispatch_unote_unregister_muxed(dispatch_unote_t du DISPATCH_UNUSED) +_dispatch_unote_unregister_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); - return false; + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + + AcquireSRWLockExclusive(&_dispatch_file_handles_lock); + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); + ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + dul->du_muxnote = NULL; + + LIST_REMOVE(dmn, dmn_list); + _dispatch_muxnote_dispose(dmn); + + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + return true; +} + +static void +_dispatch_event_merge_file_handle() +{ + dispatch_unote_linkage_t dul, dul_next; + + AcquireSRWLockExclusive(&_dispatch_file_handles_lock); + LIST_FOREACH_SAFE(dul, &_dispatch_file_handles, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); + } + ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); } #pragma mark timers @@ -221,6 +410,10 @@ _dispatch_event_loop_drain(uint32_t flags) _dispatch_event_merge_timer(DISPATCH_CLOCK_MONOTONIC); break; + case DISPATCH_PORT_FILE_HANDLE: + 
_dispatch_event_merge_file_handle(); + break; + default: DISPATCH_INTERNAL_CRASH(ulCompletionKey, "unsupported completion key"); diff --git a/src/unifdef.zeykj9 b/src/unifdef.6XaIiF similarity index 100% rename from src/unifdef.zeykj9 rename to src/unifdef.6XaIiF From 798fc5d0cf7825614da116ed687327f25e389508 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 10 May 2019 09:14:40 -0700 Subject: [PATCH 147/249] Merge pull request #473 from compnerd/file-sources Windows File Sources Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.6XaIiF => unifdef.nPIpcH} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.6XaIiF => unifdef.nPIpcH} (100%) diff --git a/PATCHES b/PATCHES index 2a099c226..530b7864f 100644 --- a/PATCHES +++ b/PATCHES @@ -493,3 +493,4 @@ github commits starting with 29bdc2f from [a3bff44] APPLIED rdar://54572081 [2e3d5c0] APPLIED rdar://54572081 [1482ec9] APPLIED rdar://54572081 +[6bf6cb1] APPLIED rdar://54572081 diff --git a/src/unifdef.6XaIiF b/src/unifdef.nPIpcH similarity index 100% rename from src/unifdef.6XaIiF rename to src/unifdef.nPIpcH From a5f4e31c4a4c19726494ceb062af062b3528bca9 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 29 Apr 2019 09:57:03 -0700 Subject: [PATCH 148/249] shims: correct `TAILQ_CONCAT` This fixes libdispatch on Windows where we use the shims rather than a system provided `sys/queue.h` as that is a BSD extension. Signed-off-by: Kim Topley --- src/shims/generic_sys_queue.h | 2 +- src/{unifdef.nPIpcH => unifdef.SR6P57} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.nPIpcH => unifdef.SR6P57} (100%) diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h index 301bf6760..b4b74aeef 100644 --- a/src/shims/generic_sys_queue.h +++ b/src/shims/generic_sys_queue.h @@ -95,7 +95,7 @@ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ (head1)->tq_last = (head2)->tq_first; \ - (head1)->tq_first->field.te_prev = (head1)->tq_last; \ + (head2)->tq_first->field.te_prev = (head1)->tq_last; \ (head1)->tq_last = (head2)->tq_last; \ TAILQ_INIT((head2)); \ } \ diff --git a/src/unifdef.nPIpcH b/src/unifdef.SR6P57 similarity index 100% rename from src/unifdef.nPIpcH rename to src/unifdef.SR6P57 From e203e93e6d1a5fdda39902f34a48905b5fd458ba Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 10 May 2019 09:15:30 -0700 Subject: [PATCH 149/249] Merge pull request #475 from compnerd/chasing-my-tail shims: correct `TAILQ_CONCAT` Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.SR6P57 => unifdef.qtI8Y2} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.SR6P57 => unifdef.qtI8Y2} (100%) diff --git a/PATCHES b/PATCHES index 530b7864f..f9cfd3b81 100644 --- a/PATCHES +++ b/PATCHES @@ -494,3 +494,4 @@ github commits starting with 29bdc2f from [2e3d5c0] APPLIED rdar://54572081 [1482ec9] APPLIED rdar://54572081 [6bf6cb1] APPLIED rdar://54572081 +[aa13cad] APPLIED rdar://54572081 diff --git a/src/unifdef.SR6P57 b/src/unifdef.qtI8Y2 similarity index 100% rename from src/unifdef.SR6P57 rename to src/unifdef.qtI8Y2 From 5dc5e076fb8ea5f96146890f46ac4bfff5b8bd05 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 9 May 2019 16:21:52 -0700 Subject: [PATCH 150/249] Swift: correct dispatch source construction on Win32 Windows uses handles internally for the dispatch sources. Convert the `fileDescriptor` parameter on the incoming point to `HANDLE`s when building the `DispatchSource`. This fixes passing invalid parameters to the Windows system functions. 
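The C-level translation involved is roughly this (a sketch of the CRT
behaviour; the helper name is hypothetical):

#include <io.h>     // _get_osfhandle (Windows CRT)
#include <stdint.h>

// A CRT file descriptor is an index into the C runtime's own table, not
// an OS HANDLE; it must be mapped back before Win32 APIs can use it.
static uintptr_t
os_handle_from_fd(int fd)
{
	intptr_t handle = _get_osfhandle(fd);
	if (handle == -1) {
		return 0; // INVALID_HANDLE_VALUE: fd is not a valid descriptor
	}
	return (uintptr_t)handle;
}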
Signed-off-by: Kim Topley --- src/swift/Source.swift | 19 +++++++++++++++++-- src/{unifdef.qtI8Y2 => unifdef.xjUhqg} | 0 2 files changed, 17 insertions(+), 2 deletions(-) rename src/{unifdef.qtI8Y2 => unifdef.xjUhqg} (100%) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index 8d9fcba35..a38066c72 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -12,6 +12,9 @@ import CDispatch import _SwiftDispatchOverlayShims +#if os(Windows) +import WinSDK +#endif extension DispatchSourceProtocol { @@ -179,7 +182,13 @@ extension DispatchSource { #endif public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { - let source = dispatch_source_create(_swift_dispatch_source_type_READ(), UInt(fileDescriptor), 0, queue?.__wrapped) +#if os(Windows) + let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) + if handle == UInt(bitPattern: INVALID_HANDLE_VALUE) { fatalError("unable to get underlying handle from file descriptor") } +#else + let handle: UInt = UInt(fileDescriptor) +#endif + let source = dispatch_source_create(_swift_dispatch_source_type_READ(), handle, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceRead } @@ -216,7 +225,13 @@ extension DispatchSource { #endif public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { - let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), UInt(fileDescriptor), 0, queue?.__wrapped) +#if os(Windows) + let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) + if handle == UInt(bitPattern: INVALID_HANDLE_VALUE) { fatalError("unable to get underlying handle from file descriptor") } +#else + let handle: UInt = UInt(fileDescriptor) +#endif + let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), handle, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceWrite } } diff --git a/src/unifdef.qtI8Y2 b/src/unifdef.xjUhqg similarity index 100% rename from src/unifdef.qtI8Y2 rename to src/unifdef.xjUhqg From 07591d5cb14ef486a6e75d38781434fa8b5e5be6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 10 May 2019 09:16:36 -0700 Subject: [PATCH 151/249] Merge pull request #479 from compnerd/get-a-handle-on-the-situation Swift: correct dispatch source construction on Win32 Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.xjUhqg => unifdef.NQ34TI} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.xjUhqg => unifdef.NQ34TI} (100%) diff --git a/PATCHES b/PATCHES index f9cfd3b81..d247af87a 100644 --- a/PATCHES +++ b/PATCHES @@ -495,3 +495,4 @@ github commits starting with 29bdc2f from [1482ec9] APPLIED rdar://54572081 [6bf6cb1] APPLIED rdar://54572081 [aa13cad] APPLIED rdar://54572081 +[b073d89] APPLIED rdar://54572081 diff --git a/src/unifdef.xjUhqg b/src/unifdef.NQ34TI similarity index 100% rename from src/unifdef.xjUhqg rename to src/unifdef.NQ34TI From d268787ec6ed2f6385c8a30474037b3b7576b0ed Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 10 May 2019 08:25:33 -0700 Subject: [PATCH 152/249] build: ensure that we link against the correct library Ensure that we link against the correct VC runtime libraries. Additionally, enable the macros to indicate that we are linking against the VC runtimes dynamically to get proper DLL storage. This fixes memory issues and file descriptor table synchrony. 
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 5 +++++ src/{unifdef.NQ34TI => unifdef.4q2keD} | 0 2 files changed, 5 insertions(+) rename src/{unifdef.NQ34TI => unifdef.4q2keD} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index afa785b6c..c7c6e33b2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -123,6 +123,9 @@ if(ENABLE_SWIFT) CFLAGS -fblocks -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap + $<$:-D_MT> + # TODO(compnerd) handle /MT builds + $<$:-D_DLL> DEPENDS module-maps DispatchStubs @@ -133,6 +136,8 @@ if(ENABLE_SWIFT) -lBlocksRuntime -L $ -ldispatch + $<$,$>:-lmsvcrtd> + $<$,$>>:-lmsvcrt> MODULE_NAME Dispatch MODULE_LINK_NAME diff --git a/src/unifdef.NQ34TI b/src/unifdef.4q2keD similarity index 100% rename from src/unifdef.NQ34TI rename to src/unifdef.4q2keD From 9687520d40a3f300a630b1d07c1326c7c1d5a1b2 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 10 May 2019 16:10:47 -0700 Subject: [PATCH 153/249] Merge pull request #480 from compnerd/one-is-better-than-many build: ensure that we link against the correct library Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.4q2keD => unifdef.AoNGe6} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.4q2keD => unifdef.AoNGe6} (100%) diff --git a/PATCHES b/PATCHES index d247af87a..9e13df942 100644 --- a/PATCHES +++ b/PATCHES @@ -496,3 +496,4 @@ github commits starting with 29bdc2f from [6bf6cb1] APPLIED rdar://54572081 [aa13cad] APPLIED rdar://54572081 [b073d89] APPLIED rdar://54572081 +[7784917] APPLIED rdar://54572081 diff --git a/src/unifdef.4q2keD b/src/unifdef.AoNGe6 similarity index 100% rename from src/unifdef.4q2keD rename to src/unifdef.AoNGe6 From e91341dcc5a89ad267e586fdd43cce9558294c88 Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Thu, 16 May 2019 12:40:35 -0700 Subject: [PATCH 154/249] Merge pull request #484 from apple/disable-group-starfish dispatch_group and dispatch_starfish are failing sporadically in CI Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.AoNGe6 => unifdef.NH19fL} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.AoNGe6 => unifdef.NH19fL} (100%) diff --git a/PATCHES b/PATCHES index 9e13df942..cad66db87 100644 --- a/PATCHES +++ b/PATCHES @@ -497,3 +497,4 @@ github commits starting with 29bdc2f from [aa13cad] APPLIED rdar://54572081 [b073d89] APPLIED rdar://54572081 [7784917] APPLIED rdar://54572081 +[717b3f7] APPLIED rdar://54572081 diff --git a/src/unifdef.AoNGe6 b/src/unifdef.NH19fL similarity index 100% rename from src/unifdef.AoNGe6 rename to src/unifdef.NH19fL From dd47f712a8dda5b17fe4208ecbbb95ebb1fd801c Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Thu, 16 May 2019 13:10:29 -0700 Subject: [PATCH 155/249] Merge pull request #485 from apple/disable-group-followup Don't set target_link_libraries on disabled group test Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.NH19fL => unifdef.0712ED} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.NH19fL => unifdef.0712ED} (100%) diff --git a/PATCHES b/PATCHES index cad66db87..31fbb75ab 100644 --- a/PATCHES +++ b/PATCHES @@ -498,3 +498,4 @@ github commits starting with 29bdc2f from [b073d89] APPLIED rdar://54572081 [7784917] APPLIED rdar://54572081 [717b3f7] APPLIED rdar://54572081 +[37010f0] APPLIED rdar://54572081 diff --git a/src/unifdef.NH19fL b/src/unifdef.0712ED similarity index 100% rename from src/unifdef.NH19fL rename to src/unifdef.0712ED From a061b6bff0bbf154d76ea30166eb0a6318a8521c Mon Sep 17 00:00:00 2001 From: Simon Evans Date: Thu, 16 May 2019 
16:41:16 +0100
Subject: [PATCH 156/249] When adding conditionally included compiler flags, don't use the CFLAGS section.

- cmake/modules/SwiftSupport.cmake prepends each flag with '-Xcc' but if
  the flag evaluates to empty then an extra -Xcc is added to the compiler
  options.
- Explicitly add the option with a conditional '-Xcc' before it as well.
- This fixes the "warning: argument unused during compilation: '-Xcc'"
  seen in the error logs.

Signed-off-by: Kim Topley
---
 src/CMakeLists.txt | 8 +++++---
 src/{unifdef.0712ED => unifdef.3KX9ZA} | 0
 2 files changed, 5 insertions(+), 3 deletions(-)
 rename src/{unifdef.0712ED => unifdef.3KX9ZA} (100%)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c7c6e33b2..14ed8be24 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -123,9 +123,6 @@ if(ENABLE_SWIFT)
                     CFLAGS
                       -fblocks
                       -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap
-                      $<$:-D_MT>
-                      # TODO(compnerd) handle /MT builds
-                      $<$:-D_DLL>
                     DEPENDS
                       module-maps
                       DispatchStubs
@@ -157,6 +154,11 @@ if(ENABLE_SWIFT)
                     SWIFT_FLAGS
                       -I ${PROJECT_SOURCE_DIR}
                       ${swift_optimization_flags}
+                      $<$:-Xcc>
+                      $<$:-D_MT>
+                      # TODO(compnerd) handle /MT builds
+                      $<$:-Xcc>
+                      $<$:-D_DLL>
                     TARGET
                       ${CMAKE_C_COMPILER_TARGET})
 endif()

diff --git a/src/unifdef.0712ED b/src/unifdef.3KX9ZA
similarity index 100%
rename from src/unifdef.0712ED
rename to src/unifdef.3KX9ZA

From f272fbd1db7c8cfd75fe0859406f5534866d52b2 Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Thu, 16 May 2019 13:45:09 -0700
Subject: [PATCH 157/249] Merge pull request #483 from spevans/pr_cflags_fix

When adding conditionally included compiler flags, don't use the CFLAGS
section

Signed-off-by: Kim Topley
---
 PATCHES | 1 +
 src/{unifdef.3KX9ZA => unifdef.clufhJ} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.3KX9ZA => unifdef.clufhJ} (100%)

diff --git a/PATCHES b/PATCHES
index 31fbb75ab..99bf01fd7 100644
--- a/PATCHES
+++ b/PATCHES
@@ -499,3 +499,4 @@ github commits starting with 29bdc2f from
 [7784917] APPLIED rdar://54572081
 [717b3f7] APPLIED rdar://54572081
 [37010f0] APPLIED rdar://54572081
+[251dba4] APPLIED rdar://54572081
diff --git a/src/unifdef.3KX9ZA b/src/unifdef.clufhJ
similarity index 100%
rename from src/unifdef.3KX9ZA
rename to src/unifdef.clufhJ

From 6cdf7b97dfdf9c9d8971e0d13e8fb0f7bcf63549 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Sat, 11 May 2019 18:05:43 -0700
Subject: [PATCH 158/249] swift: add a set of Windows extensions

This adds a new set of API overloads which provide an interface which
uses the HANDLE rather than a file descriptor. This makes it easier to
create the dispatch sources without having to convert the file handle to
a file descriptor.
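At the C layer, the new overloads are essentially a cast (a Windows-only
sketch; `hFile` and the function name are hypothetical, and it assumes
dispatch_fd_t is pointer sized on Windows):

#include <dispatch/dispatch.h>
#include <stdint.h>

// The OS handle travels through dispatch_read() directly, without a CRT
// file descriptor in between.
static void
read_from_handle(intptr_t hFile, dispatch_queue_t queue)
{
	dispatch_read((dispatch_fd_t)hFile, 4096, queue,
			^(dispatch_data_t data, int error) {
		/* consume data; a non-zero error reports the failure */
	});
}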
Signed-off-by: Kim Topley --- src/swift/IO.swift | 19 +++++++++++++++++++ src/swift/Source.swift | 14 ++++++++++++++ src/{unifdef.clufhJ => unifdef.1vlKxG} | 0 3 files changed, 33 insertions(+) rename src/{unifdef.clufhJ => unifdef.1vlKxG} (100%) diff --git a/src/swift/IO.swift b/src/swift/IO.swift index 7b0bb81a9..ad985c944 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -11,6 +11,9 @@ //===----------------------------------------------------------------------===// import CDispatch +#if os(Windows) +import WinSDK +#endif extension DispatchIO { @@ -34,12 +37,28 @@ extension DispatchIO { public static let strictInterval = IntervalFlags(rawValue: 1) } +#if os(Windows) + public class func read(fromHandle: HANDLE, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { + dispatch_read(dispatch_fd_t(bitPattern: fromHandle), maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in + handler(DispatchData(borrowedData: data), error) + } + } +#endif + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { dispatch_read(dispatch_fd_t(fromFileDescriptor), maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in handler(DispatchData(borrowedData: data), error) } } +#if os(Windows) + public class func write(toHandle: HANDLE, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping(_ data: DispatchData??, _ error: Int32) -> Void) { + dispatch_write(dispatch_fd_t(bitPattern: toHandle), data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data.map { DispatchData(borrowedData: $0) }, error) + } + } +#endif + public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) { dispatch_write(dispatch_fd_t(toFileDescriptor), data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in handler(data.map { DispatchData(borrowedData: $0) }, error) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index a38066c72..b4315c6cf 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -181,6 +181,13 @@ extension DispatchSource { } #endif +#if os(Windows) + public class func makeReadSource(handle: HANDLE, queue: DispatchQueue? = nil) -> DispatchSourceRead { + let source = dispatch_source_create(_swift_dispatch_source_type_READ(), UInt(bitPattern: handle), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceRead + } +#endif + public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { #if os(Windows) let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) @@ -224,6 +231,13 @@ extension DispatchSource { } #endif +#if os(Windows) + public class func makeWriteSource(handle: HANDLE, queue: DispatchQueue? = nil) -> DispatchSourceWrite { + let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), UInt(bitPattern: handle), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceWrite + } +#endif + public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? 
= nil) -> DispatchSourceWrite { #if os(Windows) let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) diff --git a/src/unifdef.clufhJ b/src/unifdef.1vlKxG similarity index 100% rename from src/unifdef.clufhJ rename to src/unifdef.1vlKxG From 932cfbe7860957c9404ea87a0bf29c83714111b7 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 16 May 2019 13:47:02 -0700 Subject: [PATCH 159/249] Merge pull request #481 from compnerd/handle-extensions swift: add a set of Windows extensions Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.1vlKxG => unifdef.hYJWs9} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.1vlKxG => unifdef.hYJWs9} (100%) diff --git a/PATCHES b/PATCHES index 99bf01fd7..2d2636862 100644 --- a/PATCHES +++ b/PATCHES @@ -500,3 +500,4 @@ github commits starting with 29bdc2f from [717b3f7] APPLIED rdar://54572081 [37010f0] APPLIED rdar://54572081 [251dba4] APPLIED rdar://54572081 +[a18aa1f] APPLIED rdar://54572081 diff --git a/src/unifdef.1vlKxG b/src/unifdef.hYJWs9 similarity index 100% rename from src/unifdef.1vlKxG rename to src/unifdef.hYJWs9 From 88103d224187da1902ee69e83b5b33e6da232000 Mon Sep 17 00:00:00 2001 From: Simon Evans Date: Mon, 27 May 2019 19:36:39 +0100 Subject: [PATCH 160/249] Install libDispatchStubs.a if building static libraries. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 7 +++++++ src/{unifdef.hYJWs9 => unifdef.p8lSxi} | 0 2 files changed, 7 insertions(+) rename src/{unifdef.hYJWs9 => unifdef.p8lSxi} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 14ed8be24..92691cd3b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -295,5 +295,12 @@ if(ENABLE_SWIFT) DESTINATION ${INSTALL_TARGET_DIR}) endif() + + if(NOT BUILD_SHARED_LIBS) + install(FILES + $ + DESTINATION + ${INSTALL_TARGET_DIR}) + endif() endif() diff --git a/src/unifdef.hYJWs9 b/src/unifdef.p8lSxi similarity index 100% rename from src/unifdef.hYJWs9 rename to src/unifdef.p8lSxi From 0f49effaaa1cea05a8a6cb52a742fae89d92c653 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 30 May 2019 08:48:00 -0700 Subject: [PATCH 161/249] Merge pull request #489 from spevans/pr_install_dispatch_stubs Install libDispatchStubs.a if building static libraries. 
Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.p8lSxi => unifdef.G5YquT} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.p8lSxi => unifdef.G5YquT} (100%) diff --git a/PATCHES b/PATCHES index 2d2636862..0e2950499 100644 --- a/PATCHES +++ b/PATCHES @@ -501,3 +501,4 @@ github commits starting with 29bdc2f from [37010f0] APPLIED rdar://54572081 [251dba4] APPLIED rdar://54572081 [a18aa1f] APPLIED rdar://54572081 +[e8d020e] APPLIED rdar://54572081 diff --git a/src/unifdef.p8lSxi b/src/unifdef.G5YquT similarity index 100% rename from src/unifdef.p8lSxi rename to src/unifdef.G5YquT From 97bf93ae9fd2d1c86373f1aa2c983bd4caf80a6c Mon Sep 17 00:00:00 2001 From: Vlad Gorloff Date: Sat, 1 Jun 2019 20:12:28 +0200 Subject: [PATCH 162/249] Fixes Cmake error "Unrecognized architecture on host system: armv7-a" Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 2 ++ src/{unifdef.G5YquT => unifdef.X9Mikf} | 0 2 files changed, 2 insertions(+) rename src/{unifdef.G5YquT => unifdef.X9Mikf} (100%) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index adcf42fbc..031627c9c 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -232,6 +232,8 @@ function(get_swift_host_arch result_var_name) set("${result_var_name}" "s390x" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv6l") set("${result_var_name}" "armv6" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7-a") + set("${result_var_name}" "armv7" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7l") set("${result_var_name}" "armv7" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "AMD64") diff --git a/src/unifdef.G5YquT b/src/unifdef.X9Mikf similarity index 100% rename from src/unifdef.G5YquT rename to src/unifdef.X9Mikf From e2f1980e21f45031c0f097efb800aec798430406 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 4 Jun 2019 13:14:11 -0700 Subject: [PATCH 163/249] Merge pull request #490 from vgorloff/macos-to-android-crosscompile Android CrossCompile on Mac: Fixes Cmake error "Unrecognized architecture on host system: armv7-a" Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.X9Mikf => unifdef.6TveoJ} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.X9Mikf => unifdef.6TveoJ} (100%) diff --git a/PATCHES b/PATCHES index 0e2950499..0da9ab7b9 100644 --- a/PATCHES +++ b/PATCHES @@ -502,3 +502,4 @@ github commits starting with 29bdc2f from [251dba4] APPLIED rdar://54572081 [a18aa1f] APPLIED rdar://54572081 [e8d020e] APPLIED rdar://54572081 +[90a84a1] APPLIED rdar://54572081 diff --git a/src/unifdef.X9Mikf b/src/unifdef.6TveoJ similarity index 100% rename from src/unifdef.X9Mikf rename to src/unifdef.6TveoJ From 0907ae12ad2868438f34faae4ddc95580c06d6f8 Mon Sep 17 00:00:00 2001 From: Marc Prud'hommeaux Date: Mon, 3 Jun 2019 08:41:47 -0400 Subject: [PATCH 164/249] Fix formatting in documentation for "deadline" parameter. The typo fix at https://github.com/apple/swift-corelibs-libdispatch/commit/a33e4fd965eadd815c4a1b8e8ab80a4cd702d053#diff-8a1894abc995edb93f386d821287e44f did not add the colon after the parameter name. 
Signed-off-by: Kim Topley --- src/swift/Queue.swift | 8 ++++---- src/{unifdef.6TveoJ => unifdef.SyG3qg} | 0 2 files changed, 4 insertions(+), 4 deletions(-) rename src/{unifdef.6TveoJ => unifdef.SyG3qg} (100%) diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 11b68dc7a..fe7406c42 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -371,7 +371,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter qos: the QoS at which the work item should be executed. /// Defaults to `DispatchQoS.unspecified`. @@ -402,7 +402,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter qos: the QoS at which the work item should be executed. /// Defaults to `DispatchQoS.unspecified`. @@ -433,7 +433,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(deadline:qos:flags:execute:)` @@ -448,7 +448,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(wallDeadline:qos:flags:execute:)` diff --git a/src/unifdef.6TveoJ b/src/unifdef.SyG3qg similarity index 100% rename from src/unifdef.6TveoJ rename to src/unifdef.SyG3qg From e27d0bfe797a8c15d62435aee93295fc5a597e4e Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 7 Jun 2019 09:03:42 -0700 Subject: [PATCH 165/249] Merge pull request #491 from marcprux/patch-1 Fix formatting in documentation for "deadline" parameter. Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.SyG3qg => unifdef.PM8day} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.SyG3qg => unifdef.PM8day} (100%) diff --git a/PATCHES b/PATCHES index 0da9ab7b9..26dad4f50 100644 --- a/PATCHES +++ b/PATCHES @@ -503,3 +503,4 @@ github commits starting with 29bdc2f from [a18aa1f] APPLIED rdar://54572081 [e8d020e] APPLIED rdar://54572081 [90a84a1] APPLIED rdar://54572081 +[7721660] APPLIED rdar://54572081 diff --git a/src/unifdef.SyG3qg b/src/unifdef.PM8day similarity index 100% rename from src/unifdef.SyG3qg rename to src/unifdef.PM8day From 899d468329ccae5124056d1b6d67d684a46083cd Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 11 Jun 2019 13:37:07 -0700 Subject: [PATCH 166/249] io: free Windows operation data with _aligned_free() `_dispatch_operation_perform()` uses `_aligned_malloc()` on Windows to allocate the data buffer. 
This means that we cannot use `DISPATCH_DATA_DESTRUCTOR_FREE` when we create the `dispatch_data_t`. Provide a custom destructor which correctly frees the buffer. Discovered while porting the dispatch_io test to Windows. Signed-off-by: Kim Topley --- src/io.c | 6 ++++++ src/{unifdef.PM8day => unifdef.0MgDkv} | 0 2 files changed, 6 insertions(+) rename src/{unifdef.PM8day => unifdef.0MgDkv} (100%) diff --git a/src/io.c b/src/io.c index 9c0fff83d..b0b3b9a9b 100644 --- a/src/io.c +++ b/src/io.c @@ -2478,8 +2478,14 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (op->direction == DOP_DIR_READ) { if (op->buf_len) { void *buf = op->buf; +#if defined(_WIN32) + // buf is allocated with _aligned_malloc() + data = dispatch_data_create(buf, op->buf_len, NULL, + ^{ _aligned_free(buf); }); +#else data = dispatch_data_create(buf, op->buf_len, NULL, DISPATCH_DATA_DESTRUCTOR_FREE); +#endif op->buf = NULL; op->buf_len = 0; dispatch_data_t d = dispatch_data_create_concat(op->data, data); diff --git a/src/unifdef.PM8day b/src/unifdef.0MgDkv similarity index 100% rename from src/unifdef.PM8day rename to src/unifdef.0MgDkv From e3fbeb389e0aed17fd28965ae4265e0a214fcfcb Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 11 Jun 2019 15:47:44 -0700 Subject: [PATCH 167/249] Merge pull request #493 from adierking/alignedfree io: free Windows operation data with _aligned_free() Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.0MgDkv => unifdef.IykzTb} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.0MgDkv => unifdef.IykzTb} (100%) diff --git a/PATCHES b/PATCHES index 26dad4f50..8e4fc5254 100644 --- a/PATCHES +++ b/PATCHES @@ -504,3 +504,4 @@ github commits starting with 29bdc2f from [e8d020e] APPLIED rdar://54572081 [90a84a1] APPLIED rdar://54572081 [7721660] APPLIED rdar://54572081 +[c5af10f] APPLIED rdar://54572081 diff --git a/src/unifdef.0MgDkv b/src/unifdef.IykzTb similarity index 100% rename from src/unifdef.0MgDkv rename to src/unifdef.IykzTb From 105e638e8ddea7f1a34a632dc31de9838468f6b7 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 11 Jun 2019 20:38:10 -0700 Subject: [PATCH 168/249] shims: fix TAILQ_CONCAT() edge cases This needs to set the `te_next` of the last element in `head1` and also handle the case where `head1` is empty. This fixes a hang in the dispatch_context_for_key test on Windows because the queue-specific list was getting corrupted in `_dispatch_queue_specific_head_dispose()`.
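(For illustration, the fixed concatenation reduces to the sketch below. This is a hypothetical, self-contained re-implementation that mirrors the shim's field layout — `tq_first`/`tq_last` heads, `te_next`/`te_prev` links — not the shim's actual `TAILQ_*` macros; the names `concat`, `entry`, and `head` are made up for the example.)

    #include <stddef.h>

    /* Miniature tail queue mirroring the generic shim's layout. */
    struct entry { struct entry *te_next, *te_prev; };
    struct head  { struct entry *tq_first, *tq_last; };

    /* Concatenate h2 onto h1, covering both edge cases named above:
     * (1) link the old tail of a non-empty h1 forward, and
     * (2) take over h2's chain entirely when h1 is empty. */
    static void concat(struct head *h1, struct head *h2)
    {
        if (h2->tq_first == NULL) return;        /* h2 empty: nothing to do */
        if (h1->tq_last) {
            h1->tq_last->te_next = h2->tq_first; /* (1) fix the forward link */
        } else {
            h1->tq_first = h2->tq_first;         /* (2) h1 was empty */
        }
        h2->tq_first->te_prev = h1->tq_last;     /* NULL when h1 was empty */
        h1->tq_last = h2->tq_last;
        h2->tq_first = h2->tq_last = NULL;       /* reinitialize h2 */
    }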
Signed-off-by: Kim Topley --- src/shims/generic_sys_queue.h | 6 +++++- src/{unifdef.IykzTb => unifdef.3ykhSz} | 0 2 files changed, 5 insertions(+), 1 deletion(-) rename src/{unifdef.IykzTb => unifdef.3ykhSz} (100%) diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h index b4b74aeef..b1edeb2f2 100644 --- a/src/shims/generic_sys_queue.h +++ b/src/shims/generic_sys_queue.h @@ -94,7 +94,11 @@ #define TAILQ_CONCAT(head1, head2, field) do { \ if (!TAILQ_EMPTY(head2)) { \ - (head1)->tq_last = (head2)->tq_first; \ + if ((head1)->tq_last) { \ + (head1)->tq_last->field.te_next = (head2)->tq_first; \ + } else { \ + (head1)->tq_first = (head2)->tq_first; \ + } \ (head2)->tq_first->field.te_prev = (head1)->tq_last; \ (head1)->tq_last = (head2)->tq_last; \ TAILQ_INIT((head2)); \ diff --git a/src/unifdef.IykzTb b/src/unifdef.3ykhSz similarity index 100% rename from src/unifdef.IykzTb rename to src/unifdef.3ykhSz From 815a1b79ff02357264cf343862dadaa528d13a2d Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 12 Jun 2019 07:09:01 -0700 Subject: [PATCH 169/249] Merge pull request #496 from adierking/tailq shims: fix TAILQ_CONCAT() edge cases Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.3ykhSz => unifdef.KI5eu4} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.3ykhSz => unifdef.KI5eu4} (100%) diff --git a/PATCHES b/PATCHES index 8e4fc5254..98353e9e2 100644 --- a/PATCHES +++ b/PATCHES @@ -505,3 +505,4 @@ github commits starting with 29bdc2f from [90a84a1] APPLIED rdar://54572081 [7721660] APPLIED rdar://54572081 [c5af10f] APPLIED rdar://54572081 +[f01432d] APPLIED rdar://54572081 diff --git a/src/unifdef.3ykhSz b/src/unifdef.KI5eu4 similarity index 100% rename from src/unifdef.3ykhSz rename to src/unifdef.KI5eu4 From d253b3c436026292fce9b69892c2c4c569de7c10 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 10 Jun 2019 19:39:55 -0700 Subject: [PATCH 170/249] Remove libbsd dependency for Linux Remove the libbsd dependency on Linux. This simplifies the build for Linux where sometimes we would get the wrong sys/queue.h instead. 
Signed-off-by: Kim Topley --- CMakeLists.txt | 9 --------- src/CMakeLists.txt | 8 -------- src/internal.h | 4 +++- src/shims.h | 2 +- src/{unifdef.KI5eu4 => unifdef.Dp13Lj} | 0 5 files changed, 4 insertions(+), 19 deletions(-) rename src/{unifdef.KI5eu4 => unifdef.Dp13Lj} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 462be4e04..2e2ada1c7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -197,15 +197,6 @@ check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) -if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) - include(FindPkgConfig) - pkg_check_modules(BSD_OVERLAY libbsd-overlay) - if(BSD_OVERLAY_FOUND) - set(HAVE_STRLCPY 1 CACHE INTERNAL "Have function strlcpy" FORCE) - set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE) - endif() -endif() - find_package(Threads REQUIRED) check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 92691cd3b..6d6d201c6 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -204,11 +204,6 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL Android) PRIVATE -U_GNU_SOURCE) endif() -if(BSD_OVERLAY_FOUND) - target_compile_options(dispatch - PRIVATE - ${BSD_OVERLAY_CFLAGS}) -endif() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE @@ -230,9 +225,6 @@ else() -fblocks -momit-leaf-frame-pointer) endif() -if(BSD_OVERLAY_FOUND) - target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) -endif() if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() diff --git a/src/internal.h b/src/internal.h index 973f32271..c50591bfd 100644 --- a/src/internal.h +++ b/src/internal.h @@ -282,8 +282,10 @@ upcast(dispatch_object_t dou) #include #else #include -#include #endif /* __ANDROID__ */ +#if !defined(__linux__) +#include +#endif #include #include #include diff --git a/src/shims.h b/src/shims.h index 9d18bec68..bce5d08f1 100644 --- a/src/shims.h +++ b/src/shims.h @@ -33,7 +33,7 @@ #include "shims/generic_win_stubs.h" #endif // defined(_WIN32) -#if defined(_WIN32) || defined(__ANDROID__) +#if defined(_WIN32) || defined(__linux__) #include "shims/generic_sys_queue.h" #endif diff --git a/src/unifdef.KI5eu4 b/src/unifdef.Dp13Lj similarity index 100% rename from src/unifdef.KI5eu4 rename to src/unifdef.Dp13Lj From 89f8898ff748f049810cbab48aeddeb05ef0a531 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 12 Jun 2019 13:48:00 -0700 Subject: [PATCH 171/249] Merge pull request #492 from compnerd/bsd-free-bsd Remove libbsd dependency for Linux Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.Dp13Lj => unifdef.ZWw065} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.Dp13Lj => unifdef.ZWw065} (100%) diff --git a/PATCHES b/PATCHES index 98353e9e2..edc650770 100644 --- a/PATCHES +++ b/PATCHES @@ -506,3 +506,4 @@ github commits starting with 29bdc2f from [7721660] APPLIED rdar://54572081 [c5af10f] APPLIED rdar://54572081 [f01432d] APPLIED rdar://54572081 +[d0394bf] APPLIED rdar://54572081 diff --git a/src/unifdef.Dp13Lj b/src/unifdef.ZWw065 similarity index 100% rename from src/unifdef.Dp13Lj rename to src/unifdef.ZWw065 From c684ab93e5c12fcf37b73e376eeb7fb74a4bed45 Mon Sep 17 00:00:00 2001 From: Ben Langmuir Date: Wed, 12 Jun 2019 15:41:45 -0700 Subject: [PATCH 172/249] Revert "Remove libbsd dependency for Linux" Signed-off-by: Kim Topley --- CMakeLists.txt | 9 +++++++++ src/CMakeLists.txt | 8 ++++++++ src/internal.h | 4 +--- src/shims.h | 2 +- 
src/{unifdef.ZWw065 => unifdef.KDFu5j} | 0 5 files changed, 19 insertions(+), 4 deletions(-) rename src/{unifdef.ZWw065 => unifdef.KDFu5j} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e2ada1c7..462be4e04 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -197,6 +197,15 @@ check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) +if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) + include(FindPkgConfig) + pkg_check_modules(BSD_OVERLAY libbsd-overlay) + if(BSD_OVERLAY_FOUND) + set(HAVE_STRLCPY 1 CACHE INTERNAL "Have function strlcpy" FORCE) + set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE) + endif() +endif() + find_package(Threads REQUIRED) check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6d6d201c6..92691cd3b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -204,6 +204,11 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL Android) PRIVATE -U_GNU_SOURCE) endif() +if(BSD_OVERLAY_FOUND) + target_compile_options(dispatch + PRIVATE + ${BSD_OVERLAY_CFLAGS}) +endif() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE @@ -225,6 +230,9 @@ else() -fblocks -momit-leaf-frame-pointer) endif() +if(BSD_OVERLAY_FOUND) + target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) +endif() if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() diff --git a/src/internal.h b/src/internal.h index c50591bfd..973f32271 100644 --- a/src/internal.h +++ b/src/internal.h @@ -282,10 +282,8 @@ upcast(dispatch_object_t dou) #include #else #include -#endif /* __ANDROID__ */ -#if !defined(__linux__) #include -#endif +#endif /* __ANDROID__ */ #include #include #include diff --git a/src/shims.h b/src/shims.h index bce5d08f1..9d18bec68 100644 --- a/src/shims.h +++ b/src/shims.h @@ -33,7 +33,7 @@ #include "shims/generic_win_stubs.h" #endif // defined(_WIN32) -#if defined(_WIN32) || defined(__linux__) +#if defined(_WIN32) || defined(__ANDROID__) #include "shims/generic_sys_queue.h" #endif diff --git a/src/unifdef.ZWw065 b/src/unifdef.KDFu5j similarity index 100% rename from src/unifdef.ZWw065 rename to src/unifdef.KDFu5j From 39f0cd4afacc7a7bb889c071f55e53eafc73f64f Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Wed, 12 Jun 2019 16:29:27 -0700 Subject: [PATCH 173/249] Merge pull request #497 from apple/revert-492-bsd-free-bsd Revert "Remove libbsd dependency for Linux" Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.KDFu5j => unifdef.9RZtHw} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.KDFu5j => unifdef.9RZtHw} (100%) diff --git a/PATCHES b/PATCHES index edc650770..edf53b9f3 100644 --- a/PATCHES +++ b/PATCHES @@ -507,3 +507,4 @@ github commits starting with 29bdc2f from [c5af10f] APPLIED rdar://54572081 [f01432d] APPLIED rdar://54572081 [d0394bf] APPLIED rdar://54572081 +[2b14a98] APPLIED rdar://54572081 diff --git a/src/unifdef.KDFu5j b/src/unifdef.9RZtHw similarity index 100% rename from src/unifdef.KDFu5j rename to src/unifdef.9RZtHw From 1b4c03fb98c3f02c5d9ba2e53d944d38c8911f50 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 12 Jun 2019 17:07:40 -0700 Subject: [PATCH 174/249] Revert "Revert "Remove libbsd dependency for Linux"" This reverts commit 4b87733f3e5e62a1c7a243d7e06cd35ae4b665b3. 
Signed-off-by: Kim Topley --- CMakeLists.txt | 9 --------- src/CMakeLists.txt | 8 -------- src/internal.h | 4 +++- src/shims.h | 2 +- src/{unifdef.9RZtHw => unifdef.BRH9nv} | 0 5 files changed, 4 insertions(+), 19 deletions(-) rename src/{unifdef.9RZtHw => unifdef.BRH9nv} (100%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 462be4e04..2e2ada1c7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -197,15 +197,6 @@ check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) -if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) - include(FindPkgConfig) - pkg_check_modules(BSD_OVERLAY libbsd-overlay) - if(BSD_OVERLAY_FOUND) - set(HAVE_STRLCPY 1 CACHE INTERNAL "Have function strlcpy" FORCE) - set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE) - endif() -endif() - find_package(Threads REQUIRED) check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 92691cd3b..6d6d201c6 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -204,11 +204,6 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL Android) PRIVATE -U_GNU_SOURCE) endif() -if(BSD_OVERLAY_FOUND) - target_compile_options(dispatch - PRIVATE - ${BSD_OVERLAY_CFLAGS}) -endif() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE @@ -230,9 +225,6 @@ else() -fblocks -momit-leaf-frame-pointer) endif() -if(BSD_OVERLAY_FOUND) - target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) -endif() if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() diff --git a/src/internal.h b/src/internal.h index 973f32271..c50591bfd 100644 --- a/src/internal.h +++ b/src/internal.h @@ -282,8 +282,10 @@ upcast(dispatch_object_t dou) #include #else #include -#include #endif /* __ANDROID__ */ +#if !defined(__linux__) +#include +#endif #include #include #include diff --git a/src/shims.h b/src/shims.h index 9d18bec68..bce5d08f1 100644 --- a/src/shims.h +++ b/src/shims.h @@ -33,7 +33,7 @@ #include "shims/generic_win_stubs.h" #endif // defined(_WIN32) -#if defined(_WIN32) || defined(__ANDROID__) +#if defined(_WIN32) || defined(__linux__) #include "shims/generic_sys_queue.h" #endif diff --git a/src/unifdef.9RZtHw b/src/unifdef.BRH9nv similarity index 100% rename from src/unifdef.9RZtHw rename to src/unifdef.BRH9nv From d9be150842219c759dcabdd19db8bab4d67e811b Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 13 Jun 2019 07:15:42 -0700 Subject: [PATCH 175/249] Merge pull request #499 from compnerd/marsha-marsha-marsha Revert "Revert "Remove libbsd dependency for Linux"" Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.BRH9nv => unifdef.OwoD5A} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.BRH9nv => unifdef.OwoD5A} (100%) diff --git a/PATCHES b/PATCHES index edf53b9f3..3f362d620 100644 --- a/PATCHES +++ b/PATCHES @@ -508,3 +508,4 @@ github commits starting with 29bdc2f from [f01432d] APPLIED rdar://54572081 [d0394bf] APPLIED rdar://54572081 [2b14a98] APPLIED rdar://54572081 +[d32596b] APPLIED rdar://54572081 diff --git a/src/unifdef.BRH9nv b/src/unifdef.OwoD5A similarity index 100% rename from src/unifdef.BRH9nv rename to src/unifdef.OwoD5A From 62e28148f67dd0c5b1b5cc566f3a675f7c5c7af8 Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Fri, 23 Aug 2019 10:30:44 -0700 Subject: [PATCH 176/249] Put back generic_win_stubs.c --- src/shims/generic_win_stubs.c | 24 ++++++++++++++++++++++++ src/{unifdef.OwoD5A => unifdef.yjQ8tQ} | 0 2 files changed, 24 
insertions(+) create mode 100644 src/shims/generic_win_stubs.c rename src/{unifdef.OwoD5A => unifdef.yjQ8tQ} (100%) diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c new file mode 100644 index 000000000..67b6f5134 --- /dev/null +++ b/src/shims/generic_win_stubs.c @@ -0,0 +1,24 @@ +#include "internal.h" + +/* + * This file contains stubbed out functions we are using during + * the initial Windows port. When the port is complete, this file + * should be empty (and thus removed). + */ + +void +_dispatch_runloop_queue_dispose(dispatch_queue_t dq DISPATCH_UNUSED, + bool *allow_free DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +void +_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +/* + * Stubbed out static data + */ diff --git a/src/unifdef.OwoD5A b/src/unifdef.yjQ8tQ similarity index 100% rename from src/unifdef.OwoD5A rename to src/unifdef.yjQ8tQ From 1837177b9962938a2229f2d0e1138d8d409a16f7 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Wed, 12 Jun 2019 15:36:03 -0700 Subject: [PATCH 177/249] shims: use the precise Windows time functions `_dispatch_uptime()` and `_dispatch_monotonic_time()` are currently using interrupt time APIs which return values cached by the scheduler. This is causing the dispatch_after test to fail on Windows. Switch them to the "Precise" APIs which incorporate a hardware time reading into their results. Unfortunately, these functions are not exported by any import libraries and we have to locate them at runtime. Also, `_dispatch_monotonic_time()` should not be using the unbiased version of the API because it is expected to include suspend time. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 1 + src/shims/generic_win_stubs.c | 44 ++++++++++++++++++-------- src/shims/generic_win_stubs.h | 6 ++++ src/shims/time.h | 6 ++-- src/{unifdef.yjQ8tQ => unifdef.aL9kWc} | 0 5 files changed, 39 insertions(+), 18 deletions(-) rename src/{unifdef.yjQ8tQ => unifdef.aL9kWc} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6d6d201c6..6bdff36a9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -63,6 +63,7 @@ if(WIN32) target_sources(dispatch PRIVATE shims/generic_sys_queue.h + shims/generic_win_stubs.c shims/generic_win_stubs.h shims/getprogname.c) endif() diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c index 67b6f5134..c48eef66a 100644 --- a/src/shims/generic_win_stubs.c +++ b/src/shims/generic_win_stubs.c @@ -1,24 +1,40 @@ #include "internal.h" -/* - * This file contains stubbed out functions we are using during - * the initial Windows port. When the port is complete, this file - * should be empty (and thus removed). 
- */ +typedef void (WINAPI *_precise_time_fn_t)(PULONGLONG); -void -_dispatch_runloop_queue_dispose(dispatch_queue_t dq DISPATCH_UNUSED, - bool *allow_free DISPATCH_UNUSED) +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_precise_time_pred); +DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryInterruptTimePrecise_ptr); +DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryUnbiasedInterruptTimePrecise_ptr); + +static void +_dispatch_init_precise_time(void *context DISPATCH_UNUSED) { - WIN_PORT_ERROR(); + HMODULE kernelbase = LoadLibraryW(L"KernelBase.dll"); + if (!kernelbase) { + DISPATCH_INTERNAL_CRASH(0, "failed to load KernelBase.dll"); + } + _dispatch_QueryInterruptTimePrecise_ptr = (_precise_time_fn_t) + GetProcAddress(kernelbase, "QueryInterruptTimePrecise"); + _dispatch_QueryUnbiasedInterruptTimePrecise_ptr = (_precise_time_fn_t) + GetProcAddress(kernelbase, "QueryUnbiasedInterruptTimePrecise"); + if (!_dispatch_QueryInterruptTimePrecise_ptr) { + DISPATCH_INTERNAL_CRASH(0, "could not locate QueryInterruptTimePrecise"); + } + if (!_dispatch_QueryUnbiasedInterruptTimePrecise_ptr) { + DISPATCH_INTERNAL_CRASH(0, "could not locate QueryUnbiasedInterruptTimePrecise"); + } } void -_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq DISPATCH_UNUSED) +_dispatch_QueryInterruptTimePrecise(PULONGLONG lpInterruptTimePrecise) { - WIN_PORT_ERROR(); + dispatch_once_f(&_dispatch_precise_time_pred, NULL, _dispatch_init_precise_time); + return _dispatch_QueryInterruptTimePrecise_ptr(lpInterruptTimePrecise); } -/* - * Stubbed out static data - */ +void +_dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePrecise) +{ + dispatch_once_f(&_dispatch_precise_time_pred, NULL, _dispatch_init_precise_time); + return _dispatch_QueryUnbiasedInterruptTimePrecise_ptr(lpUnbiasedInterruptTimePrecise); +} diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h index 1ce41f7ec..10b14beeb 100644 --- a/src/shims/generic_win_stubs.h +++ b/src/shims/generic_win_stubs.h @@ -36,4 +36,10 @@ typedef __typeof__(_Generic((__SIZE_TYPE__)0, \ #define strcasecmp _stricmp +/* + * Wrappers for dynamically loaded Windows APIs + */ +void _dispatch_QueryInterruptTimePrecise(PULONGLONG lpInterruptTimePrecise); +void _dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePrecise); + #endif diff --git a/src/shims/time.h b/src/shims/time.h index 8fae5a2f2..b57731c9a 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -151,7 +151,7 @@ _dispatch_uptime(void) return _dispatch_timespec_to_nano(ts); #elif defined(_WIN32) ULONGLONG ullUnbiasedTime; - QueryUnbiasedInterruptTime(&ullUnbiasedTime); + _dispatch_QueryUnbiasedInterruptTimePrecise(&ullUnbiasedTime); return ullUnbiasedTime * 100; #else #error platform needs to implement _dispatch_uptime() @@ -173,9 +173,7 @@ _dispatch_monotonic_time(void) return _dispatch_timespec_to_nano(ts); #elif defined(_WIN32) ULONGLONG ullTime; - if (!QueryUnbiasedInterruptTime(&ullTime)) - return 0; - + _dispatch_QueryInterruptTimePrecise(&ullTime); return ullTime * 100ull; #else #error platform needs to implement _dispatch_monotonic_time() diff --git a/src/unifdef.yjQ8tQ b/src/unifdef.aL9kWc similarity index 100% rename from src/unifdef.yjQ8tQ rename to src/unifdef.aL9kWc From 82d490be18e2c2352a07b4add0baaf737e548bc8 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 13 Jun 2019 09:00:02 -0700 Subject: [PATCH 178/249] Merge pull request #498 from adierking/precise-time shims: use the precise Windows time functions 
Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.aL9kWc => unifdef.07DULo} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.aL9kWc => unifdef.07DULo} (100%) diff --git a/PATCHES b/PATCHES index 3f362d620..cee54933c 100644 --- a/PATCHES +++ b/PATCHES @@ -509,3 +509,4 @@ github commits starting with 29bdc2f from [d0394bf] APPLIED rdar://54572081 [2b14a98] APPLIED rdar://54572081 [d32596b] APPLIED rdar://54572081 +[52bc6b2] APPLIED rdar://54572081 diff --git a/src/unifdef.aL9kWc b/src/unifdef.07DULo similarity index 100% rename from src/unifdef.aL9kWc rename to src/unifdef.07DULo From 70374dbab9156ef11ff46f03b7f4073004f69faa Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 14 Jun 2019 22:06:39 -0700 Subject: [PATCH 179/249] build: allow passing Swift-specific link flags This mirrors `CMAKE_SWIFT_FLAGS`, allowing control over the flags passed to the linker. This is needed until the libraries are built with a newer CMake which supports Swift properly. This enables cross-compiling libdispatch for Android on Windows. Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 2 +- src/{unifdef.07DULo => unifdef.fwAj06} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.07DULo => unifdef.fwAj06} (100%) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 031627c9c..ead1958b8 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -9,7 +9,7 @@ function(add_swift_target target) cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) set(compile_flags ${CMAKE_SWIFT_FLAGS}) - set(link_flags) + set(link_flags ${CMAKE_SWIFT_LINK_FLAGS}) if(AST_TARGET) list(APPEND compile_flags -target;${AST_TARGET}) diff --git a/src/unifdef.07DULo b/src/unifdef.fwAj06 similarity index 100% rename from src/unifdef.07DULo rename to src/unifdef.fwAj06 From ee500daf0ce8ae95c79e956572177441f0c5119f Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 14 Jun 2019 22:12:54 -0700 Subject: [PATCH 180/249] build: spell the linker suffix properly on Windows Windows requires that the .exe suffix be present to use the tools from the Android NDK.
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 12 ++++++++++-- src/{unifdef.fwAj06 => unifdef.3Sycbk} | 0 2 files changed, 10 insertions(+), 2 deletions(-) rename src/{unifdef.fwAj06 => unifdef.3Sycbk} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 6bdff36a9..eea7fa1fe 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -115,9 +115,17 @@ if(ENABLE_SWIFT) POSITION_INDEPENDENT_CODE YES) if(USE_LLD_LINKER) - set(use_ld_flag -use-ld=lld) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(use_ld_flag -use-ld=lld.exe) + else() + set(use_ld_flag -use-ld=lld) + endif() elseif(USE_GOLD_LINKER) - set(use_ld_flag -use-ld=gold) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(use_ld_flag -use-ld=gold.exe) + else() + set(use_ld_flag -use-ld=gold) + endif() endif() add_swift_library(swiftDispatch diff --git a/src/unifdef.fwAj06 b/src/unifdef.3Sycbk similarity index 100% rename from src/unifdef.fwAj06 rename to src/unifdef.3Sycbk From c57b823deaaf626e25e03f5d87d3e55c694b83c8 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 15 Jun 2019 09:31:54 -0700 Subject: [PATCH 181/249] Merge pull request #500 from compnerd/windows-android Windows Android cross-compilation build support Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.3Sycbk => unifdef.TJ0fuH} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.3Sycbk => unifdef.TJ0fuH} (100%) diff --git a/PATCHES b/PATCHES index cee54933c..a455aad97 100644 --- a/PATCHES +++ b/PATCHES @@ -510,3 +510,4 @@ github commits starting with 29bdc2f from [2b14a98] APPLIED rdar://54572081 [d32596b] APPLIED rdar://54572081 [52bc6b2] APPLIED rdar://54572081 +[4169c8d] APPLIED rdar://54572081 diff --git a/src/unifdef.3Sycbk b/src/unifdef.TJ0fuH similarity index 100% rename from src/unifdef.3Sycbk rename to src/unifdef.TJ0fuH From 97b5c25bac67775423f1ca53761275d20e6096c6 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 11 Jun 2019 15:14:46 -0700 Subject: [PATCH 182/249] io: fix dispatch_io_create_with_path() on Windows This function assumes POSIX-style paths. Add Windows-specific checks for absolute paths and separators. Additionally, `_dispatch_fd_entry_guarded_open()` does not handle the open flags correctly. `O_RDONLY` is 0, so we need to use a switch statement to check for it. `O_EXCL` is not handled at all. `O_TRUNC`'s behavior needs to change depending on whether `O_CREAT` is specified. Finally, we must open files with maximum sharing to match the semantics of other platforms. (The dispatch_io test already has a handle open when it calls `dispatch_io_create_with_path()`.) 
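(The `O_RDONLY` pitfall is easy to reproduce in isolation. A minimal self-contained sketch using the POSIX flag names — the Windows CRT's `_O_RDONLY` is likewise defined as 0; the helper name `access_mode` is made up for the example.)

    #include <fcntl.h>
    #include <stdio.h>

    /* O_RDONLY is 0 on common platforms, so (oflag & O_RDONLY) is always 0
     * and a chain of bit tests silently misclassifies read-only opens.
     * Masking the access-mode bits and switching handles all three modes. */
    static const char *access_mode(int oflag)
    {
        switch (oflag & (O_RDONLY | O_WRONLY | O_RDWR)) {
        case O_RDONLY: return "read";       /* matches even though it is 0 */
        case O_WRONLY: return "write";
        case O_RDWR:   return "read/write";
        default:       return "invalid";
        }
    }

    int main(void)
    {
        printf("%s\n", access_mode(O_RDONLY)); /* prints "read" */
        printf("%s\n", access_mode(O_RDWR));   /* prints "read/write" */
        return 0;
    }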
Signed-off-by: Kim Topley --- src/CMakeLists.txt | 1 + src/io.c | 65 ++++++++++++++++++++------ src/shims/generic_win_stubs.h | 1 + src/{unifdef.TJ0fuH => unifdef.ABj5jI} | 0 4 files changed, 53 insertions(+), 14 deletions(-) rename src/{unifdef.TJ0fuH => unifdef.ABj5jI} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index eea7fa1fe..843d1e4c7 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -244,6 +244,7 @@ target_link_libraries(dispatch if(CMAKE_SYSTEM_NAME STREQUAL Windows) target_link_libraries(dispatch PRIVATE + ShLwApi WS2_32 WinMM synchronization) diff --git a/src/io.c b/src/io.c index b0b3b9a9b..b55e8a477 100644 --- a/src/io.c +++ b/src/io.c @@ -401,15 +401,29 @@ dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, ^(int error){ cleanup_handler(context, error); }); } +#if defined(_WIN32) +#define _is_separator(ch) ((ch) == '/' || (ch) == '\\') +#else +#define _is_separator(ch) ((ch) == '/') +#endif + dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, void (^cleanup_handler)(int error)) { - if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || - !(*path == '/')) { + if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { + return DISPATCH_BAD_INPUT; + } +#if defined(_WIN32) + if (PathIsRelativeA(path)) { + return DISPATCH_BAD_INPUT; + } +#else + if (!_is_separator(*path)) { return DISPATCH_BAD_INPUT; } +#endif size_t pathlen = strlen(path); dispatch_io_path_data_t path_data = malloc(sizeof(*path_data) + pathlen+1); if (!path_data) { @@ -444,9 +458,15 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, break; default: if ((path_data->oflag & O_CREAT) && - (*(path_data->path + path_data->pathlen - 1) != '/')) { + !_is_separator(*(path_data->path + path_data->pathlen - 1))) { // Check parent directory - char *c = strrchr(path_data->path, '/'); + char *c = NULL; + for (ssize_t i = (ssize_t)path_data->pathlen - 1; i >= 0; i--) { + if (_is_separator(path_data->path[i])) { + c = &path_data->path[i]; + break; + } + } dispatch_assert(c); *c = 0; int perr; @@ -460,7 +480,11 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, err = 0; break; ); +#if defined(_WIN32) + *c = '\\'; +#else *c = '/'; +#endif } break; ); @@ -1282,18 +1306,31 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, #if defined(_WIN32) (void)mode; DWORD dwDesiredAccess = 0; - if (oflag & _O_RDWR) - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - else if (oflag & _O_RDONLY) - dwDesiredAccess = GENERIC_READ; - else if (oflag & _O_WRONLY) - dwDesiredAccess = GENERIC_WRITE; + switch (oflag & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { + case _O_RDONLY: + dwDesiredAccess = GENERIC_READ; + break; + case _O_WRONLY: + dwDesiredAccess = GENERIC_WRITE; + break; + case _O_RDWR: + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; + } DWORD dwCreationDisposition = OPEN_EXISTING; - if (oflag & _O_CREAT) + if (oflag & _O_CREAT) { dwCreationDisposition = OPEN_ALWAYS; - if (oflag & _O_TRUNC) - dwCreationDisposition = CREATE_ALWAYS; - return (dispatch_fd_t)CreateFile(path, dwDesiredAccess, 0, NULL, dwCreationDisposition, 0, NULL); + if (oflag & _O_EXCL) { + dwCreationDisposition = CREATE_NEW; + } else if (oflag & _O_TRUNC) { + dwCreationDisposition = CREATE_ALWAYS; + } + } else if (oflag & _O_TRUNC) { + dwCreationDisposition = TRUNCATE_EXISTING; + } + return (dispatch_fd_t)CreateFile(path, dwDesiredAccess, + FILE_SHARE_READ | 
FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, + dwCreationDisposition, 0, NULL); #else return open(path, oflag, mode); #endif diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h index 10b14beeb..7d38adb29 100644 --- a/src/shims/generic_win_stubs.h +++ b/src/shims/generic_win_stubs.h @@ -6,6 +6,7 @@ #include #include +#include #include #include diff --git a/src/unifdef.TJ0fuH b/src/unifdef.ABj5jI similarity index 100% rename from src/unifdef.TJ0fuH rename to src/unifdef.ABj5jI From 68724f51f1c90656310a4f9c6367fdaaf85797a3 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 26 Jun 2019 15:07:07 -0700 Subject: [PATCH 183/249] Merge pull request #494 from adierking/iopath io: fix dispatch_io_create_with_path() on Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.ABj5jI => unifdef.TyHCfh} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.ABj5jI => unifdef.TyHCfh} (100%) diff --git a/PATCHES b/PATCHES index a455aad97..57748a0b4 100644 --- a/PATCHES +++ b/PATCHES @@ -511,3 +511,4 @@ github commits starting with 29bdc2f from [d32596b] APPLIED rdar://54572081 [52bc6b2] APPLIED rdar://54572081 [4169c8d] APPLIED rdar://54572081 +[318f6e5] APPLIED rdar://54572081 diff --git a/src/unifdef.ABj5jI b/src/unifdef.TyHCfh similarity index 100% rename from src/unifdef.ABj5jI rename to src/unifdef.TyHCfh From 129e9432d406dff15141407c871cd1ace663c970 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 27 Jun 2019 06:49:45 -0700 Subject: [PATCH 184/249] Merge pull request #495 from adierking/test-all-the-things tests: port dispatch_io and dispatch_io_pipe_close to Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.TyHCfh => unifdef.L4FT8G} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.TyHCfh => unifdef.L4FT8G} (100%) diff --git a/PATCHES b/PATCHES index 57748a0b4..2222b2c59 100644 --- a/PATCHES +++ b/PATCHES @@ -512,3 +512,4 @@ github commits starting with 29bdc2f from [52bc6b2] APPLIED rdar://54572081 [4169c8d] APPLIED rdar://54572081 [318f6e5] APPLIED rdar://54572081 +[6a36af8] APPLIED rdar://54572081 diff --git a/src/unifdef.TyHCfh b/src/unifdef.L4FT8G similarity index 100% rename from src/unifdef.TyHCfh rename to src/unifdef.L4FT8G From 97cf07a7bb0d00d32160261d89509b2ef2acb549 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 18 Jun 2019 15:27:45 -0700 Subject: [PATCH 185/249] event: support pipe sources on Windows This implements full support for using pipe sources on Windows to trigger an event when a pipe becomes readable or writable. Due to differences between the Windows asynchronous I/O model and the Unix model, this is rather complex. On Windows, the standard mechanism to achieve asynchronous pipe I/O is to just read or write from the pipe and then get notified once the operation completes. Unlike the Unix `select()`/`poll()` model, there is no way to simply know when a pipe becomes readable or writable without actually running an operation. So we need to resort to several tricks in order to achieve the semantics that Dispatch wants here. To monitor a pipe for readability, we take advantage of the fact that a zero-byte `ReadFile()` on a pipe will block until data becomes available in the pipe. A muxnote which monitors a pipe for reading will spin up a lightweight thread which repeatedly calls `ReadFile()` (blocking) on the pipe and posts back to the I/O completion queue when it returns. 
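(In isolation, the zero-byte read trick looks roughly like the following — a hedged sketch rather than the actual monitor thread. It assumes `hPipe` is a blocking-mode read handle and relies on the blocking behavior described above; `wait_until_readable` is a made-up name.)

    #include <windows.h>

    /* Block until the pipe has data, without consuming any of it. Per the
     * description above, a zero-byte ReadFile() on a pipe does not return
     * until data arrives (or the write end breaks). Returns the number of
     * bytes then available, or 0 on EOF/error. */
    static DWORD wait_until_readable(HANDLE hPipe)
    {
        char cUnused;
        DWORD dwBytesRead, dwBytesAvailable;
        if (!ReadFile(hPipe, &cUnused, /* nNumberOfBytesToRead */ 0,
                &dwBytesRead, /* lpOverlapped */ NULL)) {
            return 0; /* broken pipe or other failure */
        }
        /* Nothing was consumed; peek to see how much is waiting. */
        if (!PeekNamedPipe(hPipe, NULL, 0, NULL, &dwBytesAvailable, NULL)) {
            return 0;
        }
        return dwBytesAvailable;
    }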
To monitor pipes for writability, we use the `NtQueryInformationFile()` kernel API to get the amount of available space in the pipe's write buffer. There is no way to block here, so we have no choice but to continually disarm and rearm unotes until space becomes available. This is inefficient, but it generally seems to work OK. In order to test this, I implemented a new dispatch_io_pipe test which performs various read and write operations on pipes. On Windows, this will run the tests on most of the different pipe kinds (anonymous, named, inbound, outbound, overlapped). This caught a lot of issues in the Windows `_dispatch_operation_perform()` which I fixed along the way. The dispatch_io and dispatch_io_pipe_close tests pass as well with my other pull request applied. Signed-off-by: Kim Topley --- src/event/event_windows.c | 356 +++++++++++++++++++++++-- src/io.c | 90 ++++++- src/shims/generic_win_stubs.c | 31 +++ src/shims/generic_win_stubs.h | 24 ++ src/{unifdef.L4FT8G => unifdef.ayHOEu} | 0 5 files changed, 470 insertions(+), 31 deletions(-) rename src/{unifdef.L4FT8G => unifdef.ayHOEu} (100%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 33a8bad73..23e1eb78c 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -28,18 +28,44 @@ enum _dispatch_windows_port { DISPATCH_PORT_TIMER_CLOCK_UPTIME, DISPATCH_PORT_TIMER_CLOCK_MONOTONIC, DISPATCH_PORT_FILE_HANDLE, + DISPATCH_PORT_PIPE_HANDLE_READ, + DISPATCH_PORT_PIPE_HANDLE_WRITE, +}; + +enum _dispatch_muxnote_events { + DISPATCH_MUXNOTE_EVENT_READ = 1 << 0, + DISPATCH_MUXNOTE_EVENT_WRITE = 1 << 1, }; #pragma mark dispatch_unote_t typedef struct dispatch_muxnote_s { LIST_ENTRY(dispatch_muxnote_s) dmn_list; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; + + // This refcount solves a race condition that can happen with I/O completion + // ports. When we enqueue packets with muxnote pointers associated with + // them, it's possible that those packets might not be processed until after + // the event has been unregistered. We increment this upon creating a + // muxnote or posting to a completion port, and we decrement it upon + // unregistering the event or processing a packet. When it hits zero, we + // dispose the muxnote. 
+ os_atomic(uintptr_t) dmn_refcount; + dispatch_unote_ident_t dmn_ident; int8_t dmn_filter; enum _dispatch_muxnote_handle_type { DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID, DISPATCH_MUXNOTE_HANDLE_TYPE_FILE, + DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE, } dmn_handle_type; + enum _dispatch_muxnote_events dmn_events; + + // Used by the pipe monitoring thread + HANDLE dmn_thread; + HANDLE dmn_event; + os_atomic(bool) dmn_stop; } *dispatch_muxnote_t; static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) @@ -71,7 +97,8 @@ _dispatch_unote_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, } static dispatch_muxnote_t -_dispatch_muxnote_create(dispatch_unote_t du) +_dispatch_muxnote_create(dispatch_unote_t du, + enum _dispatch_muxnote_events events) { dispatch_muxnote_t dmn; int8_t filter = du._du->du_filter; @@ -81,12 +108,18 @@ _dispatch_muxnote_create(dispatch_unote_t du) if (dmn == NULL) { DISPATCH_INTERNAL_CRASH(0, "_dispatch_calloc"); } + os_atomic_store(&dmn->dmn_refcount, 1, relaxed); dmn->dmn_ident = (dispatch_unote_ident_t)handle; dmn->dmn_filter = filter; + dmn->dmn_events = events; + LIST_INIT(&dmn->dmn_readers_head); + LIST_INIT(&dmn->dmn_writers_head); switch (filter) { case EVFILT_SIGNAL: WIN_PORT_ERROR(); + free(dmn); + return NULL; case EVFILT_WRITE: case EVFILT_READ: @@ -103,17 +136,28 @@ _dispatch_muxnote_create(dispatch_unote_t du) // The specified file is a character file, typically a // LPT device or a console. WIN_PORT_ERROR(); + free(dmn); + return NULL; case FILE_TYPE_DISK: // The specified file is a disk file - dmn->dmn_handle_type = - DISPATCH_MUXNOTE_HANDLE_TYPE_FILE; + dmn->dmn_handle_type = DISPATCH_MUXNOTE_HANDLE_TYPE_FILE; break; case FILE_TYPE_PIPE: // The specified file is a socket, a named pipe, or an - // anonymous pipe. - WIN_PORT_ERROR(); + // anonymous pipe. Use GetNamedPipeInfo() to distinguish between + // a pipe and a socket. Despite its name, it also succeeds for + // anonymous pipes. + if (!GetNamedPipeInfo(handle, NULL, NULL, NULL, NULL)) { + // We'll get ERROR_ACCESS_DENIED for outbound pipes. + if (GetLastError() != ERROR_ACCESS_DENIED) { + // The file is probably a socket. 
+ WIN_PORT_ERROR(); + } + } + dmn->dmn_handle_type = DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE; + break; } break; @@ -126,13 +170,136 @@ _dispatch_muxnote_create(dispatch_unote_t du) return dmn; } +static void +_dispatch_muxnote_stop(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_thread) { + // Keep trying to cancel ReadFile() until the thread exits + os_atomic_store(&dmn->dmn_stop, true, relaxed); + SetEvent(dmn->dmn_event); + do { + CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); + } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); + CloseHandle(dmn->dmn_thread); + dmn->dmn_thread = NULL; + } + if (dmn->dmn_event) { + CloseHandle(dmn->dmn_event); + dmn->dmn_event = NULL; + } +} + static void _dispatch_muxnote_dispose(dispatch_muxnote_t dmn) { + if (dmn->dmn_thread) { + DISPATCH_INTERNAL_CRASH(0, "disposed a muxnote with an active thread"); + } free(dmn); } -DISPATCH_ALWAYS_INLINE +static void +_dispatch_muxnote_retain(dispatch_muxnote_t dmn) +{ + uintptr_t refcount = os_atomic_inc(&dmn->dmn_refcount, relaxed); + if (refcount == 0) { + DISPATCH_INTERNAL_CRASH(0, "muxnote refcount overflow"); + } + if (refcount == 1) { + DISPATCH_INTERNAL_CRASH(0, "retained a disposing muxnote"); + } +} + +static void +_dispatch_muxnote_release(dispatch_muxnote_t dmn) +{ + uintptr_t refcount = os_atomic_dec(&dmn->dmn_refcount, relaxed); + if (refcount == 0) { + _dispatch_muxnote_dispose(dmn); + } else if (refcount == UINTPTR_MAX) { + DISPATCH_INTERNAL_CRASH(0, "muxnote refcount underflow"); + } +} + +static unsigned WINAPI +_dispatch_pipe_monitor_thread(void *context) +{ + dispatch_muxnote_t dmn = (dispatch_muxnote_t)context; + HANDLE hPipe = (HANDLE)dmn->dmn_ident; + do { + char cBuffer[1]; + DWORD dwNumberOfBytesTransferred; + OVERLAPPED ov = {0}; + BOOL bSuccess = ReadFile(hPipe, cBuffer, /* nNumberOfBytesToRead */ 0, + &dwNumberOfBytesTransferred, &ov); + DWORD dwBytesAvailable; + DWORD dwError = GetLastError(); + if (!bSuccess && dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hPipe, &ov, + &dwNumberOfBytesTransferred, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (bSuccess) { + bSuccess = PeekNamedPipe(hPipe, NULL, 0, NULL, &dwBytesAvailable, + NULL); + dwError = GetLastError(); + } + if (bSuccess) { + if (dwBytesAvailable == 0) { + // This can happen with a zero-byte write. Try again. + continue; + } + } else if (dwError == ERROR_NO_DATA) { + // The pipe is nonblocking. Try again. + Sleep(0); + continue; + } else { + _dispatch_debug("pipe[0x%llx]: GetLastError() returned %lu", + (long long)hPipe, dwError); + if (dwError == ERROR_OPERATION_ABORTED) { + continue; + } + os_atomic_store(&dmn->dmn_stop, true, relaxed); + dwBytesAvailable = 0; + } + + // Make sure the muxnote stays alive until the packet is dequeued + _dispatch_muxnote_retain(dmn); + + // The lpOverlapped parameter does not actually need to point to an + // OVERLAPPED struct. It's really just a pointer to pass back to + // GetQueuedCompletionStatus(). + bSuccess = PostQueuedCompletionStatus(hPort, + dwBytesAvailable, (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_READ, + (LPOVERLAPPED)dmn); + if (!bSuccess) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + + // If data is written into the pipe and not read right away, ReadFile() + // will keep returning immediately and we'll flood the completion port. + // This event lets us synchronize with _dispatch_event_loop_drain() so + // that we only post events when it's ready for them. 
+ WaitForSingleObject(dmn->dmn_event, INFINITE); + } while (!os_atomic_load(&dmn->dmn_stop, relaxed)); + _dispatch_debug("pipe[0x%llx]: monitor exiting", (long long)hPipe); + return 0; +} + +static DWORD +_dispatch_pipe_write_availability(HANDLE hPipe) +{ + IO_STATUS_BLOCK iosb; + FILE_PIPE_LOCAL_INFORMATION fpli; + NTSTATUS status = _dispatch_NtQueryInformationFile(hPipe, &iosb, &fpli, + sizeof(fpli), FilePipeLocalInformation); + if (!NT_SUCCESS(status)) { + return 1; + } + return fpli.WriteQuotaAvailable; +} + static BOOL _dispatch_io_trigger(dispatch_muxnote_t dmn) { @@ -150,9 +317,56 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) "PostQueuedCompletionStatus"); } break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + if ((dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) && + !dmn->dmn_thread) { + HANDLE hThread = (HANDLE)_beginthreadex(/* security */ NULL, + /* stack_size */ 1, _dispatch_pipe_monitor_thread, + (void *)dmn, /* initflag */ 0, /* thrdaddr */ NULL); + if (!hThread) { + DISPATCH_INTERNAL_CRASH(errno, "_beginthread"); + } + HANDLE hEvent = CreateEventW(NULL, /* bManualReset */ FALSE, + /* bInitialState */ FALSE, NULL); + if (!hEvent) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + dmn->dmn_thread = hThread; + dmn->dmn_event = hEvent; + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + _dispatch_muxnote_retain(dmn); + DWORD available = + _dispatch_pipe_write_availability((HANDLE)dmn->dmn_ident); + bSuccess = PostQueuedCompletionStatus(hPort, available, + (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_WRITE, + (LPOVERLAPPED)dmn); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + break; } - return bSuccess; + return TRUE; +} + +DISPATCH_ALWAYS_INLINE +static inline enum _dispatch_muxnote_events +_dispatch_unote_required_events(dispatch_unote_t du) +{ + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + return 0; + case EVFILT_WRITE: + return DISPATCH_MUXNOTE_EVENT_WRITE; + default: + return DISPATCH_MUXNOTE_EVENT_READ; + } } bool @@ -160,37 +374,52 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) { struct dispatch_muxnote_bucket_s *dmb; dispatch_muxnote_t dmn; + enum _dispatch_muxnote_events events; + + events = _dispatch_unote_required_events(du); dmb = _dispatch_unote_muxnote_bucket(du._du->du_ident); dmn = _dispatch_unote_muxnote_find(dmb, du._du->du_ident, du._du->du_filter); if (dmn) { WIN_PORT_ERROR(); + DISPATCH_INTERNAL_CRASH(0, "muxnote updating is not supported"); } else { - dmn = _dispatch_muxnote_create(du); - if (dmn) { - if (_dispatch_io_trigger(dmn) == FALSE) { - _dispatch_muxnote_dispose(dmn); - dmn = NULL; - } else { - LIST_INSERT_HEAD(dmb, dmn, dmn_list); - } + dmn = _dispatch_muxnote_create(du, events); + if (!dmn) { + return false; } + if (_dispatch_io_trigger(dmn) == FALSE) { + _dispatch_muxnote_release(dmn); + return false; + } + LIST_INSERT_HEAD(dmb, dmn, dmn_list); } - if (dmn) { - dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: AcquireSRWLockExclusive(&_dispatch_file_handles_lock); LIST_INSERT_HEAD(&_dispatch_file_handles, dul, du_link); ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + break; - dul->du_muxnote = dmn; - 
_dispatch_unote_state_set(du, DISPATCH_WLH_ANON, - DU_STATE_ARMED); + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + if (events & DISPATCH_MUXNOTE_EVENT_READ) { + LIST_INSERT_HEAD(&dmn->dmn_readers_head, dul, du_link); + } else if (events & DISPATCH_MUXNOTE_EVENT_WRITE) { + LIST_INSERT_HEAD(&dmn->dmn_writers_head, dul, du_link); + } + break; } - return dmn != NULL; + dul->du_muxnote = dmn; + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); + + return true; } void @@ -208,21 +437,34 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); dispatch_muxnote_t dmn = dul->du_muxnote; - AcquireSRWLockExclusive(&_dispatch_file_handles_lock); - LIST_REMOVE(dul, du_link); - _LIST_TRASH_ENTRY(dul, du_link); - ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + AcquireSRWLockExclusive(&_dispatch_file_handles_lock); + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); + ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); + break; + } dul->du_muxnote = NULL; LIST_REMOVE(dmn, dmn_list); - _dispatch_muxnote_dispose(dmn); + _dispatch_muxnote_stop(dmn); + _dispatch_muxnote_release(dmn); _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); return true; } static void -_dispatch_event_merge_file_handle() +_dispatch_event_merge_file_handle(void) { dispatch_unote_linkage_t dul, dul_next; @@ -240,6 +482,56 @@ _dispatch_event_merge_file_handle() ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); } +static void +_dispatch_event_merge_pipe_handle_read(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state &= ~DU_STATE_ARMED; + uintptr_t data = dwBytesAvailable; + uint32_t flags; + if (dwBytesAvailable > 0) { + flags = EV_ADD | EV_ENABLE | EV_DISPATCH; + } else { + du_state |= DU_STATE_NEEDS_DELETE; + flags = EV_DELETE | EV_DISPATCH; + } + _dispatch_unote_state_set(du, du_state); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, flags, data, 0); + } + SetEvent(dmn->dmn_event); + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_pipe_handle_write(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + uintptr_t data = dwBytesAvailable; + if (dwBytesAvailable > 0) { + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + } else { + os_atomic_store2o(du._dr, ds_pending_data, 0, relaxed); + } + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, data, 0); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + #pragma mark timers typedef struct _dispatch_windows_timeout_s { @@ 
-414,6 +706,16 @@ _dispatch_event_loop_drain(uint32_t flags) _dispatch_event_merge_file_handle(); break; + case DISPATCH_PORT_PIPE_HANDLE_READ: + _dispatch_event_merge_pipe_handle_read((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + case DISPATCH_PORT_PIPE_HANDLE_WRITE: + _dispatch_event_merge_pipe_handle_write((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + default: DISPATCH_INTERNAL_CRASH(ulCompletionKey, "unsupported completion key"); diff --git a/src/io.c b/src/io.c index b55e8a477..1c63a6b85 100644 --- a/src/io.c +++ b/src/io.c @@ -2405,7 +2405,43 @@ _dispatch_operation_perform(dispatch_operation_t op) if (op->direction == DOP_DIR_READ) { if (op->params.type == DISPATCH_IO_STREAM) { #if defined(_WIN32) - ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); + HANDLE hFile = (HANDLE)op->fd_entry->fd; + BOOL bSuccess; + if (GetFileType(hFile) == FILE_TYPE_PIPE) { + OVERLAPPED ovlOverlapped = {}; + DWORD dwTotalBytesAvail; + bSuccess = PeekNamedPipe(hFile, NULL, 0, NULL, + &dwTotalBytesAvail, NULL); + if (bSuccess) { + if (dwTotalBytesAvail == 0) { + err = EAGAIN; + goto error; + } + len = MIN(len, dwTotalBytesAvail); + bSuccess = ReadFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); + } + if (!bSuccess) { + DWORD dwError = GetLastError(); + if (dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hFile, &ovlOverlapped, + (LPDWORD)&processed, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (dwError == ERROR_BROKEN_PIPE || + dwError == ERROR_NO_DATA) { + bSuccess = TRUE; + processed = 0; + } + } + } else { + bSuccess = ReadFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, NULL); + } + if (!bSuccess) { + err = EIO; + goto error; + } #else processed = read(op->fd_entry->fd, buf, len); #endif @@ -2414,7 +2450,8 @@ _dispatch_operation_perform(dispatch_operation_t op) OVERLAPPED ovlOverlapped = {}; ovlOverlapped.Offset = off & 0xffffffff; ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; - ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); + ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); #else processed = pread(op->fd_entry->fd, buf, len, off); #endif @@ -2422,7 +2459,51 @@ _dispatch_operation_perform(dispatch_operation_t op) } else if (op->direction == DOP_DIR_WRITE) { if (op->params.type == DISPATCH_IO_STREAM) { #if defined(_WIN32) - WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); + HANDLE hFile = (HANDLE)op->fd_entry->fd; + BOOL bSuccess; + if (GetFileType(hFile) == FILE_TYPE_PIPE) { + // Unfortunately there isn't a good way to achieve O_NONBLOCK + // semantics when writing to a pipe. SetNamedPipeHandleState() + // can allow pipes to be switched into a "no wait" mode, but + // that doesn't work on most pipe handles because Windows + // doesn't consistently create pipes with FILE_WRITE_ATTRIBUTES + // access. The best we can do is to try to query the write quota + // and then write as much as we can. 
+ IO_STATUS_BLOCK iosb; + FILE_PIPE_LOCAL_INFORMATION fpli; + NTSTATUS status = _dispatch_NtQueryInformationFile(hFile, &iosb, + &fpli, sizeof(fpli), FilePipeLocalInformation); + if (NT_SUCCESS(status)) { + if (fpli.WriteQuotaAvailable == 0) { + err = EAGAIN; + goto error; + } + len = MIN(len, fpli.WriteQuotaAvailable); + } + OVERLAPPED ovlOverlapped = {}; + bSuccess = WriteFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); + if (!bSuccess) { + DWORD dwError = GetLastError(); + if (dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hFile, &ovlOverlapped, + (LPDWORD)&processed, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (dwError == ERROR_BROKEN_PIPE || + dwError == ERROR_NO_DATA) { + bSuccess = TRUE; + processed = 0; + } + } + } else { + bSuccess = WriteFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, NULL); + } + if (!bSuccess) { + err = EIO; + goto error; + } #else processed = write(op->fd_entry->fd, buf, len); #endif @@ -2431,7 +2512,8 @@ _dispatch_operation_perform(dispatch_operation_t op) OVERLAPPED ovlOverlapped = {}; ovlOverlapped.Offset = off & 0xffffffff; ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; - WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); + WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); #else processed = pwrite(op->fd_entry->fd, buf, len, off); #endif diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c index c48eef66a..b976075af 100644 --- a/src/shims/generic_win_stubs.c +++ b/src/shims/generic_win_stubs.c @@ -6,6 +6,13 @@ DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_precise_time_pred); DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryInterruptTimePrecise_ptr); DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryUnbiasedInterruptTimePrecise_ptr); +typedef NTSTATUS (NTAPI *_NtQueryInformationFile_fn_t)(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_ntdll_pred); +DISPATCH_STATIC_GLOBAL(_NtQueryInformationFile_fn_t _dispatch_NtQueryInformationFile_ptr); + static void _dispatch_init_precise_time(void *context DISPATCH_UNUSED) { @@ -38,3 +45,27 @@ _dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePr dispatch_once_f(&_dispatch_precise_time_pred, NULL, _dispatch_init_precise_time); return _dispatch_QueryUnbiasedInterruptTimePrecise_ptr(lpUnbiasedInterruptTimePrecise); } + +static void +_dispatch_init_ntdll(void *context DISPATCH_UNUSED) +{ + HMODULE ntdll = LoadLibraryW(L"ntdll.dll"); + if (!ntdll) { + // ntdll is not required. 
+ return; + } + _dispatch_NtQueryInformationFile_ptr = (_NtQueryInformationFile_fn_t) + GetProcAddress(ntdll, "NtQueryInformationFile"); +} + +NTSTATUS _dispatch_NtQueryInformationFile(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass) +{ + dispatch_once_f(&_dispatch_ntdll_pred, NULL, _dispatch_init_ntdll); + if (!_dispatch_NtQueryInformationFile_ptr) { + return STATUS_NOT_SUPPORTED; + } + return _dispatch_NtQueryInformationFile_ptr(FileHandle, IoStatusBlock, + FileInformation, Length, FileInformationClass); +} diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h index 7d38adb29..1f7f4eaa3 100644 --- a/src/shims/generic_win_stubs.h +++ b/src/shims/generic_win_stubs.h @@ -6,7 +6,9 @@ #include #include +#include #include +#include #include #include @@ -40,7 +42,29 @@ typedef __typeof__(_Generic((__SIZE_TYPE__)0, \ /* * Wrappers for dynamically loaded Windows APIs */ + void _dispatch_QueryInterruptTimePrecise(PULONGLONG lpInterruptTimePrecise); void _dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePrecise); +enum { + FilePipeLocalInformation = 24, +}; + +typedef struct _FILE_PIPE_LOCAL_INFORMATION { + ULONG NamedPipeType; + ULONG NamedPipeConfiguration; + ULONG MaximumInstances; + ULONG CurrentInstances; + ULONG InboundQuota; + ULONG ReadDataAvailable; + ULONG OutboundQuota; + ULONG WriteQuotaAvailable; + ULONG NamedPipeState; + ULONG NamedPipeEnd; +} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION; + +NTSTATUS _dispatch_NtQueryInformationFile(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + #endif diff --git a/src/unifdef.L4FT8G b/src/unifdef.ayHOEu similarity index 100% rename from src/unifdef.L4FT8G rename to src/unifdef.ayHOEu From 0e21537f220dd42297416cd0af2f7e82cf4816d5 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 27 Jun 2019 06:50:12 -0700 Subject: [PATCH 186/249] Merge pull request #501 from adierking/peep-the-pipes event: support pipe sources on Windows Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.ayHOEu => unifdef.MbnXlb} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.ayHOEu => unifdef.MbnXlb} (100%) diff --git a/PATCHES b/PATCHES index 2222b2c59..490053e5e 100644 --- a/PATCHES +++ b/PATCHES @@ -513,3 +513,4 @@ github commits starting with 29bdc2f from [4169c8d] APPLIED rdar://54572081 [318f6e5] APPLIED rdar://54572081 [6a36af8] APPLIED rdar://54572081 +[d11d565] APPLIED rdar://54572081 diff --git a/src/unifdef.ayHOEu b/src/unifdef.MbnXlb similarity index 100% rename from src/unifdef.ayHOEu rename to src/unifdef.MbnXlb From 0546f27fbe0e7d39457309359ecace5693efd003 Mon Sep 17 00:00:00 2001 From: Ron Olson Date: Sat, 29 Jun 2019 18:40:17 -0500 Subject: [PATCH 187/249] sys/sysctl.h is deprecated on Linux and a warning has been added to that file that is treated as an error when compiling; see https://patches-gcc.linaro.org/patch/19443/#34639 for the discussion. This patch adds a guard to prevent the file from being included when compiling on Linux. 
Signed-off-by: Kim Topley
---
 src/internal.h                         | 3 +--
 src/{unifdef.MbnXlb => unifdef.oiq1iV} | 0
 2 files changed, 1 insertion(+), 2 deletions(-)
 rename src/{unifdef.MbnXlb => unifdef.oiq1iV} (100%)

diff --git a/src/internal.h b/src/internal.h
index c50591bfd..a65aff691 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -280,10 +280,9 @@ upcast(dispatch_object_t dou)
 #include
 #ifdef __ANDROID__
 #include <linux/sysctl.h>
-#else
-#include <sys/sysctl.h>
 #endif /* __ANDROID__ */
 #if !defined(__linux__)
+#include <sys/sysctl.h>
 #include
 #endif
 #include
diff --git a/src/unifdef.MbnXlb b/src/unifdef.oiq1iV
similarity index 100%
rename from src/unifdef.MbnXlb
rename to src/unifdef.oiq1iV

From 5cc85691decc7075984df56ed788d5dcb7f3f52c Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Tue, 2 Jul 2019 06:57:53 -0700
Subject: [PATCH 188/249] Merge pull request #502 from tachoknight/master

sys/sysctl.h is deprecated on Linux

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.oiq1iV => unifdef.Dqo6vU} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.oiq1iV => unifdef.Dqo6vU} (100%)

diff --git a/PATCHES b/PATCHES
index 490053e5e..c365aba6f 100644
--- a/PATCHES
+++ b/PATCHES
@@ -514,3 +514,4 @@ github commits starting with 29bdc2f from
 [318f6e5] APPLIED rdar://54572081
 [6a36af8] APPLIED rdar://54572081
 [d11d565] APPLIED rdar://54572081
+[d9740c2] APPLIED rdar://54572081
diff --git a/src/unifdef.oiq1iV b/src/unifdef.Dqo6vU
similarity index 100%
rename from src/unifdef.oiq1iV
rename to src/unifdef.Dqo6vU

From 8c8cda47c53f2f52dad4fc8ed1f892bba2ecac3f Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Fri, 12 Jul 2019 18:34:55 -0700
Subject: [PATCH 189/249] build: add DT_SONAME for ELF libraries

ELF libraries should have DT_SONAME set so that they can be loaded
properly. This is currently needed because we do not use CMake's proper
Swift support, which requires that we pass the `-soname` flag to the
linker ourselves. This improves loading of swiftDispatch on Android.
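For illustration (hypothetical consumer code, assuming a Linux host with
libdispatch.so installed on the loader search path): once DT_SONAME carries
the bare file name, the dynamic loader can resolve the library by that name
instead of by whatever path the linker happened to see at build time.

    /* soname_demo.c: build with `cc soname_demo.c -ldl`. */
    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
        /* Resolved via the recorded soname, not a build-tree path. */
        void *handle = dlopen("libdispatch.so", RTLD_NOW);
        if (handle == NULL) {
            fprintf(stderr, "dlopen: %s\n", dlerror());
            return 1;
        }
        puts("resolved by soname");
        dlclose(handle);
        return 0;
    }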
Signed-off-by: Kim Topley
---
 cmake/modules/SwiftSupport.cmake       | 6 +++++-
 src/{unifdef.Dqo6vU => unifdef.EKMKwY} | 0
 2 files changed, 5 insertions(+), 1 deletion(-)
 rename src/{unifdef.Dqo6vU => unifdef.EKMKwY} (100%)

diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake
index ead1958b8..ebdfd5d92 100644
--- a/cmake/modules/SwiftSupport.cmake
+++ b/cmake/modules/SwiftSupport.cmake
@@ -133,7 +133,11 @@ function(add_swift_target target)
 endif()

 if(AST_LIBRARY)
- set(emit_library -emit-library)
+ if(CMAKE_SYSTEM_NAME STREQUAL Windows OR CMAKE_SYSTEM_NAME STREQUAL Darwin)
+ set(emit_library -emit-library)
+ else()
+ set(emit_library -emit-library -Xlinker -soname -Xlinker ${AST_OUTPUT})
+ endif()
 endif()
 if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED)
 add_custom_command(OUTPUT
diff --git a/src/unifdef.Dqo6vU b/src/unifdef.EKMKwY
similarity index 100%
rename from src/unifdef.Dqo6vU
rename to src/unifdef.EKMKwY

From 3add5820728aa1db597d3dedcb64bc9767ef5ae2 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Sat, 13 Jul 2019 22:26:07 -0700
Subject: [PATCH 190/249] Merge pull request #503 from compnerd/soname-sosad

build: add DT_SONAME for ELF libraries

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.EKMKwY => unifdef.zKO5nq} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.EKMKwY => unifdef.zKO5nq} (100%)

diff --git a/PATCHES b/PATCHES
index c365aba6f..568ada6dd 100644
--- a/PATCHES
+++ b/PATCHES
@@ -515,3 +515,4 @@ github commits starting with 29bdc2f from
 [6a36af8] APPLIED rdar://54572081
 [d11d565] APPLIED rdar://54572081
 [d9740c2] APPLIED rdar://54572081
+[fc917b4] APPLIED rdar://54572081
diff --git a/src/unifdef.EKMKwY b/src/unifdef.zKO5nq
similarity index 100%
rename from src/unifdef.EKMKwY
rename to src/unifdef.zKO5nq

From 0dec444a6039855791154d1931f64cc5dbdb33b3 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Mon, 15 Jul 2019 08:42:01 -0700
Subject: [PATCH 191/249] build: correct the `DT_SONAME`

When ninja is invoked from a subdirectory, it converts the path to a
relative path, which resulted in incorrect test results. This change
ensures that we get the correct soname.
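A sketch for verifying the fix (glibc-specific and illustrative only; the
dlinfo() walk below is an assumption about the test environment, not part
of the patch): the printed DT_SONAME should now be the bare library name
rather than a relative build path.

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <link.h>
    #include <stdio.h>

    int main(void)
    {
        void *h = dlopen("libswiftDispatch.so", RTLD_NOW);
        if (h == NULL) {
            fprintf(stderr, "dlopen: %s\n", dlerror());
            return 1;
        }
        struct link_map *lm = NULL;
        if (dlinfo(h, RTLD_DI_LINKMAP, &lm) == 0) {
            const char *strtab = NULL;
            unsigned long soname = 0;
            int found = 0;
            /* Scan the dynamic section for DT_STRTAB and DT_SONAME. */
            for (const ElfW(Dyn) *d = lm->l_ld; d->d_tag != DT_NULL; d++) {
                if (d->d_tag == DT_STRTAB) strtab = (const char *)d->d_un.d_ptr;
                if (d->d_tag == DT_SONAME) { soname = d->d_un.d_val; found = 1; }
            }
            if (strtab != NULL && found) {
                printf("DT_SONAME = %s\n", strtab + soname);
            }
        }
        dlclose(h);
        return 0;
    }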
Signed-off-by: Kim Topley --- cmake/modules/SwiftSupport.cmake | 2 +- src/{unifdef.zKO5nq => unifdef.F6mUtB} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/{unifdef.zKO5nq => unifdef.F6mUtB} (100%) diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index ebdfd5d92..da7a201e3 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -136,7 +136,7 @@ function(add_swift_target target) if(CMAKE_SYSTEM_NAME STREQUAL Windows OR CMAKE_SYSTEM_NAME STREQUAL Darwin) set(emit_library -emit-library) else() - set(emit_library -emit-library -Xlinker -soname -Xlinker ${AST_OUTPUT}) + set(emit_library -emit-library -Xlinker -soname -Xlinker ${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) endif() endif() if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED) diff --git a/src/unifdef.zKO5nq b/src/unifdef.F6mUtB similarity index 100% rename from src/unifdef.zKO5nq rename to src/unifdef.F6mUtB From 48011c48082b597761e4142033d72f3d55d0df53 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 15 Jul 2019 11:20:09 -0700 Subject: [PATCH 192/249] Merge pull request #504 from compnerd/the-path-challenge build: correct the `DT_SONAME` Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.F6mUtB => unifdef.hRtf8Q} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.F6mUtB => unifdef.hRtf8Q} (100%) diff --git a/PATCHES b/PATCHES index 568ada6dd..4486d7ba2 100644 --- a/PATCHES +++ b/PATCHES @@ -516,3 +516,4 @@ github commits starting with 29bdc2f from [d11d565] APPLIED rdar://54572081 [d9740c2] APPLIED rdar://54572081 [fc917b4] APPLIED rdar://54572081 +[f911a44] APPLIED rdar://54572081 diff --git a/src/unifdef.F6mUtB b/src/unifdef.hRtf8Q similarity index 100% rename from src/unifdef.F6mUtB rename to src/unifdef.hRtf8Q From 9b2063a0b5f0224ddf6905729e8daf79052393d0 Mon Sep 17 00:00:00 2001 From: Parker Schuh Date: Tue, 16 Jul 2019 13:10:17 -0700 Subject: [PATCH 193/249] Address TODO to unblock master-next. Signed-off-by: Kim Topley --- src/CMakeLists.txt | 11 ++++++----- src/{unifdef.hRtf8Q => unifdef.0DrjBW} | 0 2 files changed, 6 insertions(+), 5 deletions(-) rename src/{unifdef.hRtf8Q => unifdef.0DrjBW} (100%) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 843d1e4c7..99548bf95 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,3 +1,4 @@ +include(CheckCCompilerFlag) include(SwiftSupport) include(DTrace) @@ -228,11 +229,11 @@ if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") PRIVATE -Xclang -fblocks) else() - # FIXME(compnerd) add check for -momit-leaf-frame-pointer? - target_compile_options(dispatch - PRIVATE - -fblocks - -momit-leaf-frame-pointer) + check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -fblocks) + if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) + endif() endif() if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) diff --git a/src/unifdef.hRtf8Q b/src/unifdef.0DrjBW similarity index 100% rename from src/unifdef.hRtf8Q rename to src/unifdef.0DrjBW From 9e40148f5e596b0e6ac853f7e2a27d9a55246449 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 16 Jul 2019 18:28:53 -0700 Subject: [PATCH 194/249] Merge pull request #505 from pschuh/f-1 Address TODO to unblock master-next. 
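As background (an illustrative aside, not part of the change itself), the
flag being probed here, -momit-leaf-frame-pointer, only affects leaf
routines, i.e. functions that make no calls of their own:

    #include <stddef.h>

    /* With -momit-leaf-frame-pointer, a compiler that supports the flag may
     * drop the frame-pointer prologue/epilogue for a leaf like this one. */
    static unsigned leaf_checksum(const unsigned char *p, size_t n)
    {
        unsigned sum = 0;
        while (n--) {
            sum = (sum << 1) ^ *p++; /* no calls made: a leaf function */
        }
        return sum;
    }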
Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.0DrjBW => unifdef.t9sz4J} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.0DrjBW => unifdef.t9sz4J} (100%) diff --git a/PATCHES b/PATCHES index 4486d7ba2..eb16bc0f9 100644 --- a/PATCHES +++ b/PATCHES @@ -517,3 +517,4 @@ github commits starting with 29bdc2f from [d9740c2] APPLIED rdar://54572081 [fc917b4] APPLIED rdar://54572081 [f911a44] APPLIED rdar://54572081 +[6d32c4d] APPLIED rdar://54572081 diff --git a/src/unifdef.0DrjBW b/src/unifdef.t9sz4J similarity index 100% rename from src/unifdef.0DrjBW rename to src/unifdef.t9sz4J From 3f3b5e532bc637f86f09c766f74cad71d77aa39d Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Thu, 18 Jul 2019 15:45:52 -0700 Subject: [PATCH 195/249] event: support socket sources on Windows Provide an initial implementation of Windows socket sources using WSAEventSelect(). This is sufficient for the dispatch_io_net test to pass on Windows. Signed-off-by: Kim Topley --- src/event/event_windows.c | 211 ++++++++++++++++++++++--- src/io.c | 26 ++- src/shims/generic_win_stubs.c | 13 ++ src/shims/generic_win_stubs.h | 3 + src/{unifdef.t9sz4J => unifdef.gckzOS} | 0 5 files changed, 228 insertions(+), 25 deletions(-) rename src/{unifdef.t9sz4J => unifdef.gckzOS} (100%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 23e1eb78c..512d0b536 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -30,6 +30,8 @@ enum _dispatch_windows_port { DISPATCH_PORT_FILE_HANDLE, DISPATCH_PORT_PIPE_HANDLE_READ, DISPATCH_PORT_PIPE_HANDLE_WRITE, + DISPATCH_PORT_SOCKET_READ, + DISPATCH_PORT_SOCKET_WRITE, }; enum _dispatch_muxnote_events { @@ -59,13 +61,24 @@ typedef struct dispatch_muxnote_s { DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID, DISPATCH_MUXNOTE_HANDLE_TYPE_FILE, DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE, + DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET, } dmn_handle_type; enum _dispatch_muxnote_events dmn_events; - // Used by the pipe monitoring thread - HANDLE dmn_thread; + // For pipes, this event is used to synchronize the monitoring thread with + // I/O completion port processing. For sockets, this is the event used with + // WSAEventSelect(). HANDLE dmn_event; + + // Pipe monitoring thread control + HANDLE dmn_thread; os_atomic(bool) dmn_stop; + + // Socket events registered with WSAEventSelect() + long dmn_network_events; + + // Threadpool wait handle for socket events + PTP_WAIT dmn_threadpool_wait; } *dispatch_muxnote_t; static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) @@ -146,17 +159,10 @@ _dispatch_muxnote_create(dispatch_unote_t du, case FILE_TYPE_PIPE: // The specified file is a socket, a named pipe, or an - // anonymous pipe. Use GetNamedPipeInfo() to distinguish between - // a pipe and a socket. Despite its name, it also succeeds for - // anonymous pipes. - if (!GetNamedPipeInfo(handle, NULL, NULL, NULL, NULL)) { - // We'll get ERROR_ACCESS_DENIED for outbound pipes. - if (GetLastError() != ERROR_ACCESS_DENIED) { - // The file is probably a socket. - WIN_PORT_ERROR(); - } - } - dmn->dmn_handle_type = DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE; + // anonymous pipe. + dmn->dmn_handle_type = _dispatch_handle_is_socket(handle) + ? 
DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET + : DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE; break; } @@ -183,18 +189,27 @@ _dispatch_muxnote_stop(dispatch_muxnote_t dmn) CloseHandle(dmn->dmn_thread); dmn->dmn_thread = NULL; } - if (dmn->dmn_event) { - CloseHandle(dmn->dmn_event); - dmn->dmn_event = NULL; + if (dmn->dmn_threadpool_wait) { + SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); + WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, + /* fCancelPendingCallbacks */ FALSE); + CloseThreadpoolWait(dmn->dmn_threadpool_wait); + dmn->dmn_threadpool_wait = NULL; + } + if (dmn->dmn_handle_type == DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET) { + WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); } } static void _dispatch_muxnote_dispose(dispatch_muxnote_t dmn) { - if (dmn->dmn_thread) { + if (dmn->dmn_thread || dmn->dmn_threadpool_wait) { DISPATCH_INTERNAL_CRASH(0, "disposed a muxnote with an active thread"); } + if (dmn->dmn_event) { + CloseHandle(dmn->dmn_event); + } free(dmn); } @@ -300,10 +315,51 @@ _dispatch_pipe_write_availability(HANDLE hPipe) return fpli.WriteQuotaAvailable; } +static VOID CALLBACK +_dispatch_socket_callback(PTP_CALLBACK_INSTANCE inst, void *context, + PTP_WAIT pwa, TP_WAIT_RESULT res) +{ + dispatch_muxnote_t dmn = (dispatch_muxnote_t)context; + SOCKET sock = (SOCKET)dmn->dmn_ident; + WSANETWORKEVENTS events; + if (WSAEnumNetworkEvents(sock, (WSAEVENT)dmn->dmn_event, &events) == 0) { + long lNetworkEvents = events.lNetworkEvents; + DWORD dwBytesAvailable = 1; + if (lNetworkEvents & FD_CLOSE) { + dwBytesAvailable = 0; + // Post to all registered read and write handlers + lNetworkEvents |= FD_READ | FD_WRITE; + } else if (lNetworkEvents & FD_READ) { + ioctlsocket(sock, FIONREAD, &dwBytesAvailable); + } + if (lNetworkEvents & FD_READ) { + _dispatch_muxnote_retain(dmn); + if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, + (ULONG_PTR)DISPATCH_PORT_SOCKET_READ, (LPOVERLAPPED)dmn)) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + if (lNetworkEvents & FD_WRITE) { + _dispatch_muxnote_retain(dmn); + if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, + (ULONG_PTR)DISPATCH_PORT_SOCKET_WRITE, (LPOVERLAPPED)dmn)) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + } else { + _dispatch_debug("socket[0x%llx]: WSAEnumNetworkEvents() failed (%d)", + (long long)sock, WSAGetLastError()); + } + SetThreadpoolWait(pwa, dmn->dmn_event, /* pftTimeout */ NULL); +} + static BOOL _dispatch_io_trigger(dispatch_muxnote_t dmn) { BOOL bSuccess; + long lNetworkEvents; switch (dmn->dmn_handle_type) { case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: @@ -321,19 +377,17 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: if ((dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) && !dmn->dmn_thread) { - HANDLE hThread = (HANDLE)_beginthreadex(/* security */ NULL, + dmn->dmn_thread = (HANDLE)_beginthreadex(/* security */ NULL, /* stack_size */ 1, _dispatch_pipe_monitor_thread, (void *)dmn, /* initflag */ 0, /* thrdaddr */ NULL); - if (!hThread) { + if (!dmn->dmn_thread) { DISPATCH_INTERNAL_CRASH(errno, "_beginthread"); } - HANDLE hEvent = CreateEventW(NULL, /* bManualReset */ FALSE, + dmn->dmn_event = CreateEventW(NULL, /* bManualReset */ FALSE, /* bInitialState */ FALSE, NULL); - if (!hEvent) { + if (!dmn->dmn_event) { DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); } - dmn->dmn_thread = hThread; - dmn->dmn_event = hEvent; } if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { 
_dispatch_muxnote_retain(dmn); @@ -348,6 +402,59 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) } } break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + if (!dmn->dmn_event) { + dmn->dmn_event = CreateEventW(NULL, /* bManualReset */ FALSE, + /* bInitialState */ FALSE, NULL); + if (!dmn->dmn_event) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + } + if (!dmn->dmn_threadpool_wait) { + dmn->dmn_threadpool_wait = CreateThreadpoolWait( + _dispatch_socket_callback, dmn, + /* PTP_CALLBACK_ENVIRON */ NULL); + if (!dmn->dmn_threadpool_wait) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateThreadpoolWait"); + } + SetThreadpoolWait(dmn->dmn_threadpool_wait, dmn->dmn_event, + /* pftTimeout */ NULL); + } + lNetworkEvents = FD_CLOSE; + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) { + lNetworkEvents |= FD_READ; + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + lNetworkEvents |= FD_WRITE; + } + if (dmn->dmn_network_events != lNetworkEvents) { + if (WSAEventSelect((SOCKET)dmn->dmn_ident, (WSAEVENT)dmn->dmn_event, + lNetworkEvents) != 0) { + DISPATCH_INTERNAL_CRASH(WSAGetLastError(), "WSAEventSelect"); + } + dmn->dmn_network_events = lNetworkEvents; + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + // FD_WRITE is edge-triggered, not level-triggered, so it will only + // be signaled if the socket becomes writable after a send() fails + // with WSAEWOULDBLOCK. We can work around this by performing a + // zero-byte send(). If the socket is writable, the send() will + // succeed and we can immediately post a packet, and if it isn't, it + // will fail with WSAEWOULDBLOCK and WSAEventSelect() will report + // the next time it becomes available. + if (send((SOCKET)dmn->dmn_ident, "", 0, 0) == 0) { + _dispatch_muxnote_retain(dmn); + bSuccess = PostQueuedCompletionStatus(hPort, 1, + (ULONG_PTR)DISPATCH_PORT_SOCKET_WRITE, + (LPOVERLAPPED)dmn); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + } + break; } return TRUE; @@ -408,6 +515,7 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) break; case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: if (events & DISPATCH_MUXNOTE_EVENT_READ) { LIST_INSERT_HEAD(&dmn->dmn_readers_head, dul, du_link); } else if (events & DISPATCH_MUXNOTE_EVENT_WRITE) { @@ -449,6 +557,7 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) break; case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: LIST_REMOVE(dul, du_link); _LIST_TRASH_ENTRY(dul, du_link); break; @@ -532,6 +641,52 @@ _dispatch_event_merge_pipe_handle_write(dispatch_muxnote_t dmn, _dispatch_muxnote_release(dmn); } +static void +_dispatch_event_merge_socket(dispatch_unote_t du, DWORD dwBytesAvailable) +{ + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state &= ~DU_STATE_ARMED; + uintptr_t data = dwBytesAvailable; + uint32_t flags; + if (dwBytesAvailable > 0) { + flags = EV_ADD | EV_ENABLE | EV_DISPATCH; + } else { + du_state |= DU_STATE_NEEDS_DELETE; + flags = EV_DELETE | EV_DISPATCH; + } + _dispatch_unote_state_set(du, du_state); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, flags, data, 0); +} + +static void +_dispatch_event_merge_socket_read(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + 
dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_socket(du, dwBytesAvailable); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_socket_write(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_socket(du, dwBytesAvailable); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + #pragma mark timers typedef struct _dispatch_windows_timeout_s { @@ -716,6 +871,16 @@ _dispatch_event_loop_drain(uint32_t flags) dwNumberOfBytesTransferred); break; + case DISPATCH_PORT_SOCKET_READ: + _dispatch_event_merge_socket_read((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + case DISPATCH_PORT_SOCKET_WRITE: + _dispatch_event_merge_socket_write((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + default: DISPATCH_INTERNAL_CRASH(ulCompletionKey, "unsupported completion key"); diff --git a/src/io.c b/src/io.c index 1c63a6b85..f68c930e4 100644 --- a/src/io.c +++ b/src/io.c @@ -2407,7 +2407,18 @@ _dispatch_operation_perform(dispatch_operation_t op) #if defined(_WIN32) HANDLE hFile = (HANDLE)op->fd_entry->fd; BOOL bSuccess; - if (GetFileType(hFile) == FILE_TYPE_PIPE) { + if (_dispatch_handle_is_socket(hFile)) { + processed = recv((SOCKET)hFile, buf, len, 0); + if (processed < 0) { + bSuccess = FALSE; + err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK) { + err = EAGAIN; + } + goto error; + } + bSuccess = TRUE; + } else if (GetFileType(hFile) == FILE_TYPE_PIPE) { OVERLAPPED ovlOverlapped = {}; DWORD dwTotalBytesAvail; bSuccess = PeekNamedPipe(hFile, NULL, 0, NULL, @@ -2461,7 +2472,18 @@ _dispatch_operation_perform(dispatch_operation_t op) #if defined(_WIN32) HANDLE hFile = (HANDLE)op->fd_entry->fd; BOOL bSuccess; - if (GetFileType(hFile) == FILE_TYPE_PIPE) { + if (_dispatch_handle_is_socket(hFile)) { + processed = send((SOCKET)hFile, buf, len, 0); + if (processed < 0) { + bSuccess = FALSE; + err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK) { + err = EAGAIN; + } + goto error; + } + bSuccess = TRUE; + } else if (GetFileType(hFile) == FILE_TYPE_PIPE) { // Unfortunately there isn't a good way to achieve O_NONBLOCK // semantics when writing to a pipe. SetNamedPipeHandleState() // can allow pipes to be switched into a "no wait" mode, but diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c index b976075af..7781673a4 100644 --- a/src/shims/generic_win_stubs.c +++ b/src/shims/generic_win_stubs.c @@ -13,6 +13,19 @@ typedef NTSTATUS (NTAPI *_NtQueryInformationFile_fn_t)(HANDLE FileHandle, DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_ntdll_pred); DISPATCH_STATIC_GLOBAL(_NtQueryInformationFile_fn_t _dispatch_NtQueryInformationFile_ptr); +bool +_dispatch_handle_is_socket(HANDLE hFile) +{ + // GetFileType() returns FILE_TYPE_PIPE for both pipes and sockets. We can + // disambiguate by checking if PeekNamedPipe() fails with + // ERROR_INVALID_FUNCTION. 
+ if (GetFileType(hFile) == FILE_TYPE_PIPE &&
+ !PeekNamedPipe(hFile, NULL, 0, NULL, NULL, NULL)) {
+ return GetLastError() == ERROR_INVALID_FUNCTION;
+ }
+ return false;
+}
+
 static void
 _dispatch_init_precise_time(void *context DISPATCH_UNUSED)
 {
diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h
index 1f7f4eaa3..985bbe30b 100644
--- a/src/shims/generic_win_stubs.h
+++ b/src/shims/generic_win_stubs.h
@@ -4,6 +4,7 @@
 #include
+#include <winsock2.h>
 #include
 #include
 #include
@@ -39,6 +40,8 @@ typedef __typeof__(_Generic((__SIZE_TYPE__)0, \

 #define strcasecmp _stricmp

+bool _dispatch_handle_is_socket(HANDLE hFile);
+
 /*
  * Wrappers for dynamically loaded Windows APIs
  */
diff --git a/src/unifdef.t9sz4J b/src/unifdef.gckzOS
similarity index 100%
rename from src/unifdef.t9sz4J
rename to src/unifdef.gckzOS

From f45b0c8ad3fb245b75910d8c354c46c0d420149b Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Mon, 29 Jul 2019 09:33:43 -0700
Subject: [PATCH 196/249] Merge pull request #507 from adierking/sock-and-roll

event: support socket sources on Windows

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.gckzOS => unifdef.ApQOM4} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.gckzOS => unifdef.ApQOM4} (100%)

diff --git a/PATCHES b/PATCHES
index eb16bc0f9..855c52095 100644
--- a/PATCHES
+++ b/PATCHES
@@ -518,3 +518,4 @@ github commits starting with 29bdc2f from
 [fc917b4] APPLIED rdar://54572081
 [f911a44] APPLIED rdar://54572081
 [6d32c4d] APPLIED rdar://54572081
+[9005cb4] APPLIED rdar://54572081
diff --git a/src/unifdef.gckzOS b/src/unifdef.ApQOM4
similarity index 100%
rename from src/unifdef.gckzOS
rename to src/unifdef.ApQOM4

From 6aaaca5c446553a8f28bea0cd7528ff60366482d Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 23 Jul 2019 10:01:40 -0700
Subject: [PATCH 197/249] build: support `CMAKE_SWIFT_COMPILER_TARGET`

This adds support for a new variable `CMAKE_SWIFT_COMPILER_TARGET`
which mirrors CMake's `CMAKE_Swift_COMPILER_TARGET`.
Signed-off-by: Kim Topley
---
 src/CMakeLists.txt                     | 2 +-
 src/{unifdef.ApQOM4 => unifdef.7aneTp} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename src/{unifdef.ApQOM4 => unifdef.7aneTp} (100%)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 99548bf95..0691357dd 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -170,7 +170,7 @@ if(ENABLE_SWIFT)
 $<$:-Xcc>
 $<$:-D_DLL>
 TARGET
- ${CMAKE_C_COMPILER_TARGET})
+ ${CMAKE_SWIFT_COMPILER_TARGET})
 endif()
 if(ENABLE_DTRACE)
 dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d
diff --git a/src/unifdef.ApQOM4 b/src/unifdef.7aneTp
similarity index 100%
rename from src/unifdef.ApQOM4
rename to src/unifdef.7aneTp

From e18bbaa6ebbafcfbf2f8047bc54b6c5bbab50e7f Mon Sep 17 00:00:00 2001
From: ktopley-apple
Date: Wed, 31 Jul 2019 15:58:26 -0700
Subject: [PATCH 198/249] Merge pull request #509 from compnerd/compiler-target

build: support `CMAKE_SWIFT_COMPILER_TARGET`

Signed-off-by: Kim Topley
---
 PATCHES                                | 1 +
 src/{unifdef.7aneTp => unifdef.YmjwOl} | 0
 2 files changed, 1 insertion(+)
 rename src/{unifdef.7aneTp => unifdef.YmjwOl} (100%)

diff --git a/PATCHES b/PATCHES
index 855c52095..b85d7d33c 100644
--- a/PATCHES
+++ b/PATCHES
@@ -519,3 +519,4 @@ github commits starting with 29bdc2f from
 [f911a44] APPLIED rdar://54572081
 [6d32c4d] APPLIED rdar://54572081
 [9005cb4] APPLIED rdar://54572081
+[68875cb] APPLIED rdar://54572081
diff --git a/src/unifdef.7aneTp b/src/unifdef.YmjwOl
similarity index 100%
rename from src/unifdef.7aneTp
rename to src/unifdef.YmjwOl

From e99a38da2e33ac968477bcc04d8fa624716b9a85 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Wed, 14 Aug 2019 13:21:25 -0700
Subject: [PATCH 199/249] shims: always replace `_Bool` with `bool` in C++

This generalises the path to always perform this substitution in C++
mode. This should repair the build of libdispatch on Android.

Signed-off-by: Kim Topley
---
 src/shims/atomic.h                     | 2 +-
 src/{unifdef.YmjwOl => unifdef.W095qv} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename src/{unifdef.YmjwOl => unifdef.W095qv} (100%)

diff --git a/src/shims/atomic.h b/src/shims/atomic.h
index 88cbb3408..44af102eb 100644
--- a/src/shims/atomic.h
+++ b/src/shims/atomic.h
@@ -32,7 +32,7 @@
 #endif

 // FreeBSD only defines _Bool in C mode. In C++ mode _Bool is not being defined.
-#if defined(__cplusplus) && (defined(__FreeBSD__) || defined(_WIN32)) +#if defined(__cplusplus) #define _Bool bool #endif diff --git a/src/unifdef.YmjwOl b/src/unifdef.W095qv similarity index 100% rename from src/unifdef.YmjwOl rename to src/unifdef.W095qv From 2549f993614cbc8d6b154c6fa1e8e35695106681 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 14 Aug 2019 15:24:09 -0700 Subject: [PATCH 200/249] Merge pull request #510 from compnerd/boolean shims: always replace `_Bool` with `bool` in C++ Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.W095qv => unifdef.JZchSp} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.W095qv => unifdef.JZchSp} (100%) diff --git a/PATCHES b/PATCHES index b85d7d33c..5620a4ebf 100644 --- a/PATCHES +++ b/PATCHES @@ -520,3 +520,4 @@ github commits starting with 29bdc2f from [6d32c4d] APPLIED rdar://54572081 [9005cb4] APPLIED rdar://54572081 [68875cb] APPLIED rdar://54572081 +[fc73866] APPLIED rdar://54572081 diff --git a/src/unifdef.W095qv b/src/unifdef.JZchSp similarity index 100% rename from src/unifdef.W095qv rename to src/unifdef.JZchSp From 9acd85527098a0a6d0ac0613d9886c4d0bdee6ec Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Fri, 16 Aug 2019 15:24:28 -0700 Subject: [PATCH 201/249] event: simplify the Windows file source implementation Currently, file sources are all registered with a global `_dispatch_file_handles` list that has a lock around it. Remove this in favor of posting muxnote pointers to the I/O completion port using the same mechanism as the other source types. Signed-off-by: Kim Topley --- src/event/event_windows.c | 38 +++++++++++--------------- src/{unifdef.JZchSp => unifdef.M545RQ} | 0 2 files changed, 16 insertions(+), 22 deletions(-) rename src/{unifdef.JZchSp => unifdef.M545RQ} (100%) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 512d0b536..3576774b2 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -84,9 +84,6 @@ typedef struct dispatch_muxnote_s { static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) _dispatch_sources[DSL_HASH_SIZE]; -static SRWLOCK _dispatch_file_handles_lock = SRWLOCK_INIT; -static LIST_HEAD(, dispatch_unote_linkage_s) _dispatch_file_handles; - DISPATCH_ALWAYS_INLINE static inline struct dispatch_muxnote_bucket_s * _dispatch_unote_muxnote_bucket(uint32_t ident) @@ -366,8 +363,9 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) DISPATCH_INTERNAL_CRASH(0, "invalid handle"); case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + _dispatch_muxnote_retain(dmn); bSuccess = PostQueuedCompletionStatus(hPort, 0, - (ULONG_PTR)DISPATCH_PORT_FILE_HANDLE, NULL); + (ULONG_PTR)DISPATCH_PORT_FILE_HANDLE, (LPOVERLAPPED)dmn); if (bSuccess == FALSE) { DISPATCH_INTERNAL_CRASH(GetLastError(), "PostQueuedCompletionStatus"); @@ -509,11 +507,6 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) DISPATCH_INTERNAL_CRASH(0, "invalid handle"); case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: - AcquireSRWLockExclusive(&_dispatch_file_handles_lock); - LIST_INSERT_HEAD(&_dispatch_file_handles, dul, du_link); - ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); - break; - case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: if (events & DISPATCH_MUXNOTE_EVENT_READ) { @@ -550,12 +543,6 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) DISPATCH_INTERNAL_CRASH(0, "invalid handle"); case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: - AcquireSRWLockExclusive(&_dispatch_file_handles_lock); - LIST_REMOVE(dul, du_link); - _LIST_TRASH_ENTRY(dul, du_link); - 
ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); - break; - case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: LIST_REMOVE(dul, du_link); @@ -573,14 +560,11 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) } static void -_dispatch_event_merge_file_handle(void) +_dispatch_event_merge_file_handle(dispatch_muxnote_t dmn) { dispatch_unote_linkage_t dul, dul_next; - - AcquireSRWLockExclusive(&_dispatch_file_handles_lock); - LIST_FOREACH_SAFE(dul, &_dispatch_file_handles, du_link, dul_next) { + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - // consumed by dux_merge_evt() _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); @@ -588,7 +572,17 @@ _dispatch_event_merge_file_handle(void) os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); } - ReleaseSRWLockExclusive(&_dispatch_file_handles_lock); + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); } static void @@ -858,7 +852,7 @@ _dispatch_event_loop_drain(uint32_t flags) break; case DISPATCH_PORT_FILE_HANDLE: - _dispatch_event_merge_file_handle(); + _dispatch_event_merge_file_handle((dispatch_muxnote_t)pOV); break; case DISPATCH_PORT_PIPE_HANDLE_READ: diff --git a/src/unifdef.JZchSp b/src/unifdef.M545RQ similarity index 100% rename from src/unifdef.JZchSp rename to src/unifdef.M545RQ From d7be6c52f29fa38e178fb2ca7aff0e6d029b3d06 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Wed, 21 Aug 2019 07:55:50 -0700 Subject: [PATCH 202/249] Merge pull request #511 from adierking/file-cleanup event: simplify the Windows file source implementation Signed-off-by: Kim Topley --- PATCHES | 1 + src/{unifdef.M545RQ => unifdef.bngtnJ} | 0 2 files changed, 1 insertion(+) rename src/{unifdef.M545RQ => unifdef.bngtnJ} (100%) diff --git a/PATCHES b/PATCHES index 5620a4ebf..10277a45c 100644 --- a/PATCHES +++ b/PATCHES @@ -521,3 +521,4 @@ github commits starting with 29bdc2f from [9005cb4] APPLIED rdar://54572081 [68875cb] APPLIED rdar://54572081 [fc73866] APPLIED rdar://54572081 +[3cf1bf3] APPLIED rdar://54572081 diff --git a/src/unifdef.M545RQ b/src/unifdef.bngtnJ similarity index 100% rename from src/unifdef.M545RQ rename to src/unifdef.bngtnJ From 708257f3c23991290c595edf06bc227bf7ba80a8 Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Fri, 23 Aug 2019 14:49:13 -0700 Subject: [PATCH 203/249] libdispatch open source import fixups. 
rdar://problem/54572081 --- CMakeLists.txt | 3 + cmake/config.h.in | 6 + private/queue_private.h | 9 +- src/CMakeLists.txt | 1 + src/event/event_kevent.c | 1 + src/init.c | 2 +- src/internal.h | 1 - src/io.c | 5 + src/queue.c | 633 ++- src/semaphore.c | 2 +- src/source.c | 2 +- src/unifdef.bngtnJ | 8028 -------------------------------------- 12 files changed, 646 insertions(+), 8047 deletions(-) delete mode 100644 src/unifdef.bngtnJ diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e2ada1c7..9f3f221e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -192,7 +192,10 @@ check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) check_function_exists(posix_fadvise HAVE_POSIX_FADVISE) check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) +check_function_exists(pthread_attr_setcpupercent_np HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP) +check_function_exists(pthread_yield_np HAVE_PTHREAD_YIELD_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) +check_function_exists(pthread_workqueue_setdispatch_np HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) diff --git a/cmake/config.h.in b/cmake/config.h.in index a076208e5..2896a2083 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -145,12 +145,18 @@ /* Define to 1 if you have the `pthread_key_init_np' function. */ #cmakedefine HAVE_PTHREAD_KEY_INIT_NP +/* Define to 1 if you have the `pthread_attr_setcpupercent_np' function. */ +#cmakedefine HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP + /* Define to 1 if you have the header file. */ #cmakedefine HAVE_PTHREAD_MACHDEP_H /* Define to 1 if you have the `pthread_main_np' function. */ #cmakedefine01 HAVE_PTHREAD_MAIN_NP +/* Define to 1 if you have the `pthread_yield_np' function. */ +#cmakedefine01 HAVE_PTHREAD_YIELD_NP + /* Define to 1 if you have the header file. */ #cmakedefine01 HAVE_PTHREAD_NP_H diff --git a/private/queue_private.h b/private/queue_private.h index 2a3abe32c..302de4aad 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -149,6 +149,13 @@ dispatch_set_qos_class_fallback(dispatch_object_t object, #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +// On FreeBSD pthread_attr_t is a typedef to a pointer type +#if defined(__FreeBSD__) +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR _Nullable +#else +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR +#endif + /*! * @function dispatch_queue_attr_make_with_overcommit * @@ -329,7 +336,7 @@ DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t dispatch_pthread_root_queue_create(const char *_Nullable label, - unsigned long flags, const pthread_attr_t *_Nullable attr, + unsigned long flags, const pthread_attr_t DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR *_Nullable attr, dispatch_block_t _Nullable configure); /*! 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 0691357dd..f71b68f45 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -54,6 +54,7 @@ add_library(dispatch shims/perfmon.h shims/time.h shims/tsd.h + shims/yield.c shims/yield.h) set_target_properties(dispatch diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index c2cab0fc2..4c17e0897 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -873,6 +873,7 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, du->du_priority), #endif }; + (void)pp; // if DISPATCH_USE_KEVENT_QOS == 0 } DISPATCH_ALWAYS_INLINE diff --git a/src/init.c b/src/init.c index adb32b966..04ab9c459 100644 --- a/src/init.c +++ b/src/init.c @@ -374,7 +374,7 @@ dispatch_get_global_queue(intptr_t priority, uintptr_t flags) dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT); - if (flags & ~(uintptr_t)DISPATCH_QUEUE_OVERCOMMIT) { + if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return DISPATCH_BAD_INPUT; } dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); diff --git a/src/internal.h b/src/internal.h index a65aff691..66f0244cb 100644 --- a/src/internal.h +++ b/src/internal.h @@ -290,7 +290,6 @@ upcast(dispatch_object_t dou) #include #include #endif -#include #ifdef __BLOCKS__ #if __has_include() diff --git a/src/io.c b/src/io.c index f68c930e4..73295df29 100644 --- a/src/io.c +++ b/src/io.c @@ -20,6 +20,11 @@ #include "internal.h" +#if defined(__FreeBSD__) +#include +#define F_RDADVISE F_RDAHEAD +#endif + #ifndef DISPATCH_IO_DEBUG #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif diff --git a/src/queue.c b/src/queue.c index cdd73b3f1..55c4873b1 100644 --- a/src/queue.c +++ b/src/queue.c @@ -236,10 +236,6 @@ const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(MACH_IPC_HANDOFF, .do_invoke = _dispatch_mach_ipc_handoff_invoke), #endif -#if !defined(__OPEN_SOURCE__) && OS_VENTURE_ENABLE - DC_VTABLE_ENTRY(VENTURE_DRAIN, - .do_invoke = _os_venture_drain_continuation_invoke), -#endif // !defined(__OPEN_SOURCE__) && OS_VENTURE_ENABLE }; DISPATCH_NOINLINE @@ -624,7 +620,7 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) "run more than once and waited for"); } - intptr_t ret = dispatch_group_wait(dbpd->dbpd_group, timeout); + long ret = dispatch_group_wait(dbpd->dbpd_group, timeout); if (boost_th) { _dispatch_thread_override_end(boost_th, dbpd); @@ -900,6 +896,7 @@ dispatch_async(dispatch_queue_t dq, dispatch_block_t work) qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); } +#endif #pragma mark - #pragma mark _dispatch_sync_invoke / _dispatch_sync_complete @@ -3564,6 +3561,617 @@ static bool _dispatch_queue_drain_should_narrow_slow(uint64_t now, dispatch_invoke_context_t dic) { + if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_qos_t qos = _dispatch_qos_from_pp(pp); + if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { + DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); + } + size_t idx = DISPATCH_QOS_BUCKET(qos); + os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; + uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); + + dic->dic_next_narrow_check = newval; + os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { + if (now < oldval) { + os_atomic_rmw_loop_give_up(return false); + } + }); + + if 
(!_pthread_workqueue_should_narrow(pp)) { + return false; + } + dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) +{ + uint64_t next_check = dic->dic_next_narrow_check; + if (unlikely(next_check)) { + uint64_t now = _dispatch_approximate_time(); + if (unlikely(next_check < now)) { + return _dispatch_queue_drain_should_narrow_slow(now, dic); + } + } + return false; +} +#else +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) +#define _dispatch_queue_drain_should_narrow(dic) false +#endif + +/* + * Drain comes in 2 flavours (serial/concurrent) and 2 modes + * (redirecting or not). + * + * Serial + * ~~~~~~ + * Serial drain is about serial queues (width == 1). It doesn't support + * the redirecting mode, which doesn't make sense, and treats all continuations + * as barriers. Bookkeeping is minimal in serial flavour, most of the loop + * is optimized away. + * + * Serial drain stops if the width of the queue grows to larger than 1. + * Going through a serial drain prevents any recursive drain from being + * redirecting. + * + * Concurrent + * ~~~~~~~~~~ + * When in non-redirecting mode (meaning one of the target queues is serial), + * non-barriers and barriers alike run in the context of the drain thread. + * Slow non-barrier items are still all signaled so that they can make progress + * toward the dispatch_sync() that will serialize them all . + * + * In redirecting mode, non-barrier work items are redirected downward. + * + * Concurrent drain stops if the width of the queue becomes 1, so that the + * queue drain moves to the more efficient serial mode. + */ +DISPATCH_ALWAYS_INLINE +static dispatch_queue_wakeup_target_t +_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) +{ + dispatch_queue_t orig_tq = dq->do_targetq; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + uint64_t dq_state, owned = *owned_ptr; + + if (unlikely(!dq->dq_items_tail)) return NULL; + + _dispatch_thread_frame_push(&dtf, dq); + if (serial_drain || _dq_state_is_in_barrier(owned)) { + // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` + // but width can change while draining barrier work items, so we only + // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` + owned = DISPATCH_QUEUE_IN_BARRIER; + } else { + owned &= DISPATCH_QUEUE_WIDTH_MASK; + } + + dc = _dispatch_queue_get_head(dq); + goto first_iteration; + + for (;;) { + dispatch_assert(dic->dic_barrier_waiter == NULL); + dc = next_dc; + if (unlikely(!dc)) { + if (!dq->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dq); + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (unlikely(serial_drain != (dq->dq_width == 1))) { + break; + } + if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + break; + } + if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + break; + } + } + +first_iteration: + dq_state = os_atomic_load(&dq->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + break; + } + if (unlikely(orig_tq != dq->do_targetq)) { + break; + } + + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if 
(!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_IN_BARRIER; + } + if (_dispatch_object_is_sync_waiter(dc) && + !(flags & DISPATCH_INVOKE_THREAD_BOUND)) { + dic->dic_barrier_waiter = dc; + goto out_with_barrier_waiter; + } + next_dc = _dispatch_queue_pop_head(dq, dc); + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_xor2o(dq, dq_state, owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_waiter(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } + + next_dc = _dispatch_queue_pop_head(dq, dc); + if (_dispatch_object_is_waiter(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_non_barrier_waiter_redirect_or_wake(dq, dc); + continue; + } + + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + // This is a re-redirect, overrides have already been applied by + // _dispatch_continuation_async* + // However we want to end up on the root queue matching `dc` + // qos, so pick up the current override of `dq` which includes + // dc's override (and maybe more) + _dispatch_continuation_redirect_push(dq, dc, + _dispatch_queue_max_qos(dq)); + continue; + } + } + + _dispatch_continuation_pop_inline(dc, dic, flags, dq); + } + + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); + } + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; + *owned_ptr |= owned; + _dispatch_thread_frame_pop(&dtf); + return dc ? 
dq->do_targetq : NULL;
+
+out_with_no_width:
+ *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+ _dispatch_thread_frame_pop(&dtf);
+ return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
+
+out_with_barrier_waiter:
+ if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
+ DISPATCH_INTERNAL_CRASH(0,
+ "Deferred continuation on source, mach channel or mgr");
+ }
+ _dispatch_thread_frame_pop(&dtf);
+ return dq->do_targetq;
+}
+
+DISPATCH_NOINLINE
+static dispatch_queue_wakeup_target_t
+_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+ uint64_t *owned)
+{
+ return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false);
+}
+
+DISPATCH_NOINLINE
+dispatch_queue_wakeup_target_t
+_dispatch_lane_serial_drain(dispatch_lane_class_t dqu,
+ dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
+ uint64_t *owned)
+{
+ flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
+ return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true);
+}
+
+void
+_dispatch_queue_invoke_finish(dispatch_queue_t dq,
+ dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned)
+{
+ struct dispatch_object_s *dc = dic->dic_barrier_waiter;
+ dispatch_qos_t qos = dic->dic_barrier_waiter_bucket;
+ if (dc) {
+ dic->dic_barrier_waiter = NULL;
+ dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED;
+ owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+#if DISPATCH_INTROSPECTION
+ dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
+ dsc->dsc_from_async = true;
+#endif
+ if (qos) {
+ return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl,
+ dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned);
+ }
+ return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc,
+ DISPATCH_WAKEUP_CONSUME_2, owned);
+ }
+
+ uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
+ if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
+ enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
+ }
+ os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
+ new_state = old_state - owned;
+ new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
+ new_state |= DISPATCH_QUEUE_DIRTY;
+ if (_dq_state_is_runnable(new_state) &&
+ !_dq_state_is_enqueued(new_state)) {
+ // drain was not interrupted for suspension
+ // we will reenqueue right away, just put ENQUEUED back
+ new_state |= enqueued;
+ }
+ });
+ old_state -= owned;
+ if (_dq_state_received_override(old_state)) {
+ // Ensure that the root queue sees that this thread was overridden.
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); + } + if ((old_state ^ new_state) & enqueued) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); + } + return _dispatch_release_2_tailcall(dq); +} + +void +_dispatch_lane_activate(dispatch_lane_class_t dq) +{ + dispatch_queue_t tq = dq._dl->do_targetq; + dispatch_priority_t pri = dq._dl->dq_priority; + + // Normalize priority: keep the fallback only when higher than the floor + if (_dispatch_priority_fallback_qos(pri) <= _dispatch_priority_qos(pri) || + (_dispatch_priority_qos(pri) && + !(pri & DISPATCH_PRIORITY_FLAG_FLOOR))) { + pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; + dq._dl->dq_priority = pri; + } + tq = _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq._dl, tq); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) +{ + dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); + + if (unlikely(cq != otq)) { + return otq; + } + if (dq->dq_width == 1) { + return _dispatch_lane_serial_drain(dq, dic, flags, owned); + } + return _dispatch_lane_concurrent_drain(dq, dic, flags, owned); +} + +DISPATCH_NOINLINE +void +_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2); +} + +#pragma mark - +#pragma mark dispatch_workloop_t + +#define _dispatch_wl(dwl, qos) os_mpsc(dwl, dwl, s[DISPATCH_QOS_BUCKET(qos)]) +#define _dispatch_workloop_looks_empty(dwl, qos) \ + os_mpsc_looks_empty(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_get_head(dwl, qos) \ + os_mpsc_get_head(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_pop_head(dwl, qos, dc) \ + os_mpsc_pop_head(_dispatch_wl(dwl, qos), dc, do_next) +#define _dispatch_workloop_push_update_tail(dwl, qos, dou) \ + os_mpsc_push_update_tail(_dispatch_wl(dwl, qos), dou, do_next) +#define _dispatch_workloop_push_update_prev(dwl, qos, prev, dou) \ + os_mpsc_push_update_prev(_dispatch_wl(dwl, qos), prev, dou, do_next) + +dispatch_workloop_t +dispatch_workloop_copy_current(void) +{ + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + _os_object_retain_with_resurrect(dwl->_as_os_obj); + return dwl; + } + return NULL; +} + +bool +dispatch_workloop_is_current(dispatch_workloop_t dwl) +{ + return _dispatch_get_wlh() == (dispatch_wlh_t)dwl; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_workloop_role_bits(void) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + if (likely(_dispatch_kevent_workqueue_enabled)) { + return DISPATCH_QUEUE_ROLE_BASE_WLH; + } +#endif + return DISPATCH_QUEUE_ROLE_BASE_ANON; +} + +bool +_dispatch_workloop_should_yield_4NW(void) +{ + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + return _dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos; + } + return false; +} + +DISPATCH_NOINLINE +static dispatch_workloop_t +_dispatch_workloop_create(const char *label, uint64_t dq_state) +{ + dispatch_queue_flags_t dqf = DQF_AUTORELEASE_ALWAYS; + dispatch_workloop_t dwl; + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + 
dq_state |= _dispatch_workloop_role_bits(); + + dwl = _dispatch_queue_alloc(workloop, dqf, 1, dq_state)._dwl; + dwl->dq_label = label; + dwl->do_targetq = _dispatch_get_default_queue(true); + if (!(dq_state & DISPATCH_QUEUE_INACTIVE)) { + dwl->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT | + _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); + } + _dispatch_object_debug(dwl, "%s", __func__); + return _dispatch_introspection_queue_create(dwl)._dwl; +} + +dispatch_workloop_t +dispatch_workloop_create(const char *label) +{ + return _dispatch_workloop_create(label, 0); +} + +dispatch_workloop_t +dispatch_workloop_create_inactive(const char *label) +{ + return _dispatch_workloop_create(label, DISPATCH_QUEUE_INACTIVE); +} + +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t dwl, + dispatch_autorelease_frequency_t frequency) +{ + if (frequency == DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_ALWAYS, DQF_AUTORELEASE_NEVER); + } else { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_NEVER, DQF_AUTORELEASE_ALWAYS); + } + _dispatch_queue_setter_assert_inactive(dwl); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) +{ + if (dwl->dwl_attr) { + free(dwl->dwl_attr); + } +} + +#if TARGET_OS_MAC +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) +{ + return dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & + (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_POLICY | + DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); +} + +void +dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, + uint64_t flags) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + if (priority) { + dwl->dwl_attr->dwla_sched.sched_priority = priority; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_SCHED; + } else { + dwl->dwl_attr->dwla_sched.sched_priority = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_SCHED; + } + + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } +} +#endif // TARGET_OS_MAC + +void +dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, + qos_class_t cls, int relpri, uint64_t flags) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + + if (qos) { + dwl->dwl_attr->dwla_pri = _dispatch_priority_make(qos, relpri); + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; + } else { + dwl->dwl_attr->dwla_pri = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; + } + +#if TARGET_OS_MAC + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } +#else // TARGET_OS_MAC + (void)flags; +#endif // TARGET_OS_MAC +} + +void +dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, + qos_class_t cls, uint64_t flags) +{ + dispatch_workloop_set_qos_class_floor(dwl, cls, 0, flags); +} + +void +dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, + uint32_t refillms) 
+{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + if ((dwl->dwl_attr->dwla_flags & (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS)) == 0) { + DISPATCH_CLIENT_CRASH(0, "workloop qos class or priority must be " + "set before cpupercent"); + } + + dwl->dwl_attr->dwla_cpupercent.percent = percent; + dwl->dwl_attr->dwla_cpupercent.refillms = refillms; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; +} + +#if DISPATCH_IOHID_SPI +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dwl->dwl_attr->dwla_observers = *observer_hooks; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; +} +#endif + +#if TARGET_OS_MAC +static void +_dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, + pthread_attr_t *attr) +{ + uint64_t old_state, new_state; + dispatch_queue_global_t dprq; + + dprq = dispatch_pthread_root_queue_create( + "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); + + dwl->do_targetq = dprq->_as_dq; + _dispatch_retain(dprq); + dispatch_release(dprq); + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; + }); +} + +static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; +#endif // TARGET_OS_MAC + +static void +_dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) +{ +#if defined(_POSIX_THREADS) + dispatch_workloop_attr_t dwla = dwl->dwl_attr; + pthread_attr_t attr; + + pthread_attr_init(&attr); + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { + dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; + } +#if TARGET_OS_MAC + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { + pthread_attr_setschedparam(&attr, &dwla->dwla_sched); + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. We want this path to always + // be taken on workloops that have a scheduler priority set. 
+ dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { + pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); + } +#endif // TARGET_OS_MAC +#if HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { + pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, + (unsigned long)dwla->dwla_cpupercent.refillms); + } +#endif // HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP +#if TARGET_OS_MAC + if (_dispatch_workloop_has_kernel_attributes(dwl)) { + int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); + switch (rv) { + case 0: + dwla->dwla_flags |= DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY; + break; case ENOTSUP: /* simulator fallback */ _dispatch_workloop_activate_simulator_fallback(dwl, &attr); @@ -5838,6 +6446,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) return head; } +#if DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO) @@ -6351,7 +6960,7 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, DISPATCH_STATIC_GLOBAL(bool _dispatch_program_is_probably_callback_driven); -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) +#if DISPATCH_COCOA_COMPAT DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_main_q_handle_pred); DISPATCH_ALWAYS_INLINE @@ -6493,9 +7102,7 @@ _dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) #error "runloop support not implemented on this platform" #endif } -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) -#if DISPATCH_COCOA_COMPAT static inline void _dispatch_runloop_queue_class_poke(dispatch_lane_t dq) { @@ -6572,8 +7179,6 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, } } -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) - DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) @@ -6823,10 +7428,10 @@ _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) } #endif -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) +#endif // DISPATCH_COCOA_COMPAT #pragma mark - #pragma mark dispatch_main_queue -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) +#if DISPATCH_COCOA_COMPAT dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void) @@ -6858,7 +7463,7 @@ _dispatch_main_queue_callback_4CF( _dispatch_main_q.dq_side_suspend_cnt = false; } -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) +#endif // DISPATCH_COCOA_COMPAT DISPATCH_NOINLINE void @@ -7365,6 +7970,7 @@ libdispatch_tsd_init(void) #endif // defined(_WIN32) __dispatch_tsd.tid = _gettid(); } +#endif DISPATCH_NOTHROW void @@ -7402,7 +8008,6 @@ _dispatch_fork_becomes_unsafe_slow(void) DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); } } -#endif // TARGET_OS_MAC DISPATCH_NOINLINE void diff --git a/src/semaphore.c b/src/semaphore.c index b706b0b88..1d164f17f 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -21,7 +21,7 @@ #include "internal.h" DISPATCH_WEAK // rdar://problem/8503746 -long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); +intptr_t _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); #pragma mark - #pragma mark dispatch_semaphore_t diff --git a/src/source.c b/src/source.c index c2fe427b7..376327052 100644 --- a/src/source.c +++ b/src/source.c @@ -159,7 +159,7 @@ dispatch_source_get_data(dispatch_source_t ds) #endif #endif // DISPATCH_USE_MEMORYSTATUS uint64_t value = os_atomic_load2o(dr, ds_data, relaxed); - 
return (uintptr_t)(dr->du_has_extended_status ? + return (unsigned long)(dr->du_has_extended_status ? DISPATCH_SOURCE_GET_DATA(value) : value); } diff --git a/src/unifdef.bngtnJ b/src/unifdef.bngtnJ deleted file mode 100644 index 76ec87402..000000000 --- a/src/unifdef.bngtnJ +++ /dev/null @@ -1,8028 +0,0 @@ -/* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#include "internal.h" -#if HAVE_MACH -#include "protocol.h" // _dispatch_send_wakeup_runloop_thread -#endif - -static inline void _dispatch_root_queues_init(void); -static void _dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, - dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -static void _dispatch_lane_non_barrier_complete(dispatch_lane_t dq, - dispatch_wakeup_flags_t flags); -#if HAVE_PTHREAD_WORKQUEUE_QOS -static inline void _dispatch_queue_wakeup_with_override( - dispatch_queue_class_t dq, uint64_t dq_state, - dispatch_wakeup_flags_t flags); -#endif -static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, - struct dispatch_object_s *dc, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags, uint64_t owned); -static inline bool -_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, - uint64_t dq_state); - -#pragma mark - -#pragma mark dispatch_assert_queue - -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) -{ - _dispatch_client_assert_fail( - "Block was %sexpected to execute on queue [%s]", - expected ? 
"" : "not ", dq->dq_label ?: ""); -} - -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_assert_queue_barrier_fail(dispatch_queue_t dq) -{ - _dispatch_client_assert_fail( - "Block was expected to act as a barrier on queue [%s]", - dq->dq_label ?: ""); -} - -void -dispatch_assert_queue(dispatch_queue_t dq) -{ - unsigned long metatype = dx_metatype(dq); - if (unlikely(metatype != _DISPATCH_LANE_TYPE && - metatype != _DISPATCH_WORKLOOP_TYPE)) { - DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " - "dispatch_assert_queue()"); - } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (likely(_dq_state_drain_locked_by_self(dq_state))) { - return; - } - if (likely(_dispatch_thread_frame_find_queue(dq))) { - return; - } - _dispatch_assert_queue_fail(dq, true); -} - -void -dispatch_assert_queue_not(dispatch_queue_t dq) -{ - unsigned long metatype = dx_metatype(dq); - if (unlikely(metatype != _DISPATCH_LANE_TYPE && - metatype != _DISPATCH_WORKLOOP_TYPE)) { - DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " - "dispatch_assert_queue_not()"); - } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by_self(dq_state))) { - _dispatch_assert_queue_fail(dq, false); - } - if (unlikely(_dispatch_thread_frame_find_queue(dq))) { - _dispatch_assert_queue_fail(dq, false); - } -} - -void -dispatch_assert_queue_barrier(dispatch_queue_t dq) -{ - dispatch_assert_queue(dq); - - if (likely(dq->dq_width == 1)) { - return; - } - - if (likely(dq->do_targetq)) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (likely(_dq_state_is_in_barrier(dq_state))) { - return; - } - } - - _dispatch_assert_queue_barrier_fail(dq); -} - -#pragma mark - -#pragma mark _dispatch_set_priority_and_mach_voucher -#if HAVE_PTHREAD_WORKQUEUE_QOS - -DISPATCH_NOINLINE -void -_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, - mach_voucher_t kv) -{ - _pthread_set_flags_t pflags = (_pthread_set_flags_t)0; - if (pp && _dispatch_set_qos_class_enabled) { - pthread_priority_t old_pri = _dispatch_get_priority(); - if (pp != old_pri) { - if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) { - pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; - // when we unbind, overcomitness can flip, so we need to learn - // it from the defaultpri, see _dispatch_priority_compute_update - pp |= (_dispatch_get_basepri() & - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); - } else { - // else we need to keep the one that is set in the current pri - pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - } - if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - pflags |= _PTHREAD_SET_SELF_QOS_FLAG; - } - uint64_t mgr_dq_state = - os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { - DISPATCH_INTERNAL_CRASH(pp, - "Changing the QoS while on the manager queue"); - } - if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { - DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager"); - } - if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { - DISPATCH_INTERNAL_CRASH(old_pri, - "Cannot turn a manager thread into a normal one"); - } - } - } - if (kv != VOUCHER_NO_MACH_VOUCHER) { -#if VOUCHER_USE_MACH_VOUCHER - pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; -#endif - } - if (!pflags) return; - int r = _pthread_set_properties_self(pflags, pp, kv); - if (r == EINVAL) { - DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed"); - } - (void)dispatch_assume_zero(r); -} - -DISPATCH_NOINLINE 
-voucher_t -_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, - voucher_t v, dispatch_thread_set_self_t flags) -{ - voucher_t ov = DISPATCH_NO_VOUCHER; - mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; - if (v != DISPATCH_NO_VOUCHER) { - bool retained = flags & DISPATCH_VOUCHER_CONSUME; - ov = _voucher_get(); - if (ov == v && (flags & DISPATCH_VOUCHER_REPLACE)) { - if (retained && v) _voucher_release_no_dispose(v); - ov = DISPATCH_NO_VOUCHER; - } else { - if (!retained && v) _voucher_retain(v); - kv = _voucher_swap_and_get_mach_voucher(ov, v); - } - } - if (!(flags & DISPATCH_THREAD_PARK)) { - _dispatch_set_priority_and_mach_voucher_slow(priority, kv); - } - if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { - if (ov) _voucher_release(ov); - ov = DISPATCH_NO_VOUCHER; - } - return ov; -} -#endif -#pragma mark - -#pragma mark dispatch_continuation_t - -static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -#if HAVE_PTHREAD_WORKQUEUE_QOS -static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -static void _dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -#endif // HAVE_PTHREAD_WORKQUEUE_QOS - -const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { - DC_VTABLE_ENTRY(ASYNC_REDIRECT, - .do_invoke = _dispatch_async_redirect_invoke), -#if HAVE_MACH - DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, - .do_invoke = _dispatch_mach_send_barrier_drain_invoke), - DC_VTABLE_ENTRY(MACH_SEND_BARRIER, - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_RECV_BARRIER, - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, - .do_invoke = _dispatch_mach_msg_async_reply_invoke), -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - DC_VTABLE_ENTRY(WORKLOOP_STEALING, - .do_invoke = _dispatch_workloop_stealer_invoke), - DC_VTABLE_ENTRY(OVERRIDE_STEALING, - .do_invoke = _dispatch_queue_override_invoke), - DC_VTABLE_ENTRY(OVERRIDE_OWNING, - .do_invoke = _dispatch_queue_override_invoke), -#endif -#if HAVE_MACH - DC_VTABLE_ENTRY(MACH_IPC_HANDOFF, - .do_invoke = _dispatch_mach_ipc_handoff_invoke), -#endif -}; - -DISPATCH_NOINLINE -static void DISPATCH_TSD_DTOR_CC -_dispatch_cache_cleanup(void *value) -{ - dispatch_continuation_t dc, next_dc = value; - - while ((dc = next_dc)) { - next_dc = dc->do_next; - _dispatch_continuation_free_to_heap(dc); - } -} - -static void -_dispatch_force_cache_cleanup(void) -{ - dispatch_continuation_t dc; - dc = _dispatch_thread_getspecific(dispatch_cache_key); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, NULL); - _dispatch_cache_cleanup(dc); - } -} - -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE -DISPATCH_NOINLINE -void -_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) -{ - _dispatch_continuation_free_to_heap(dc); - dispatch_continuation_t next_dc; - dc = _dispatch_thread_getspecific(dispatch_cache_key); - int cnt; - if (!dc || (cnt = dc->dc_cache_cnt - - _dispatch_continuation_cache_limit) <= 0) { - return; - } - do { - next_dc = dc->do_next; - _dispatch_continuation_free_to_heap(dc); - } while (--cnt && (dc = next_dc)); - _dispatch_thread_setspecific(dispatch_cache_key, next_dc); -} -#endif - -DISPATCH_NOINLINE -void -_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t 
flags, dispatch_queue_class_t dqu) -{ - _dispatch_continuation_pop_inline(dou, dic, flags, dqu._dq); -} - -#pragma mark - -#pragma mark dispatch_block_create - -#if __BLOCKS__ - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_block_flags_valid(dispatch_block_flags_t flags) -{ - return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_block_flags_t -_dispatch_block_normalize_flags(dispatch_block_flags_t flags) -{ - if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) { - flags |= DISPATCH_BLOCK_HAS_PRIORITY; - } - if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { - flags &= ~(dispatch_block_flags_t)DISPATCH_BLOCK_INHERIT_QOS_CLASS; - } - return flags; -} - -static inline dispatch_block_t -_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, - voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) -{ - dispatch_block_flags_t unmodified_flags = flags; - pthread_priority_t unmodified_pri = pri; - - flags = _dispatch_block_normalize_flags(flags); - bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); - - if (!(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { - if (flags & DISPATCH_BLOCK_DETACHED) { - voucher = VOUCHER_NULL; - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } else if (flags & DISPATCH_BLOCK_NO_VOUCHER) { - voucher = DISPATCH_NO_VOUCHER; - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } else if (assign) { -#if OS_VOUCHER_ACTIVITY_SPI - voucher = VOUCHER_CURRENT; -#endif - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } - } -#if OS_VOUCHER_ACTIVITY_SPI - if (voucher == VOUCHER_CURRENT) { - voucher = _voucher_get(); - } -#endif - if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { - pri = _dispatch_priority_propagate(); - flags |= DISPATCH_BLOCK_HAS_PRIORITY; - } - dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); - -#if DISPATCH_DEBUG - dispatch_assert(_dispatch_block_get_data(db)); -#endif - - _dispatch_trace_block_create_with_voucher_and_priority(db, - _dispatch_Block_invoke(block), unmodified_flags, - ((unmodified_flags & DISPATCH_BLOCK_HAS_PRIORITY) ? 
unmodified_pri : - (unsigned long)UINT32_MAX), - _dispatch_get_priority(), pri); - return db; -} - -dispatch_block_t -dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; - return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, - block); -} - -dispatch_block_t -dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, - dispatch_qos_class_t qos_class, int relative_priority, - dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags) || - !_dispatch_qos_class_valid(qos_class, relative_priority)) { - return DISPATCH_BAD_INPUT; - } - flags |= DISPATCH_BLOCK_HAS_PRIORITY; - pthread_priority_t pri = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); -#endif - return _dispatch_block_create_with_voucher_and_priority(flags, NULL, - pri, block); -} - -dispatch_block_t -dispatch_block_create_with_voucher(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - flags &= ~DISPATCH_BLOCK_NO_VOUCHER; - return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, - block); -} - -dispatch_block_t -dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_qos_class_t qos_class, - int relative_priority, dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags) || - !_dispatch_qos_class_valid(qos_class, relative_priority)) { - return DISPATCH_BAD_INPUT; - } - flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); - flags &= ~(DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_NO_QOS_CLASS); - pthread_priority_t pri = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); -#endif - return _dispatch_block_create_with_voucher_and_priority(flags, voucher, - pri, block); -} - -void -dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags)) { - DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " - "dispatch_block_perform()"); - } - flags = _dispatch_block_normalize_flags(flags); - - voucher_t voucher = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_DETACHED) { - voucher = VOUCHER_NULL; - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } - - struct dispatch_block_private_data_s dbpds = - DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block, voucher); - return _dispatch_block_invoke_direct(&dbpds); -} - -void -_dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) -{ - dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; - dispatch_block_flags_t flags = dbpd->dbpd_flags; - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (unlikely(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); - } - if (atomic_flags & DBF_CANCELED) goto out; - - pthread_priority_t op = 0, p = 0; - op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); - if (op) { - p = dbpd->dbpd_priority; - } - voucher_t ov, v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - } - ov = _dispatch_set_priority_and_voucher(p, v, 0); - dbpd->dbpd_thread = _dispatch_tid_self(); - _dispatch_client_callout(dbpd->dbpd_block, - 
_dispatch_Block_invoke(dbpd->dbpd_block)); - _dispatch_reset_priority_and_voucher(op, ov); -out: - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(dbpd->dbpd_group); - } - } -} - -void -_dispatch_block_sync_invoke(void *block) -{ - dispatch_block_t b = block; - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - dispatch_block_flags_t flags = dbpd->dbpd_flags; - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (unlikely(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); - } - if (atomic_flags & DBF_CANCELED) goto out; - - voucher_t ov = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); - } - dbpd->dbpd_block(); - _dispatch_reset_voucher(ov, 0); -out: - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(dbpd->dbpd_group); - } - } - - dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (boost_dq) { - // balances dispatch_{,barrier_,}sync - _dispatch_release_2(boost_dq); - } -} - -#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 - -DISPATCH_NOINLINE -static void -_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (unlikely(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); - } - - if (likely(!(atomic_flags & DBF_CANCELED))) { - dbpd->dbpd_block(); - } - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(dbpd->dbpd_group); - } - } - - dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (boost_dq) { - // balances dispatch_{,barrier_,group_}async - _dispatch_release_2(boost_dq); - } - - if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { - Block_release(b); - } -} - -static void -_dispatch_block_async_invoke(void *block) -{ - _dispatch_block_async_invoke2(block, 0); -} - -static void -_dispatch_block_async_invoke_and_release(void *block) -{ - _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); -} - -void -dispatch_block_cancel(dispatch_block_t db) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!dbpd)) { - DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " - "dispatch_block_cancel()"); - } - (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); -} - -intptr_t -dispatch_block_testcancel(dispatch_block_t db) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!dbpd)) { - DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " - "dispatch_block_testcancel()"); - } - return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); -} - -intptr_t -dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!dbpd)) { - DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " - "dispatch_block_wait()"); - } - - unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, - DBF_WAITING, relaxed); - if (unlikely(flags & (DBF_WAITED | 
DBF_WAITING))) { - DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " - "more than once"); - } - - // If we know the queue where this block is - // enqueued, or the thread that's executing it, then we should boost - // it here. - - pthread_priority_t pp = _dispatch_get_priority(); - - dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (boost_dq) { - // release balances dispatch_{,barrier_,group_}async. - // Can't put the queue back in the timeout case: the block might - // finish after we fell out of group_wait and see our NULL, so - // neither of us would ever release. Side effect: After a _wait - // that times out, subsequent waits will not boost the qos of the - // still-running block. - dx_wakeup(boost_dq, _dispatch_qos_from_pp(pp), - DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2); - } - - mach_port_t boost_th = dbpd->dbpd_thread; - if (boost_th) { - _dispatch_thread_override_start(boost_th, pp, dbpd); - } - - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (unlikely(performed > 1 || (boost_th && boost_dq))) { - DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " - "run more than once and waited for"); - } - - intptr_t ret = dispatch_group_wait(dbpd->dbpd_group, timeout); - - if (boost_th) { - _dispatch_thread_override_end(boost_th, dbpd); - } - - if (ret) { - // timed out: reverse our changes - os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); - } else { - os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); - // don't need to re-test here: the second call would see - // the first call's WAITING - } - - return ret; -} - -void -dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, - dispatch_block_t notification_block) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { - DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " - "dispatch_block_notify()"); - } - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (unlikely(performed > 1)) { - DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " - "run more than once and observed"); - } - - return dispatch_group_notify(dbpd->dbpd_group, queue, notification_block); -} - -DISPATCH_NOINLINE -dispatch_qos_t -_dispatch_continuation_init_slow(dispatch_continuation_t dc, - dispatch_queue_t dq, dispatch_block_flags_t flags) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); - dispatch_block_flags_t block_flags = dbpd->dbpd_flags; - uintptr_t dc_flags = dc->dc_flags; - pthread_priority_t pp = 0; - - // balanced in d_block_async_invoke_and_release or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } - - if (dc_flags & DC_FLAG_CONSUME) { - dc->dc_func = _dispatch_block_async_invoke_and_release; - } else { - dc->dc_func = _dispatch_block_async_invoke; - } - - flags |= block_flags; - if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pp = dbpd->dbpd_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - } else if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - // _dispatch_source_handler_alloc is calling is and doesn't want us - // to propagate priorities - pp = 0; - } else { - pp = _dispatch_priority_propagate(); - } - _dispatch_continuation_priority_set(dc, dq, pp, flags); - if (block_flags & DISPATCH_BLOCK_BARRIER) { - dc_flags |= DC_FLAG_BARRIER; - } - if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { - voucher_t v = dbpd->dbpd_voucher; - dc->dc_voucher = (v && v 
!= DISPATCH_NO_VOUCHER) ? _voucher_retain(v) - : v; - _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); - _dispatch_voucher_ktrace_dc_push(dc); - } else { - _dispatch_continuation_voucher_set(dc, flags); - } - dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; - dc->dc_flags = dc_flags; - return _dispatch_qos_from_pp(dc->dc_priority); -} - -#endif // __BLOCKS__ -#pragma mark - -#pragma mark dispatch_barrier_async - -DISPATCH_NOINLINE -static void -_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, dispatch_block_flags_t flags, - uintptr_t dc_flags) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - dispatch_qos_t qos; - - qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); - _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; - dispatch_qos_t qos; - - if (likely(!dc)) { - return _dispatch_async_f_slow(dq, ctxt, func, 0, dc_flags); - } - - qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, dc_flags); - _dispatch_continuation_async(dq, dc, qos, dc_flags); -} - -DISPATCH_NOINLINE -void -_dispatch_barrier_async_detached_f(dispatch_queue_class_t dq, void *ctxt, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER | DC_FLAG_ALLOCATED; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_trace_item_push(dq, dc); - dx_push(dq._dq, dc, 0); -} - -#ifdef __BLOCKS__ -void -dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; - dispatch_qos_t qos; - - qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); - _dispatch_continuation_async(dq, dc, qos, dc_flags); -} -#endif - -#pragma mark - -#pragma mark dispatch_async - -void -_dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) -{ - dispatch_thread_frame_s dtf; - struct dispatch_continuation_s *other_dc = dc->dc_other; - dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; - // if we went through _dispatch_root_queue_push_override, - // the "right" root queue was stuffed into dc_func - dispatch_queue_global_t assumed_rq = (dispatch_queue_global_t)dc->dc_func; - dispatch_lane_t dq = dc->dc_data; - dispatch_queue_t rq, old_dq; - dispatch_priority_t old_dbp; - - if (ctxt_flags) { - flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; - flags |= ctxt_flags; - } - old_dq = _dispatch_queue_get_current(); - if (assumed_rq) { - old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); - _dispatch_set_basepri(dq->dq_priority); - } else { - old_dbp = _dispatch_set_basepri(dq->dq_priority); - } - - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION; - _dispatch_thread_frame_push(&dtf, dq); - _dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, { - _dispatch_continuation_pop(other_dc, dic, flags, dq); - }); - _dispatch_thread_frame_pop(&dtf); - if (assumed_rq) _dispatch_queue_set_current(old_dq); - _dispatch_reset_basepri(old_dbp); - - rq = dq->do_targetq; - while (unlikely(rq->do_targetq && rq 
!= old_dq)) { - _dispatch_lane_non_barrier_complete(upcast(rq)._dl, 0); - rq = rq->do_targetq; - } - - // pairs with _dispatch_async_redirect_wrap - _dispatch_lane_non_barrier_complete(dq, DISPATCH_WAKEUP_CONSUME_2); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_async_redirect_wrap(dispatch_lane_t dq, dispatch_object_t dou) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - - dou._do->do_next = NULL; - dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); - dc->dc_func = NULL; - dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); - dc->dc_data = dq; - dc->dc_other = dou._do; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain_2(dq); // released in _dispatch_async_redirect_invoke - return dc; -} - -DISPATCH_NOINLINE -static void -_dispatch_continuation_redirect_push(dispatch_lane_t dl, - dispatch_object_t dou, dispatch_qos_t qos) -{ - if (likely(!_dispatch_object_is_redirection(dou))) { - dou._dc = _dispatch_async_redirect_wrap(dl, dou); - } else if (!dou._dc->dc_ctxt) { - // find first queue in descending target queue order that has - // an autorelease frequency set, and use that as the frequency for - // this continuation. - dou._dc->dc_ctxt = (void *) - (uintptr_t)_dispatch_queue_autorelease_frequency(dl); - } - - dispatch_queue_t dq = dl->do_targetq; - if (!qos) qos = _dispatch_priority_qos(dq->dq_priority); - dx_push(dq, dou, qos); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - dispatch_block_flags_t flags) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); - uintptr_t dc_flags = DC_FLAG_CONSUME; - dispatch_qos_t qos; - - if (unlikely(!dc)) { - return _dispatch_async_f_slow(dq, ctxt, func, flags, dc_flags); - } - - qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); - _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) -{ - _dispatch_async_f(dq, ctxt, func, 0); -} - -DISPATCH_NOINLINE -void -dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_async_f(dq, ctxt, func, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); -} - -#ifdef __BLOCKS__ -void -dispatch_async(dispatch_queue_t dq, dispatch_block_t work) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME; - dispatch_qos_t qos; - - qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); - _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); -} - -#pragma mark - -#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete - -DISPATCH_ALWAYS_INLINE -static uint64_t -_dispatch_lane_non_barrier_complete_try_lock(dispatch_lane_t dq, - uint64_t old_state, uint64_t new_state, uint64_t owner_self) -{ - uint64_t full_width = new_state; - if (_dq_state_has_pending_barrier(new_state)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - new_state = full_width; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= owner_self; - } else if (_dq_state_is_dirty(old_state)) { - new_state |= 
DISPATCH_QUEUE_ENQUEUED; - } - return new_state; -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, - dispatch_wakeup_flags_t flags, uint64_t old_state, uint64_t new_state) -{ - if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); - } - - if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - if (_dq_state_is_dirty(old_state)) { - // - // dependency ordering for dq state changes that were flushed - // and not acted upon - os_atomic_thread_fence(dependency); - dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); - } - return _dispatch_lane_barrier_complete(dq, 0, flags); - } - - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { - _dispatch_retain_2(dq); - } - dispatch_assert(!_dq_state_is_base_wlh(new_state)); - _dispatch_trace_item_push(dq->do_targetq, dq); - return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state)); - } - - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - _dispatch_release_2_tailcall(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_non_barrier_complete(dispatch_lane_t dq, - dispatch_wakeup_flags_t flags) -{ - uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); - - // see _dispatch_lane_resume() - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; - if (unlikely(_dq_state_drain_locked(old_state))) { - // make drain_try_unlock() fail and reconsider whether there's - // enough width now for a new item - new_state |= DISPATCH_QUEUE_DIRTY; - } else if (likely(_dq_state_is_runnable(new_state))) { - new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, - old_state, new_state, owner_self); - } - }); - - _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt, - dispatch_function_t func) -{ - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, dq); - _dispatch_client_callout(ctxt, func); - _dispatch_perfmon_workitem_inc(); - _dispatch_thread_frame_pop(&dtf); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_function_invoke(dispatch_queue_class_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_sync_function_invoke_inline(dq, ctxt, func); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, - uintptr_t dc_flags) -{ - bool barrier = (dc_flags & DC_FLAG_BARRIER); - do { - if (dq == stop_dq) return; - if (barrier) { - dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE); - } else { - _dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0); - } - dq = dq->do_targetq; - barrier = (dq->dq_width == 1); - } while (unlikely(dq->do_targetq)); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq, - void *ctxt, dispatch_function_t func, uintptr_t dc_flags - DISPATCH_TRACE_ARG(void *dc)) -{ - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_trace_item_complete(dc); - _dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_invoke_and_complete(dispatch_lane_t dq, void *ctxt, - dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) -{ - 
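	/*
	 * Fast path for queues that target a root queue directly: once the
	 * work item has run, only this lane's reserved width needs to be
	 * returned. Contrast with _dispatch_sync_invoke_and_complete_recurse()
	 * above, which must walk do_targetq and unlock every level.
	 */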
_dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_trace_item_complete(dc); - _dispatch_lane_non_barrier_complete(dq, 0); -} - -/* - * For queues we can cheat and inline the unlock code, which is invalid - * for objects with a more complex state machine (sources or mach channels) - */ -DISPATCH_NOINLINE -static void -_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, - void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) -{ - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_trace_item_complete(dc); - if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) { - return _dispatch_lane_barrier_complete(dq, 0, 0); - } - - // Presence of any of these bits requires more work that only - // _dispatch_*_barrier_complete() handles properly - // - // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without - // checking the role is sloppy, but is a super fast check, and neither of - // these bits should be set if the lock was never contended/discovered. - const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | - DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | - DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | - DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - uint64_t old_state, new_state; - - // similar to _dispatch_queue_drain_try_unlock - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - if (unlikely(old_state & fail_unlock_mask)) { - os_atomic_rmw_loop_give_up({ - return _dispatch_lane_barrier_complete(dq, 0, 0); - }); - } - }); - if (_dq_state_is_base_wlh(old_state)) { - _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); - } -} - -#pragma mark - -#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake - -DISPATCH_NOINLINE -static void -_dispatch_waiter_wake_wlh_anon(dispatch_sync_context_t dsc) -{ - if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { - _dispatch_wqthread_override_start(dsc->dsc_waiter, - dsc->dsc_override_qos); - } - _dispatch_thread_event_signal(&dsc->dsc_event); -} - -DISPATCH_NOINLINE -static void -_dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, - uint64_t old_state, uint64_t new_state) -{ - dispatch_wlh_t waiter_wlh = dsc->dc_data; - -#if DISPATCH_USE_KEVENT_WORKLOOP - // - // We need to interact with a workloop if any of the following 3 cases: - // 1. the current owner of the lock has a SYNC_WAIT knote to destroy - // 2. the next owner of the lock is a workloop, we need to make sure it has - // a SYNC_WAIT knote to destroy when it will later release the lock - // 3. the waiter is waiting on a workloop (which may be different from `wlh` - // if the hierarchy was mutated after the next owner started waiting) - // - // However, note that even when (2) is true, the next owner may be waiting - // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next - // owner is really woken up when the thread event is signaled. 
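	// (Case 3 exists because dsc->dc_data is computed once, before the
	// waiter is pushed; a later dispatch_set_target_queue() can make it
	// differ from the `wlh` argument seen here.)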
- // -#endif - if (_dq_state_in_sync_transfer(old_state) || - _dq_state_in_sync_transfer(new_state) || - (waiter_wlh != DISPATCH_WLH_ANON)) { - _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); - } - if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { - _dispatch_waiter_wake_wlh_anon(dsc); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_async_waiter_update(dispatch_sync_context_t dsc, - dispatch_queue_class_t dqu) -{ - dispatch_queue_t dq = dqu._dq; - dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; - if (p) { - pthread_priority_t pp = _dispatch_priority_to_pp_strip_flags(p); - if (pp > (dsc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - dsc->dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - } - - if (dsc->dsc_autorelease == 0) { - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dqu); - dqf &= (dispatch_queue_flags_t)_DQF_AUTORELEASE_MASK; - dsc->dsc_autorelease = (uint8_t)(dqf / DQF_AUTORELEASE_ALWAYS); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, - dispatch_object_t dou) -{ - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc; - uint64_t old_state; - - dispatch_assert(!(dsc->dc_flags & DC_FLAG_BARRIER)); - -again: - old_state = os_atomic_load2o(dq, dq_state, relaxed); - - if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { - dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); - } - - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - _dispatch_async_waiter_update(dsc, dq); - } - - if (unlikely(_dq_state_is_inner_queue(old_state))) { - dispatch_queue_t tq = dq->do_targetq; - if (likely(tq->dq_width == 1)) { - dsc->dc_flags |= DC_FLAG_BARRIER; - } else { - dsc->dc_flags &= ~DC_FLAG_BARRIER; - if (_dispatch_queue_try_reserve_sync_width(upcast(tq)._dl)) { - dq = upcast(tq)._dl; - goto again; - } - } - return dx_push(tq, dsc, 0); - } - - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects - // dc_other to be the bottom queue of the graph - dsc->dc_other = dq; - } - return _dispatch_waiter_wake_wlh_anon(dsc); -} - -DISPATCH_NOINLINE -static void -_dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, - dispatch_object_t dc, dispatch_wakeup_flags_t flags, - uint64_t old_state, uint64_t new_state) -{ - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc._dc; - dispatch_queue_t dq = dqu._dq; - dispatch_wlh_t wlh = DISPATCH_WLH_ANON; - - if (dsc->dc_data == DISPATCH_WLH_ANON) { - if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { - dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); - } - } - - if (_dq_state_is_base_wlh(old_state)) { - wlh = (dispatch_wlh_t)dq; - } else if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); - } - - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - if (_dq_state_is_base_wlh(old_state) && - _dq_state_is_enqueued_on_target(new_state)) { - // If the thread request still exists, we need to leave it a +1 - _dispatch_release_no_dispose(dq); - } else { - _dispatch_release_2_no_dispose(dq); - } - } else if (_dq_state_is_base_wlh(old_state) && - _dq_state_is_enqueued_on_target(old_state) && - !_dq_state_is_enqueued_on_target(new_state)) { - // If we cleared the enqueued bit, we're about to destroy the workloop - // thread request, and we need to consume its +1. 
- _dispatch_release_no_dispose(dq); - } - - // - // Past this point we are borrowing the reference of the sync waiter - // - if (unlikely(_dq_state_is_inner_queue(old_state))) { - dispatch_queue_t tq = dq->do_targetq; - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - _dispatch_async_waiter_update(dsc, dq); - } - if (likely(tq->dq_width == 1)) { - dsc->dc_flags |= DC_FLAG_BARRIER; - } else { - dispatch_lane_t dl = upcast(tq)._dl; - dsc->dc_flags &= ~DC_FLAG_BARRIER; - if (_dispatch_queue_try_reserve_sync_width(dl)) { - return _dispatch_non_barrier_waiter_redirect_or_wake(dl, dc); - } - } - // passing the QoS of `dq` helps pushing on low priority waiters with - // legacy workloops. -#if DISPATCH_INTROSPECTION - dsc->dsc_from_async = false; -#endif - return dx_push(tq, dsc, _dq_state_max_qos(old_state)); - } - -#if DISPATCH_INTROSPECTION - if (dsc->dsc_from_async) { - _dispatch_trace_runtime_event(async_sync_handoff, dq, 0); - } else { - _dispatch_trace_runtime_event(sync_sync_handoff, dq, 0); - } -#endif // DISPATCH_INTROSPECTION - - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // Falling into case (2) of _dispatch_async_and_wait_f_slow, dc_other is - // the bottom queue - dsc->dc_other = dq; - } - return _dispatch_waiter_wake(dsc, wlh, old_state, new_state); -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, - struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags, - uint64_t enqueued_bits) -{ - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; - struct dispatch_object_s *next_dc; - uint64_t next_owner = 0, old_state, new_state; - - next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); - next_dc = _dispatch_queue_pop_head(dq, dc); - -transfer_lock_again: - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= next_owner; - - if (_dq_state_is_base_wlh(old_state)) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; - if (next_dc) { - // we know there's a next item, keep the enqueued bit if any - } else if (unlikely(_dq_state_is_dirty(old_state))) { - os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); - goto transfer_lock_again; - }); - } else { - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - new_state &= ~DISPATCH_QUEUE_ENQUEUED; - } - } else { - new_state -= enqueued_bits; - } - }); - - return _dispatch_barrier_waiter_redirect_or_wake(dq, dc, flags, - old_state, new_state); -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, - uint64_t owned) -{ - uint64_t old_state, new_state, enqueue; - dispatch_queue_t tq; - - if (target == DISPATCH_QUEUE_WAKEUP_MGR) { - tq = _dispatch_mgr_q._as_dq; - enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; - } else if (target) { - tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? 
dq->do_targetq : target; - enqueue = DISPATCH_QUEUE_ENQUEUED; - } else { - tq = NULL; - enqueue = 0; - } - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = _dq_state_merge_qos(old_state - owned, qos); - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - if (unlikely(_dq_state_is_suspended(old_state))) { - if (likely(_dq_state_is_base_wlh(old_state))) { - new_state &= ~DISPATCH_QUEUE_ENQUEUED; - } - } else if (enqueue) { - if (!_dq_state_is_enqueued(old_state)) { - new_state |= enqueue; - } - } else if (unlikely(_dq_state_is_dirty(old_state))) { - os_atomic_rmw_loop_give_up({ - // just renew the drain lock with an acquire barrier, to see - // what the enqueuer that set DIRTY has done. - // the xor generates better assembly as DISPATCH_QUEUE_DIRTY - // is already in a register - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; - return dx_wakeup(dq, qos, flags); - }); - } else { - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - } - }); - old_state -= owned; - dispatch_assert(_dq_state_drain_locked_by_self(old_state)); - dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); - - if (_dq_state_is_enqueued(new_state)) { - _dispatch_trace_runtime_event(sync_async_handoff, dq, 0); - } - -#if DISPATCH_USE_KEVENT_WORKLOOP - if (_dq_state_is_base_wlh(old_state)) { - // - Only non-"du_is_direct" sources & mach channels can be enqueued - // on the manager. - // - // - Only dispatch_source_cancel_and_wait() and - // dispatch_source_set_*_handler() use the barrier complete codepath, - // none of which are used by mach channels. - // - // Hence no source-ish object can both be a workloop and need to use the - // manager at the same time. - dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); - if (_dq_state_is_enqueued_on_target(old_state) || - _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { - return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, - old_state, new_state, flags); - } - _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } - return; - } -#endif - - if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. 
-		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
-	}
-
-	if (tq) {
-		if (likely((old_state ^ new_state) & enqueue)) {
-			dispatch_assert(_dq_state_is_enqueued(new_state));
-			dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
-			return _dispatch_queue_push_queue(tq, dq, new_state);
-		}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-		// when doing sync to async handoff
-		// if the queue received an override we have to forcefully redrive
-		// the same override so that a new stealer is enqueued because
-		// the previous one may be gone already
-		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dq, new_state, flags);
-		}
-#endif
-	}
-	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
-		return _dispatch_release_2_tailcall(dq);
-	}
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_lane_drain_non_barriers(dispatch_lane_t dq,
-		struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags)
-{
-	size_t owned_width = dq->dq_width;
-	struct dispatch_object_s *next_dc;
-
-	// see _dispatch_lane_drain, go in non barrier mode, and drain items
-
-	os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_IN_BARRIER, release);
-
-	do {
-		if (likely(owned_width)) {
-			owned_width--;
-		} else if (_dispatch_object_is_waiter(dc)) {
-			// sync "readers" don't observe the limit
-			_dispatch_queue_reserve_sync_width(dq);
-		} else if (!_dispatch_queue_try_acquire_async(dq)) {
-			// no width left
-			break;
-		}
-		next_dc = _dispatch_queue_pop_head(dq, dc);
-		if (_dispatch_object_is_waiter(dc)) {
-			_dispatch_non_barrier_waiter_redirect_or_wake(dq, dc);
-		} else {
-			_dispatch_continuation_redirect_push(dq, dc,
-					_dispatch_queue_max_qos(dq));
-		}
-drain_again:
-		dc = next_dc;
-	} while (dc && !_dispatch_object_is_barrier(dc));
-
-	uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self();
-	uint64_t owned = owned_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-
-	if (dc) {
-		owned = _dispatch_queue_adjust_owned(dq, owned, dc);
-	}
-
-	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-		new_state = old_state - owned;
-		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-		new_state &= ~DISPATCH_QUEUE_DIRTY;
-
-		// similar to _dispatch_lane_non_barrier_complete():
-		// if by the time we get here all redirected non barrier syncs are
-		// done and returned their width to the queue, we may be the last
-		// chance for the next item to run/be re-driven.
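		// (Here `dc` is non-NULL when an item is still pending, either
		// because the drain stopped on a barrier or ran out of width;
		// setting DIRTY below makes sure that item gets re-driven.)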
- if (unlikely(dc)) { - new_state |= DISPATCH_QUEUE_DIRTY; - new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, - old_state, new_state, owner_self); - } else if (unlikely(_dq_state_is_dirty(old_state))) { - os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); - goto drain_again; - }); - } - }); - - old_state -= owned; - _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) -{ - dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; - dispatch_lane_t dq = dqu._dl; - - if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { - struct dispatch_object_s *dc = _dispatch_queue_get_head(dq); - if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) { - if (_dispatch_object_is_waiter(dc)) { - return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0); - } - } else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) { - return _dispatch_lane_drain_non_barriers(dq, dc, flags); - } - - if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { - _dispatch_retain_2(dq); - flags |= DISPATCH_WAKEUP_CONSUME_2; - } - target = DISPATCH_QUEUE_WAKEUP_TARGET; - } - - uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + - dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned); -} - -static void -_dispatch_async_and_wait_invoke(void *ctxt) -{ - dispatch_sync_context_t dsc = ctxt; - dispatch_queue_t top_dq = dsc->dc_other; - dispatch_invoke_flags_t iflags; - - // the block runs on the thread the queue is bound to and not - // on the calling thread, but we want to see the calling thread - // dispatch thread frames, so we fake the link, and then undo it - iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS; - dispatch_invoke_with_autoreleasepool(iflags, { - dispatch_thread_frame_s dtf; - _dispatch_introspection_sync_begin(top_dq); - _dispatch_thread_frame_push_and_rebase(&dtf, top_dq, &dsc->dsc_dtf); - _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); - _dispatch_thread_frame_pop(&dtf); - }); - - // communicate back to _dispatch_async_and_wait_f_slow and - // _dispatch_sync_f_slow on which queue the work item was invoked - // so that the *_complete_recurse() call stops unlocking when it reaches it - dsc->dc_other = _dispatch_queue_get_current(); - dsc->dsc_func = NULL; - - if (dsc->dc_data == DISPATCH_WLH_ANON) { - _dispatch_thread_event_signal(&dsc->dsc_event); // release - } else { - _dispatch_event_loop_cancel_waiter(dsc); - } -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_wait_prepare(dispatch_queue_t dq) -{ - uint64_t old_state, new_state; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if (_dq_state_is_suspended(old_state) || - !_dq_state_is_base_wlh(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - if (!_dq_state_drain_locked(old_state) || - _dq_state_in_sync_transfer(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - }); - return new_state; -} - -static void -_dispatch_wait_compute_wlh(dispatch_lane_t dq, dispatch_sync_context_t dsc) -{ - bool needs_locking = _dispatch_queue_is_mutable(dq); - - if (needs_locking) { - dsc->dsc_release_storage = true; - _dispatch_queue_sidelock_lock(dq); - } - - 
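	/*
	 * Holding the sidelock keeps do_targetq stable against a concurrent
	 * dispatch_set_target_queue() while the hierarchy is walked; queues
	 * whose targeting can no longer change skip the lock entirely.
	 */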
dispatch_queue_t tq = dq->do_targetq; - uint64_t tq_state = _dispatch_wait_prepare(tq); - - if (_dq_state_is_suspended(tq_state) || - _dq_state_is_base_anon(tq_state)) { - dsc->dsc_release_storage = false; - dsc->dc_data = DISPATCH_WLH_ANON; - } else if (_dq_state_is_base_wlh(tq_state)) { - if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { - dsc->dsc_wlh_is_workloop = true; - dsc->dsc_release_storage = false; - } else if (dsc->dsc_release_storage) { - _dispatch_queue_retain_storage(tq); - } - dsc->dc_data = (dispatch_wlh_t)tq; - } else { - _dispatch_wait_compute_wlh(upcast(tq)._dl, dsc); - } - if (needs_locking) { - if (dsc->dsc_wlh_is_workloop) { - _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); - } - _dispatch_queue_sidelock_unlock(dq); - } -} - -DISPATCH_NOINLINE -static void -__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) -{ - uint64_t dq_state = _dispatch_wait_prepare(dq); - if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch_sync called on queue " - "already owned by current thread"); - } - - // Blocks submitted to the main thread MUST run on the main thread, and - // dispatch_async_and_wait also executes on the remote context rather than - // the current thread. - // - // For both these cases we need to save the frame linkage for the sake of - // _dispatch_async_and_wait_invoke - _dispatch_thread_frame_save_state(&dsc->dsc_dtf); - - if (_dq_state_is_suspended(dq_state) || - _dq_state_is_base_anon(dq_state)) { - dsc->dc_data = DISPATCH_WLH_ANON; - } else if (_dq_state_is_base_wlh(dq_state)) { - dsc->dc_data = (dispatch_wlh_t)dq; - } else { - _dispatch_wait_compute_wlh(upcast(dq)._dl, dsc); - } - - if (dsc->dc_data == DISPATCH_WLH_ANON) { - dsc->dsc_override_qos_floor = dsc->dsc_override_qos = - (uint8_t)_dispatch_get_basepri_override_qos_floor(); - _dispatch_thread_event_init(&dsc->dsc_event); - } - dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); - _dispatch_trace_runtime_event(sync_wait, dq, 0); - if (dsc->dc_data == DISPATCH_WLH_ANON) { - _dispatch_thread_event_wait(&dsc->dsc_event); // acquire - } else { - _dispatch_event_loop_wait_for_ownership(dsc); - } - if (dsc->dc_data == DISPATCH_WLH_ANON) { - _dispatch_thread_event_destroy(&dsc->dsc_event); - // If _dispatch_sync_waiter_wake() gave this thread an override, - // ensure that the root queue sees it. - if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { - _dispatch_set_basepri_override_qos(dsc->dsc_override_qos); - } - } -} - -#pragma mark - -#pragma mark _dispatch_barrier_trysync_or_async_f - -DISPATCH_NOINLINE -static void -_dispatch_barrier_trysync_or_async_f_complete(dispatch_lane_t dq, - void *ctxt, dispatch_function_t func, uint32_t flags) -{ - dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE; - - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { - uint64_t dq_state = os_atomic_sub2o(dq, dq_state, - DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); - if (!_dq_state_is_suspended(dq_state)) { - wflags |= DISPATCH_WAKEUP_CONSUME_2; - } - } - dx_wakeup(dq, 0, wflags); -} - -// Use for mutation of queue-/source-internal state only -// ignores target queue hierarchy! -DISPATCH_NOINLINE -void -_dispatch_barrier_trysync_or_async_f(dispatch_lane_t dq, void *ctxt, - dispatch_function_t func, uint32_t flags) -{ - dispatch_tid tid = _dispatch_tid_self(); - uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 
1 : 0; - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, - suspend_count))) { - return _dispatch_barrier_async_detached_f(dq, ctxt, func); - } - if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { - _dispatch_retain_2(dq); // see _dispatch_lane_suspend - } - _dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags); -} - -#pragma mark - -#pragma mark dispatch_sync / dispatch_barrier_sync - -DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt, - dispatch_function_t func, uintptr_t top_dc_flags, - dispatch_queue_class_t dqu, uintptr_t dc_flags) -{ - dispatch_queue_t top_dq = top_dqu._dq; - dispatch_queue_t dq = dqu._dq; - if (unlikely(!dq->do_targetq)) { - return _dispatch_sync_function_invoke(dq, ctxt, func); - } - - pthread_priority_t pp = _dispatch_get_priority(); - struct dispatch_sync_context_s dsc = { - .dc_flags = DC_FLAG_SYNC_WAITER | dc_flags, - .dc_func = _dispatch_async_and_wait_invoke, - .dc_ctxt = &dsc, - .dc_other = top_dq, - .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, - .dc_voucher = _voucher_get(), - .dsc_func = func, - .dsc_ctxt = ctxt, - .dsc_waiter = _dispatch_tid_self(), - }; - - _dispatch_trace_item_push(top_dq, &dsc); - __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq); - - if (dsc.dsc_func == NULL) { - // dsc_func being cleared means that the block ran on another thread ie. - // case (2) as listed in _dispatch_async_and_wait_f_slow. - dispatch_queue_t stop_dq = dsc.dc_other; - return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags); - } - - _dispatch_introspection_sync_begin(top_dq); - _dispatch_trace_item_pop(top_dq, &dsc); - _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags - DISPATCH_TRACE_ARG(&dsc)); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) -{ - dispatch_tid tid = _dispatch_tid_self(); - dispatch_queue_t tq = dq->do_targetq; - - do { - if (likely(tq->dq_width == 1)) { - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { - return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, - DC_FLAG_BARRIER); - } - } else { - dispatch_queue_concurrent_t dl = upcast(tq)._dl; - if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) { - return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0); - } - } - tq = tq->do_targetq; - } while (unlikely(tq->do_targetq)); - - _dispatch_introspection_sync_begin(dq); - _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags - DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop( - dq, ctxt, func, dc_flags))); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) -{ - dispatch_tid tid = _dispatch_tid_self(); - - if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { - DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync"); - } - - dispatch_lane_t dl = upcast(dq)._dl; - // The more correct thing to do would be to merge the qos of the thread - // that just acquired the barrier lock into the queue state. - // - // However this is too expensive for the fast path, so skip doing it. - // The chosen tradeoff is that if an enqueue on a lower priority thread - // contends with this fast path, this thread may receive a useless override. 
- // - // Global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) { - return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl, - DC_FLAG_BARRIER | dc_flags); - } - - if (unlikely(dl->do_targetq->do_targetq)) { - return _dispatch_sync_recurse(dl, ctxt, func, - DC_FLAG_BARRIER | dc_flags); - } - _dispatch_introspection_sync_begin(dl); - _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func - DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop( - dq, ctxt, func, dc_flags | DC_FLAG_BARRIER))); -} - -DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) -{ - _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_barrier_sync_f_inline(dq, ctxt, func, 0); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) -{ - if (likely(dq->dq_width == 1)) { - return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags); - } - - if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { - DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync"); - } - - dispatch_lane_t dl = upcast(dq)._dl; - // Global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) { - return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags); - } - - if (unlikely(dq->do_targetq->do_targetq)) { - return _dispatch_sync_recurse(dl, ctxt, func, dc_flags); - } - _dispatch_introspection_sync_begin(dl); - _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG( - _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags))); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - uintptr_t dc_flags) -{ - _dispatch_sync_f_inline(dq, ctxt, func, dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) -{ - _dispatch_sync_f_inline(dq, ctxt, func, 0); -} - -#ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, - uintptr_t dc_flags) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); - pthread_priority_t op = 0, p = 0; - dispatch_block_flags_t flags = dbpd->dbpd_flags; - - if (flags & DISPATCH_BLOCK_BARRIER) { - dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; - } else { - dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; - } - - op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); - if (op) { - p = dbpd->dbpd_priority; - } - voucher_t ov, v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - } - ov = _dispatch_set_priority_and_voucher(p, v, 0); - - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } - if (dc_flags & DC_FLAG_BARRIER) { - _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, - dc_flags); - } else { - _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags); - } - 
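-	// restore the priority and voucher that were saved before dispatching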
_dispatch_reset_priority_and_voucher(op, ov);
-}
-
-void
-dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
-{
-	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
-	if (unlikely(_dispatch_block_has_private_data(work))) {
-		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
-	}
-	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
-{
-	uintptr_t dc_flags = DC_FLAG_BLOCK;
-	if (unlikely(_dispatch_block_has_private_data(work))) {
-		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
-	}
-	_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
-}
-#endif // __BLOCKS__
-
-#pragma mark -
-#pragma mark dispatch_async_and_wait
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_wlh_t
-_dispatch_fake_wlh(dispatch_queue_t dq)
-{
-	dispatch_wlh_t new_wlh = DISPATCH_WLH_ANON;
-	if (likely(dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) ||
-			_dq_state_is_base_wlh(os_atomic_load2o(dq, dq_state, relaxed))) {
-		new_wlh = (dispatch_wlh_t)dq;
-	}
-	dispatch_wlh_t old_wlh = _dispatch_get_wlh();
-	_dispatch_thread_setspecific(dispatch_wlh_key, new_wlh);
-	return old_wlh;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_restore_wlh(dispatch_wlh_t wlh)
-{
-	_dispatch_thread_setspecific(dispatch_wlh_key, wlh);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_async_and_wait_invoke_and_complete_recurse(dispatch_queue_t dq,
-		dispatch_sync_context_t dsc, dispatch_queue_t bottom_q,
-		uintptr_t top_dc_flags)
-{
-	dispatch_invoke_flags_t iflags;
-	dispatch_wlh_t old_wlh = _dispatch_fake_wlh(bottom_q);
-
-	iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS;
-	dispatch_invoke_with_autoreleasepool(iflags, {
-		dispatch_block_flags_t bflags = DISPATCH_BLOCK_HAS_PRIORITY;
-		dispatch_thread_frame_s dtf;
-		pthread_priority_t op = 0, p = dsc->dc_priority;
-		voucher_t ov, v = dsc->dc_voucher;
-
-		_dispatch_introspection_sync_begin(dq);
-		_dispatch_thread_frame_push(&dtf, dq);
-		op = _dispatch_block_invoke_should_set_priority(bflags, p);
-		ov = _dispatch_set_priority_and_voucher(op ? p : 0, v, 0);
-		_dispatch_trace_item_pop(dq, dsc);
-		_dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func);
-		_dispatch_perfmon_workitem_inc();
-		_dispatch_reset_priority_and_voucher(op, ov);
-		_dispatch_thread_frame_pop(&dtf);
-	});
-
-	_dispatch_trace_item_complete(dsc);
-
-	_dispatch_restore_wlh(old_wlh);
-	_dispatch_sync_complete_recurse(dq, NULL, top_dc_flags);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags,
-		dispatch_sync_context_t dsc, dispatch_queue_t tq)
-{
-	/* dc_other is an in-out parameter.
-	 *
-	 * As an in-param, it specifies the top queue on which the blocking
-	 * primitive is called.
-	 *
-	 * As an out-param, it refers to the queue up till which we have the drain
-	 * lock. This is slightly different depending on how we come out of
-	 * _WAIT_FOR_QUEUE.
-	 *
-	 * Case 1:
-	 * If the continuation is to be invoked on another thread - for
-	 * async_and_wait, or we ran on a thread-bound main queue - then someone
-	 * already called _dispatch_async_and_wait_invoke which invoked the block.
-	 * dc_other as an out-param here tells the enqueuer the queue up till
-	 * which the enqueuer got the drain lock, so that we know what to unlock
-	 * on the way out. This is the case where the enqueuer owns part of the
-	 * locks in the queue hierarchy (but not all).
-	 *
-	 * Case 2:
-	 * If the continuation is to be invoked on the enqueuing thread - because
-	 * we were contending with another sync or async_and_wait - then the
-	 * enqueuer returns from _WAIT_FOR_QUEUE without having invoked the block.
-	 * The enqueuer has had the locks for the rest of the queue hierarchy
-	 * handed off to it, so dc_other specifies the queue up till which it has
-	 * the locks, which in this case is the bottom queue in the hierarchy. So
-	 * it needs to unlock everything up till the bottom queue, on the way out.
-	 */
-
-	__DISPATCH_WAIT_FOR_QUEUE__(dsc, tq);
-
-	if (unlikely(dsc->dsc_func == NULL)) {
-		// see _dispatch_async_and_wait_invoke
-		dispatch_queue_t stop_dq = dsc->dc_other;
-		return _dispatch_sync_complete_recurse(dq, stop_dq, top_dc_flags);
-	}
-
-	// see _dispatch_*_redirect_or_wake
-	dispatch_queue_t bottom_q = dsc->dc_other;
-	return _dispatch_async_and_wait_invoke_and_complete_recurse(dq, dsc,
-			bottom_q, top_dc_flags);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu,
-		uint64_t dq_state)
-{
-	// If the queue is anchored at a pthread root queue for which we can't
-	// mirror attributes, then we need to take the async path.
-	return !_dq_state_is_inner_queue(dq_state) &&
-			!_dispatch_is_in_root_queues_array(dqu._dq->do_targetq);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq,
-		dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags)
-{
-	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-	if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) {
-		// Remove the async_and_wait flag but drive down the slow path so that
-		// we do the synchronous wait. We are guaranteed that dq is the base
-		// queue.
- // - // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so - // set dc_other to dq - dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; - dsc->dc_other = dq; - return false; - } - if (likely(dc_flags & DC_FLAG_BARRIER)) { - return _dispatch_queue_try_acquire_barrier_sync(dq, tid); - } - return _dispatch_queue_try_reserve_sync_width(upcast(dq)._dl); -} - -DISPATCH_NOINLINE -static void -_dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, - dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t top_flags) -{ - dispatch_queue_t dq = top_dq; - uintptr_t dc_flags = top_flags; - - _dispatch_trace_item_push(top_dq, dsc); - - for (;;) { - if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, dsc, tid, - dc_flags))) { - return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); - } - - _dispatch_async_waiter_update(dsc, dq); - if (likely(!dq->do_targetq->do_targetq)) break; - dq = dq->do_targetq; - if (likely(dq->dq_width == 1)) { - dc_flags |= DC_FLAG_BARRIER; - } else { - dc_flags &= ~DC_FLAG_BARRIER; - } - dsc->dc_flags = dc_flags; - } - - _dispatch_async_and_wait_invoke_and_complete_recurse(top_dq, dsc, dq, - top_flags); -} - -DISPATCH_NOINLINE -static void -_dispatch_async_and_wait_f(dispatch_queue_t dq, - void *ctxt, dispatch_function_t func, uintptr_t dc_flags) -{ - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_tid tid = _dispatch_tid_self(); - struct dispatch_sync_context_s dsc = { - .dc_flags = dc_flags, - .dc_func = _dispatch_async_and_wait_invoke, - .dc_ctxt = &dsc, - .dc_other = dq, - .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, - .dc_voucher = _voucher_get(), - .dsc_func = func, - .dsc_ctxt = ctxt, - .dsc_waiter = tid, - }; - - return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_async_and_wait_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - if (unlikely(!dq->do_targetq)) { - return _dispatch_sync_function_invoke(dq, ctxt, func); - } - - uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT; - if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; - return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); -} - -DISPATCH_NOINLINE -void -dispatch_barrier_async_and_wait_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - if (unlikely(!dq->do_targetq)) { - return _dispatch_sync_function_invoke(dq, ctxt, func); - } - - uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BARRIER; - return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); -} - -#ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq, - dispatch_block_t work, uintptr_t dc_flags) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); - dispatch_block_flags_t flags = dbpd->dbpd_flags; - pthread_priority_t pp; - voucher_t v; - - if (dbpd->dbpd_flags & DISPATCH_BLOCK_BARRIER) { - dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; - } else { - dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; - } - - if (_dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority)){ - pp = dbpd->dbpd_priority; - } else { - pp = _dispatch_get_priority(); - } - if (dbpd->dbpd_flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - } else { - v = _voucher_get(); - } - - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } - - dispatch_tid tid = _dispatch_tid_self(); - struct dispatch_sync_context_s dsc = { - 
.dc_flags = dc_flags, - .dc_func = _dispatch_async_and_wait_invoke, - .dc_ctxt = &dsc, - .dc_other = dq, - .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, - .dc_voucher = v, - .dsc_func = _dispatch_block_sync_invoke, - .dsc_ctxt = work, - .dsc_waiter = tid, - }; - - return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); -} - -void -dispatch_barrier_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) -{ - if (unlikely(!dq->do_targetq)) { - return dispatch_barrier_sync(dq, work); - } - - uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK|DC_FLAG_BARRIER; - if (unlikely(_dispatch_block_has_private_data(work))) { - return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); - } - - dispatch_function_t func = _dispatch_Block_invoke(work); - return _dispatch_async_and_wait_f(dq, work, func, dc_flags); -} - -void -dispatch_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) -{ - if (unlikely(!dq->do_targetq)) { - return dispatch_sync(dq, work); - } - - uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK; - if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; - if (unlikely(_dispatch_block_has_private_data(work))) { - return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); - } - - dispatch_function_t func = _dispatch_Block_invoke(work); - return _dispatch_async_and_wait_f(dq, work, func, dc_flags); -} -#endif // __BLOCKS__ - -#pragma mark - -#pragma mark dispatch_queue_specific - -static void -_dispatch_queue_specific_head_dispose_slow(void *ctxt) -{ - dispatch_queue_specific_head_t dqsh = ctxt; - dispatch_queue_specific_t dqs, tmp; - - TAILQ_FOREACH_SAFE(dqs, &dqsh->dqsh_entries, dqs_entry, tmp) { - dispatch_assert(dqs->dqs_destructor); - _dispatch_client_callout(dqs->dqs_ctxt, dqs->dqs_destructor); - free(dqs); - } - free(dqsh); -} - -static void -_dispatch_queue_specific_head_dispose(dispatch_queue_specific_head_t dqsh) -{ - dispatch_queue_t rq = _dispatch_get_default_queue(false); - dispatch_queue_specific_t dqs, tmp; - TAILQ_HEAD(, dispatch_queue_specific_s) entries = - TAILQ_HEAD_INITIALIZER(entries); - - TAILQ_CONCAT(&entries, &dqsh->dqsh_entries, dqs_entry); - TAILQ_FOREACH_SAFE(dqs, &entries, dqs_entry, tmp) { - if (dqs->dqs_destructor) { - TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); - } else { - free(dqs); - } - } - - if (TAILQ_EMPTY(&dqsh->dqsh_entries)) { - free(dqsh); - } else { - _dispatch_barrier_async_detached_f(rq, dqsh, - _dispatch_queue_specific_head_dispose_slow); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_queue_init_specific(dispatch_queue_t dq) -{ - dispatch_queue_specific_head_t dqsh; - - dqsh = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_head_s)); - TAILQ_INIT(&dqsh->dqsh_entries); - if (unlikely(!os_atomic_cmpxchg2o(dq, dq_specific_head, - NULL, dqsh, release))) { - _dispatch_queue_specific_head_dispose(dqsh); - } -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_specific_t -_dispatch_queue_specific_find(dispatch_queue_specific_head_t dqsh, - const void *key) -{ - dispatch_queue_specific_t dqs; - - TAILQ_FOREACH(dqs, &dqsh->dqsh_entries, dqs_entry) { - if (dqs->dqs_key == key) { - return dqs; - } - } - return NULL; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_admits_specific(dispatch_queue_t dq) -{ - if (dx_metatype(dq) == _DISPATCH_LANE_TYPE) { - return (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE || - !dx_hastypeflag(dq, QUEUE_BASE)); - } - return dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE; -} - -DISPATCH_NOINLINE -void 
-dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, - void *ctxt, dispatch_function_t destructor) -{ - if (unlikely(!key)) { - return; - } - dispatch_queue_t rq = _dispatch_get_default_queue(false); - dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; - dispatch_queue_specific_t dqs; - - if (unlikely(!_dispatch_queue_admits_specific(dq))) { - DISPATCH_CLIENT_CRASH(0, - "Queue doesn't support dispatch_queue_set_specific"); - } - - if (ctxt && !dqsh) { - _dispatch_queue_init_specific(dq); - dqsh = dq->dq_specific_head; - } else if (!dqsh) { - return; - } - - _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); - dqs = _dispatch_queue_specific_find(dqsh, key); - if (dqs) { - if (dqs->dqs_destructor) { - _dispatch_barrier_async_detached_f(rq, dqs->dqs_ctxt, - dqs->dqs_destructor); - } - if (ctxt) { - dqs->dqs_ctxt = ctxt; - dqs->dqs_destructor = destructor; - } else { - TAILQ_REMOVE(&dqsh->dqsh_entries, dqs, dqs_entry); - free(dqs); - } - } else if (ctxt) { - dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); - dqs->dqs_key = key; - dqs->dqs_ctxt = ctxt; - dqs->dqs_destructor = destructor; - TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); - } - - _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); -} - -DISPATCH_ALWAYS_INLINE -static inline void * -_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) -{ - dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; - dispatch_queue_specific_t dqs; - void *ctxt = NULL; - - if (likely(_dispatch_queue_admits_specific(dq) && dqsh)) { - _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); - dqs = _dispatch_queue_specific_find(dqsh, key); - if (dqs) ctxt = dqs->dqs_ctxt; - _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); - } - return ctxt; -} - -DISPATCH_NOINLINE -void * -dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) -{ - if (unlikely(!key)) { - return NULL; - } - return _dispatch_queue_get_specific_inline(dq, key); -} - -DISPATCH_NOINLINE -void * -dispatch_get_specific(const void *key) -{ - dispatch_queue_t dq = _dispatch_queue_get_current(); - void *ctxt = NULL; - - if (likely(key && dq)) { - do { - ctxt = _dispatch_queue_get_specific_inline(dq, key); - dq = dq->do_targetq; - } while (unlikely(ctxt == NULL && dq)); - } - return ctxt; -} - -#pragma mark - -#pragma mark dispatch_queue_t / dispatch_lane_t - -void -dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) -{ - if (unlikely(_dispatch_object_is_global(dq))) { - return; - } - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq); - if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { - DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); - } - dq->dq_label = label; -} - -static inline bool -_dispatch_base_lane_is_wlh(dispatch_lane_t dq, dispatch_queue_t tq) -{ -#if DISPATCH_USE_KEVENT_WORKLOOP - if (unlikely(!_dispatch_kevent_workqueue_enabled)) { - return false; - } - if (dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { - return true; - } - if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { - // Sources don't support sync waiters, so the ones that never change QoS - // don't benefit from any of the workloop features which have overhead, - // so just use the workqueue kqueue for these. 
- if (likely(!upcast(dq)._ds->ds_refs->du_can_be_wlh)) { - return false; - } - dispatch_assert(upcast(dq)._ds->ds_refs->du_is_direct); - } - return dq->dq_width == 1 && _dispatch_is_in_root_queues_array(tq); -#else - (void)dq; (void)tq; - return false; -#endif // DISPATCH_USE_KEVENT_WORKLOOP -} - -static void -_dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) -{ - uint64_t old_state, new_state, role; - - if (!dx_hastypeflag(tq, QUEUE_ROOT)) { - role = DISPATCH_QUEUE_ROLE_INNER; - } else if (_dispatch_base_lane_is_wlh(dq, tq)) { - role = DISPATCH_QUEUE_ROLE_BASE_WLH; - } else { - role = DISPATCH_QUEUE_ROLE_BASE_ANON; - } - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; - new_state |= role; - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(break); - } - }); - - if (_dq_state_is_base_wlh(old_state) && !_dq_state_is_base_wlh(new_state)) { - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (ddi && ddi->ddi_wlh == (dispatch_wlh_t)dq) { - _dispatch_event_loop_leave_immediate(new_state); - } - } - if (!dx_hastypeflag(tq, QUEUE_ROOT)) { - dispatch_queue_flags_t clear = 0, set = DQF_TARGETED; - if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { - clear |= DQF_MUTABLE; -#if !DISPATCH_ALLOW_NON_LEAF_RETARGET - } else { - clear |= DQF_MUTABLE; -#endif - } - if (clear) { - _dispatch_queue_atomic_flags_set_and_clear(tq, set, clear); - } else { - _dispatch_queue_atomic_flags_set(tq, set); - } - } -} - -dispatch_priority_t -_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, - dispatch_wlh_t *wlh_out) -{ - dispatch_priority_t dpri = dq->dq_priority; - dispatch_priority_t p = dpri & DISPATCH_PRIORITY_REQUESTED_MASK; - dispatch_qos_t fallback = _dispatch_priority_fallback_qos(dpri); - dispatch_queue_t tq = dq->do_targetq; - dispatch_wlh_t wlh = DISPATCH_WLH_ANON; - - if (_dq_state_is_base_wlh(dq->dq_state)) { - wlh = (dispatch_wlh_t)dq; - } - - while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { - if (unlikely(tq == _dispatch_mgr_q._as_dq)) { - if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; - return DISPATCH_PRIORITY_FLAG_MANAGER; - } - if (unlikely(_dispatch_queue_is_thread_bound(tq))) { - if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; - return tq->dq_priority; - } - if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { - // this queue may not be activated yet, so the queue graph may not - // have stabilized yet - _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, - dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? dq : NULL); - if (wlh_out) *wlh_out = NULL; - return 0; - } - - if (_dq_state_is_base_wlh(tq->dq_state)) { - wlh = (dispatch_wlh_t)tq; - if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { - _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); - } - } else if (unlikely(_dispatch_queue_is_mutable(tq))) { - // we're not allowed to dereference tq->do_targetq - _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, - dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? dq : NULL); - if (wlh_out) *wlh_out = NULL; - return 0; - } - - dispatch_priority_t tqp = tq->dq_priority; - - tq = tq->do_targetq; - if (tqp & DISPATCH_PRIORITY_FLAG_INHERITED) { - // if the priority is inherited, it means we got it from our target - // which has fallback and various magical flags that the code below - // will handle, so do not bother here. 
- break; - } - - if (!fallback) fallback = _dispatch_priority_fallback_qos(tqp); - tqp &= DISPATCH_PRIORITY_REQUESTED_MASK; - if (p < tqp) p = tqp; - } - - if (likely(_dispatch_is_in_root_queues_array(tq) || - tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { - dispatch_priority_t rqp = tq->dq_priority; - - if (!fallback) fallback = _dispatch_priority_fallback_qos(rqp); - rqp &= DISPATCH_PRIORITY_REQUESTED_MASK; - if (p < rqp) p = rqp; - - p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); - if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) || - !(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) { - p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR); - if (fallback > _dispatch_priority_qos(p)) { - p |= _dispatch_priority_make_fallback(fallback); - } - } - if (wlh_out) *wlh_out = wlh; - return p; - } - - // pthread root queues opt out of QoS - if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; - return DISPATCH_PRIORITY_FLAG_MANAGER; -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) -{ - uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; -#if DISPATCH_SIZEOF_PTR == 4 - dq_state >>= 32; -#endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch queue/source property setter called after activation"); -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) -{ - if (unlikely(!dwl->dwl_attr)) { - dwl->dwl_attr = _dispatch_calloc(1, sizeof(dispatch_workloop_attr_s)); - } -} - -void -dispatch_set_qos_class_floor(dispatch_object_t dou, - dispatch_qos_class_t cls, int relpri) -{ - if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { - DISPATCH_CLIENT_CRASH(0, - "dispatch_set_qos_class_floor called on invalid object type"); - } - if (dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { - return dispatch_workloop_set_qos_class_floor(dou._dwl, cls, relpri, 0); - } - - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - dispatch_priority_t pri = _dispatch_priority_make(qos, relpri); - dispatch_priority_t old_pri = dou._dq->dq_priority; - - if (pri) pri |= DISPATCH_PRIORITY_FLAG_FLOOR; - old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; - old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; - dou._dq->dq_priority = pri | old_pri; - - _dispatch_queue_setter_assert_inactive(dou._dq); -} - -void -dispatch_set_qos_class(dispatch_object_t dou, dispatch_qos_class_t cls, - int relpri) -{ - if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER || - dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { - DISPATCH_CLIENT_CRASH(0, - "dispatch_set_qos_class called on invalid object type"); - } - - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - dispatch_priority_t pri = _dispatch_priority_make(qos, relpri); - dispatch_priority_t old_pri = dou._dq->dq_priority; - - old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; - old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; - dou._dq->dq_priority = pri | old_pri; - - _dispatch_queue_setter_assert_inactive(dou._dq); -} - -void -dispatch_set_qos_class_fallback(dispatch_object_t dou, dispatch_qos_class_t cls) -{ - if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { - DISPATCH_CLIENT_CRASH(0, - "dispatch_set_qos_class_fallback called on invalid object type"); - } - - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - dispatch_priority_t pri = _dispatch_priority_make_fallback(qos); - dispatch_priority_t old_pri = dou._dq->dq_priority; - - old_pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; - old_pri &= 
~DISPATCH_PRIORITY_FLAG_FALLBACK;
-	dou._dq->dq_priority = pri | old_pri;
-
-	_dispatch_queue_setter_assert_inactive(dou._dq);
-}
-
-static dispatch_queue_t
-_dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq,
-		dispatch_queue_t tq)
-{
-	const dispatch_priority_t inherited = DISPATCH_PRIORITY_FLAG_INHERITED;
-	dispatch_priority_t pri = dq._dl->dq_priority;
-
-	// This priority has been selected by the client, so leave it alone.
-	// However, when the client picked a QoS, we should adjust the target queue
-	// if it is a root queue to best match the ask.
-	if (_dispatch_queue_priority_manually_selected(pri)) {
-		if (_dispatch_is_in_root_queues_array(tq)) {
-			dispatch_qos_t qos = _dispatch_priority_qos(pri);
-			if (!qos) qos = DISPATCH_QOS_DEFAULT;
-			tq = _dispatch_get_root_queue(qos,
-					pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq;
-		}
-		return tq;
-	}
-
-	if (_dispatch_is_in_root_queues_array(tq)) {
-		// base queues need to know they target
-		// the default root queue so that _dispatch_queue_wakeup_qos()
-		// in _dispatch_queue_wakeup() can fall back to QOS_DEFAULT
-		// if no other priority was provided.
-		pri = tq->dq_priority | inherited;
-	} else if (pri & inherited) {
-		// if the FALLBACK flag is set on queues due to the code above
-		// we need to clear it if the queue is retargeted within a hierarchy
-		// and is no longer a base queue.
-		pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK;
-		pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK;
-	}
-
-	dq._dl->dq_priority = pri;
-	return tq;
-}
-
-
-DISPATCH_NOINLINE
-static dispatch_queue_t
-_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
-		dispatch_queue_t tq, bool legacy)
-{
-	dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);
-
-	//
-	// Step 1: Normalize arguments (qos, overcommit, tq)
-	//
-
-	dispatch_qos_t qos = dqai.dqai_qos;
-#if !HAVE_PTHREAD_WORKQUEUE_QOS
-	if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
-		dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
-	}
-	if (qos == DISPATCH_QOS_MAINTENANCE) {
-		dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
-	}
-#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
-
-	_dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;
-	if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
-		if (tq->do_targetq) {
-			DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
-					"a non-global target queue");
-		}
-	}
-
-	if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
-		// Handle discrepancies between attr and target queue, attributes win
-		if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
-			if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
-				overcommit = _dispatch_queue_attr_overcommit_enabled;
-			} else {
-				overcommit = _dispatch_queue_attr_overcommit_disabled;
-			}
-		}
-		if (qos == DISPATCH_QOS_UNSPECIFIED) {
-			qos = _dispatch_priority_qos(tq->dq_priority);
-		}
-		tq = NULL;
-	} else if (tq && !tq->do_targetq) {
-		// target is a pthread or runloop root queue, setting QoS or overcommit
-		// is disallowed
-		if (overcommit != _dispatch_queue_attr_overcommit_unspecified) {
-			DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
-					"and use this kind of target queue");
-		}
-	} else {
-		if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
-			// Serial queues default to overcommit!
-			overcommit = dqai.dqai_concurrent ?
-					_dispatch_queue_attr_overcommit_disabled :
-					_dispatch_queue_attr_overcommit_enabled;
-		}
-	}
-	if (!tq) {
-		tq = _dispatch_get_root_queue(
-				qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
-				overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
-		if (unlikely(!tq)) {
-			DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
-		}
-	}
-
-	//
-	// Step 2: Initialize the queue
-	//
-
-	if (legacy) {
-		// if any of these attributes is specified, use non-legacy classes
-		if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) {
-			legacy = false;
-		}
-	}
-
-	const void *vtable;
-	dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
-	if (dqai.dqai_concurrent) {
-		vtable = DISPATCH_VTABLE(queue_concurrent);
-	} else {
-		vtable = DISPATCH_VTABLE(queue_serial);
-	}
-	switch (dqai.dqai_autorelease_frequency) {
-	case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
-		dqf |= DQF_AUTORELEASE_NEVER;
-		break;
-	case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
-		dqf |= DQF_AUTORELEASE_ALWAYS;
-		break;
-	}
-	if (label) {
-		const char *tmp = _dispatch_strdup_if_mutable(label);
-		if (tmp != label) {
-			dqf |= DQF_LABEL_NEEDS_FREE;
-			label = tmp;
-		}
-	}
-
-	dispatch_lane_t dq = _dispatch_object_alloc(vtable,
-			sizeof(struct dispatch_lane_s));
-	_dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
-			DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
-			(dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));
-
-	dq->dq_label = label;
-	dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
-			dqai.dqai_relpri);
-	if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
-		dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
-	}
-	if (!dqai.dqai_inactive) {
-		_dispatch_queue_priority_inherit_from_target(dq, tq);
-		_dispatch_lane_inherit_wlh_from_target(dq, tq);
-	}
-	_dispatch_retain(tq);
-	dq->do_targetq = tq;
-	_dispatch_object_debug(dq, "%s", __func__);
-	return _dispatch_trace_queue_create(dq)._dq;
-}
-
-dispatch_queue_t
-dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa,
-		dispatch_queue_t tq)
-{
-	return _dispatch_lane_create_with_target(label, dqa, tq, false);
-}
-
-dispatch_queue_t
-dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
-{
-	return _dispatch_lane_create_with_target(label, attr,
-			DISPATCH_TARGET_QUEUE_DEFAULT, true);
-}
-
-dispatch_queue_t
-dispatch_queue_create_with_accounting_override_voucher(const char *label,
-		dispatch_queue_attr_t attr, voucher_t voucher)
-{
-	(void)label; (void)attr; (void)voucher;
-	DISPATCH_CLIENT_CRASH(0, "Unsupported interface");
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_queue_dispose(dispatch_queue_class_t dqu, bool *allow_free)
-{
-	dispatch_queue_specific_head_t dqsh;
-	dispatch_queue_t dq = dqu._dq;
-
-	if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) {
-		free((void*)dq->dq_label);
-	}
-	dqsh = os_atomic_xchg2o(dq, dq_specific_head, (void *)0x200, relaxed);
-	if (dqsh) _dispatch_queue_specific_head_dispose(dqsh);
-
-	// fast path for queues that never got their storage retained
-	if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) {
-		// poison the state with something that is suspended and is easy to spot
-		dq->dq_state = 0xdead000000000000;
-		return;
-	}
-
-	// Take over freeing the memory from _dispatch_object_dealloc()
-	//
-	// As soon as we call _dispatch_queue_release_storage(), we forfeit
-	// the possibility for the caller of dx_dispose() to finalize the object
-	// so that responsibility is ours.
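-	// Note: finalization happens up front here; the fields below are
-	// scrubbed because the allocation outlives this call until the last
-	// storage reference is dropped.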
-	_dispatch_object_finalize(dq);
-	*allow_free = false;
-	dq->dq_label = "<released queue>";
-	dq->do_targetq = NULL;
-	dq->do_finalizer = NULL;
-	dq->do_ctxt = NULL;
-	return _dispatch_queue_release_storage(dq);
-}
-
-void
-_dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free)
-{
-	dispatch_lane_t dq = dqu._dl;
-	if (unlikely(dq->dq_items_tail)) {
-		DISPATCH_CLIENT_CRASH(dq->dq_items_tail,
-				"Release of a queue while items are enqueued");
-	}
-	dq->dq_items_head = (void *)0x200;
-	dq->dq_items_tail = (void *)0x200;
-
-	uint64_t orig_dq_state, dq_state;
-	dq_state = orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-
-	uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
-	if (dx_hastypeflag(dq, QUEUE_ROOT)) {
-		initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE;
-	}
-	dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-	dq_state &= ~DISPATCH_QUEUE_DIRTY;
-	dq_state &= ~DISPATCH_QUEUE_ROLE_MASK;
-	if (unlikely(dq_state != initial_state)) {
-		if (_dq_state_drain_locked(dq_state)) {
-			DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state,
-					"Release of a locked queue");
-		}
-#ifndef __LP64__
-		orig_dq_state >>= 32;
-#endif
-		DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state,
-				"Release of a queue with corrupt state");
-	}
-	_dispatch_queue_dispose(dqu, allow_free);
-}
-
-void
-_dispatch_lane_dispose(dispatch_lane_t dq, bool *allow_free)
-{
-	_dispatch_object_debug(dq, "%s", __func__);
-	_dispatch_trace_queue_dispose(dq);
-	_dispatch_lane_class_dispose(dq, allow_free);
-}
-
-void
-_dispatch_queue_xref_dispose(dispatch_queue_t dq)
-{
-	uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-	if (unlikely(_dq_state_is_suspended(dq_state))) {
-		long state = (long)dq_state;
-		if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32);
-		if (unlikely(dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) {
-			// Arguments for and against this assert are within 6705399
-			DISPATCH_CLIENT_CRASH(state, "Release of an inactive object");
-		}
-		DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object");
-	}
-	os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_lane_suspend_slow(dispatch_lane_t dq)
-{
-	uint64_t old_state, new_state, delta;
-
-	_dispatch_queue_sidelock_lock(dq);
-
-	// what we want to transfer (remove from dq_state)
-	delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL;
-	// but this is a suspend so add a suspend count at the same time
-	delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL;
-	if (dq->dq_side_suspend_cnt == 0) {
-		// we subtract delta from dq_state, and we want to set this bit
-		delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT;
-	}
-
-	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-		// unsigned underflow of the subtraction can happen because other
-		// threads could have touched this value while we were trying to acquire
-		// the lock, or because another thread raced us to do the same operation
-		// and got to the lock first.
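-		// Worked example (assuming DISPATCH_QUEUE_SUSPEND_HALF is 0x80, as
-		// defined in queue_internal.h): one transfer parks 0x80 suspend
-		// counts in dq_side_suspend_cnt while this suspend re-adds one, so
-		// dq_state drops by 0x7f * DISPATCH_QUEUE_SUSPEND_INTERVAL overall.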
- if (unlikely(os_sub_overflow(old_state, delta, &new_state))) { - os_atomic_rmw_loop_give_up(goto retry); - } - }); - if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, - DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { - DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); - } - return _dispatch_queue_sidelock_unlock(dq); - -retry: - _dispatch_queue_sidelock_unlock(dq); - return _dispatch_lane_suspend(dq); -} - -void -_dispatch_lane_suspend(dispatch_lane_t dq) -{ - uint64_t old_state, new_state; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (unlikely(os_add_overflow(old_state, new_state, &new_state))) { - os_atomic_rmw_loop_give_up({ - return _dispatch_lane_suspend_slow(dq); - }); - } - }); - - if (!_dq_state_is_suspended(old_state)) { - // rdar://8181908 we need to extend the queue life for the duration - // of the call to wakeup at _dispatch_lane_resume() time. - _dispatch_retain_2(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_resume_slow(dispatch_lane_t dq) -{ - uint64_t old_state, new_state, delta; - - _dispatch_queue_sidelock_lock(dq); - - // what we want to transfer - delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; - // but this is a resume so consume a suspend count at the same time - delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; - switch (dq->dq_side_suspend_cnt) { - case 0: - goto retry; - case DISPATCH_QUEUE_SUSPEND_HALF: - // we will transition the side count to 0, so we want to clear this bit - delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; - break; - } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - // unsigned overflow of the addition can happen because other - // threads could have touched this value while we were trying to acquire - // the lock, or because another thread raced us to do the same operation - // and got to the lock first. - if (unlikely(os_add_overflow(old_state, delta, &new_state))) { - os_atomic_rmw_loop_give_up(goto retry); - } - }); - dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; - return _dispatch_queue_sidelock_unlock(dq); - -retry: - _dispatch_queue_sidelock_unlock(dq); - return _dispatch_lane_resume(dq, DISPATCH_RESUME); -} - -DISPATCH_NOINLINE -static void -_dispatch_lane_resume_activate(dispatch_lane_t dq) -{ - if (dx_vtable(dq)->dq_activate) { - dx_vtable(dq)->dq_activate(dq); - } - - _dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE); -} - -DISPATCH_NOINLINE -void -_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) -{ - // covers all suspend and inactive bits, including side suspend bit - const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; - uint64_t pending_barrier_width = - (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - uint64_t set_owner_and_set_full_width_and_in_barrier = - _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; - - // backward compatibility: only dispatch sources can abuse - // dispatch_resume() to really mean dispatch_activate() - bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); - uint64_t old_state, new_state; - - // - // Activation is a bit tricky as it needs to finalize before the wakeup. - // - // The inactive bits have 4 states: - // - 11: INACTIVE - // - 10: ACTIVATED, but not activating yet - // - 01: ACTIVATING right now - // - 00: fully active - // - // ACTIVATED is only used when the queue is otherwise also suspended. 
-	// In that case the last resume will take over the activation.
-	//
-	// The ACTIVATING state is tricky because it may be cleared by sources
-	// firing, to avoid priority inversion problems such as rdar://45419440
-	// where as soon as the kevent is installed, the source may fire
-	// before its activating state has been cleared.
-	//
-	if (op == DISPATCH_ACTIVATE) {
-		// relaxed atomic because this doesn't publish anything, this is only
-		// about picking the thread that gets to finalize the activation
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-			if (!_dq_state_is_inactive(old_state)) {
-				// object already active or activated
-				os_atomic_rmw_loop_give_up(return);
-			}
-			if (unlikely(_dq_state_suspend_cnt(old_state))) {
-				// { sc != 0, i = INACTIVE } -> i = ACTIVATED
-				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
-						DISPATCH_QUEUE_ACTIVATED;
-			} else {
-				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
-				new_state = old_state - DISPATCH_QUEUE_INACTIVE +
-						DISPATCH_QUEUE_ACTIVATING;
-			}
-		});
-	} else if (op == DISPATCH_ACTIVATION_DONE) {
-		// release barrier needed to publish the effect of dq_activate()
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-			if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) {
-				os_atomic_rmw_loop_give_up({
-					// object activation was already concurrently done
-					// due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING
-					// wakeup call.
-					//
-					// We still need to consume the internal refcounts because
-					// the wakeup doesn't take care of these.
-					return _dispatch_release_2_tailcall(dq);
-				});
-			}
-
-			new_state = old_state - DISPATCH_QUEUE_ACTIVATING;
-			if (!_dq_state_is_runnable(new_state)) {
-				// Out of width or still suspended.
-				// For the former, force _dispatch_lane_non_barrier_complete
-				// to reconsider whether it has work to do
-				new_state |= DISPATCH_QUEUE_DIRTY;
-			} else if (_dq_state_drain_locked(new_state)) {
-				// still locked by someone else, make drain_try_unlock() fail
-				// and reconsider whether it has work to do
-				new_state |= DISPATCH_QUEUE_DIRTY;
-			} else {
-				// clear overrides and force a wakeup
-				new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-			}
-		});
-		if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) {
-			DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state");
-		}
-	} else {
-		// release barrier needed to publish the effect of
-		// - dispatch_set_target_queue()
-		// - dispatch_set_*_handler()
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-			new_state = old_state;
-			if (is_source && (old_state & suspend_bits) ==
-					DISPATCH_QUEUE_INACTIVE) {
-				// { sc = 0, i = INACTIVE } -> i = ACTIVATING
-				new_state -= DISPATCH_QUEUE_INACTIVE;
-				new_state += DISPATCH_QUEUE_ACTIVATING;
-			} else if (unlikely(os_sub_overflow(old_state,
-					DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) {
-				// underflow means over-resume or a suspend count transfer
-				// to the side count is needed
-				os_atomic_rmw_loop_give_up({
-					if (!(old_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) {
-						goto over_resume;
-					}
-					return _dispatch_lane_resume_slow(dq);
-				});
-				//
-				// below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL
-				//
-			} else if (_dq_state_is_activated(new_state)) {
-				// { sc = 1, i = ACTIVATED } -> i = ACTIVATING
-				new_state -= DISPATCH_QUEUE_ACTIVATED;
-				new_state += DISPATCH_QUEUE_ACTIVATING;
-			} else if (!_dq_state_is_runnable(new_state)) {
-				// Out of width or still suspended.
- // For the former, force _dispatch_lane_non_barrier_complete - // to reconsider whether it has work to do - new_state |= DISPATCH_QUEUE_DIRTY; - } else if (_dq_state_drain_locked(new_state)) { - // still locked by someone else, make drain_try_unlock() fail - // and reconsider whether it has work to do - new_state |= DISPATCH_QUEUE_DIRTY; - } else if (!is_source && (_dq_state_has_pending_barrier(new_state) || - new_state + pending_barrier_width < - DISPATCH_QUEUE_WIDTH_FULL_BIT)) { - // if we can, acquire the full width drain lock - // and then perform a lock transfer - // - // However this is never useful for a source where there are no - // sync waiters, so never take the lock and do a plain wakeup - new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state |= set_owner_and_set_full_width_and_in_barrier; - } else { - // clear overrides and force a wakeup - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - } - }); - } - - if (_dq_state_is_activating(new_state)) { - return _dispatch_lane_resume_activate(dq); - } - - if (_dq_state_is_suspended(new_state)) { - return; - } - - if (_dq_state_is_dirty(old_state)) { - // - // dependency ordering for dq state changes that were flushed - // and not acted upon - os_atomic_thread_fence(dependency); - dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); - } - // Balancing the retain_2 done in suspend() for rdar://8181908 - dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; - if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; - } else if (!_dq_state_is_runnable(new_state)) { - if (_dq_state_is_base_wlh(old_state)) { - _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); - } - return _dispatch_release_2(dq); - } - dispatch_assert(!_dq_state_received_sync_wait(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); - return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); - -over_resume: - if (unlikely(_dq_state_is_inactive(old_state))) { - DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); - } - DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); -} - -const char * -dispatch_queue_get_label(dispatch_queue_t dq) -{ - if (unlikely(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { - dq = _dispatch_queue_get_current_or_default(); - } - return dq->dq_label ? dq->dq_label : ""; -} - -qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) -{ - dispatch_priority_t pri = dq->dq_priority; - dispatch_qos_t qos = _dispatch_priority_qos(pri); - if (relpri_ptr) { - *relpri_ptr = qos ? _dispatch_priority_relpri(dq->dq_priority) : 0; - } - return _dispatch_qos_to_qos_class(qos); -} - -static void -_dispatch_lane_set_width(void *ctxt) -{ - int w = (int)(intptr_t)ctxt; // intentional truncation - uint32_t tmp; - dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; - - if (w >= 0) { - tmp = w ? 
(unsigned int)w : 1; - } else { - dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); - switch (w) { - case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = _dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_PHYSICAL); - break; - case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = _dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_ACTIVE); - break; - case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - default: - tmp = _dispatch_qos_max_parallelism(qos, 0); - break; - } - } - if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { - tmp = DISPATCH_QUEUE_WIDTH_MAX; - } - - dispatch_queue_flags_t old_dqf, new_dqf; - os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { - new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); - }); - _dispatch_lane_inherit_wlh_from_target(dq, dq->do_targetq); - _dispatch_object_debug(dq, "%s", __func__); -} - -void -dispatch_queue_set_width(dispatch_queue_t dq, long width) -{ - unsigned long type = dx_type(dq); - if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { - DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); - } else if (unlikely(type != DISPATCH_QUEUE_CONCURRENT_TYPE)) { - DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); - } - - if (likely((int)width >= 0)) { - dispatch_lane_t dl = upcast(dq)._dl; - _dispatch_barrier_trysync_or_async_f(dl, (void*)(intptr_t)width, - _dispatch_lane_set_width, DISPATCH_BARRIER_TRYSYNC_SUSPEND); - } else { - // The negative width constants need to execute on the queue to - // query the queue QoS - _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, - _dispatch_lane_set_width); - } -} - -static void -_dispatch_lane_legacy_set_target_queue(void *ctxt) -{ - dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; - dispatch_queue_t tq = ctxt; - dispatch_queue_t otq = dq->do_targetq; - - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); - _dispatch_bug_deprecated("Changing the target of a queue " - "already targeted by other dispatch objects"); -#else - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by other dispatch objects"); -#endif - } - - tq = _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_lane_inherit_wlh_from_target(dq, tq); -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_wakeup() - _dispatch_queue_sidelock_lock(dq); -#endif - if (unlikely(!_dispatch_queue_is_mutable(dq))) { - /* serialize with _dispatch_mach_handoff_set_wlh */ - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " - "after it has been activated"); - } - dq->do_targetq = tq; -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_wakeup() - _dispatch_queue_sidelock_unlock(dq); -#endif - - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_introspection_target_queue_changed(dq->_as_dq); - _dispatch_release_tailcall(otq); -} - -void -_dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq) -{ - if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { - bool overcommit = (dq->dq_width == 1); - tq = _dispatch_get_default_queue(overcommit); - } - - if (_dispatch_lane_try_inactive_suspend(dq)) { - _dispatch_object_set_target_queue_inline(dq, tq); - return _dispatch_lane_resume(dq, DISPATCH_RESUME); - } - -#if !DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by 
other dispatch objects"); - } -#endif - - if (unlikely(!_dispatch_queue_is_mutable(dq))) { -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by other dispatch objects"); - } -#endif - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " - "after it has been activated"); - } - - unsigned long metatype = dx_metatype(dq); - switch (metatype) { - case _DISPATCH_LANE_TYPE: -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - _dispatch_bug_deprecated("Changing the target of a queue " - "already targeted by other dispatch objects"); - } -#endif - break; - case _DISPATCH_SOURCE_TYPE: - _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); - _dispatch_bug_deprecated("Changing the target of a source " - "after it has been activated"); - break; - default: - DISPATCH_CLIENT_CRASH(metatype, "Unexpected dispatch object type"); - } - - _dispatch_retain(tq); - return _dispatch_barrier_trysync_or_async_f(dq, tq, - _dispatch_lane_legacy_set_target_queue, - DISPATCH_BARRIER_TRYSYNC_SUSPEND); -} - -#pragma mark - -#pragma mark _dispatch_queue_debug - -size_t -_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) -{ - size_t offset = 0; - dispatch_queue_t target = dq->do_targetq; - const char *tlabel = target && target->dq_label ? target->dq_label : ""; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - - offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " - "target = %s[%p], width = 0x%x, state = 0x%016llx", - dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width, - (unsigned long long)dq_state); - if (_dq_state_is_suspended(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", - _dq_state_suspend_cnt(dq_state)); - } - if (_dq_state_is_inactive(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); - } else if (_dq_state_is_activated(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", activated"); - } else if (_dq_state_is_activating(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", activating"); - } - if (_dq_state_is_enqueued(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); - } - if (_dq_state_is_dirty(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); - } - dispatch_qos_t qos = _dq_state_max_qos(dq_state); - if (qos) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); - } - mach_port_t owner = _dq_state_drain_owner(dq_state); - if (!_dispatch_queue_is_thread_bound(dq) && owner) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x", - owner); - } - if (_dq_state_is_in_barrier(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier"); - } else { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d", - _dq_state_used_width(dq_state, dq->dq_width)); - } - if (_dq_state_has_pending_barrier(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier"); - } - if (_dispatch_queue_is_thread_bound(dq)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", - owner); - } - return offset; -} - -size_t -_dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dq->dq_label ? 
dq->dq_label : _dispatch_object_class_name(dq), dq); - offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); - return offset; -} - -#if DISPATCH_PERF_MON - -#define DISPATCH_PERF_MON_BUCKETS 8 - -static struct { - uint64_t volatile time_total; - uint64_t volatile count_total; - uint64_t volatile thread_total; -} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS] DISPATCH_ATOMIC64_ALIGN; -DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; - -void -_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) -{ - uint64_t delta = _dispatch_uptime() - start; - unsigned long count; - int bucket = 0; - count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); - _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - if (count == 0) { - bucket = 0; - if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); - } else { - bucket = MIN(DISPATCH_PERF_MON_BUCKETS - 1, - (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count)); - os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); - } - os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); - os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); - if (trace) { - _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); - } -} - -#endif - -#pragma mark - -#pragma mark dispatch queue/lane drain & invoke - -DISPATCH_NOINLINE -static void -_dispatch_return_to_kernel(void) -{ -#if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (likely(ddi && ddi->ddi_wlh != DISPATCH_WLH_ANON)) { - dispatch_assert(ddi->ddi_wlh_servicing); - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); - } else { - _dispatch_clear_return_to_kernel(); - } -#endif -} - -void -_dispatch_poll_for_events_4launchd(void) -{ - _dispatch_return_to_kernel(); -} - -#if DISPATCH_USE_WORKQUEUE_NARROWING -DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) -_dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); -#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS -DISPATCH_STATIC_GLOBAL(uint64_t _dispatch_narrow_check_interval_cache); -#endif - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_narrow_check_interval(void) -{ -#if DISPATCH_TIME_UNIT_USES_NANOSECONDS - return 50 * NSEC_PER_MSEC; -#else - if (_dispatch_narrow_check_interval_cache == 0) { - _dispatch_narrow_check_interval_cache = - _dispatch_time_nano2mach(50 * NSEC_PER_MSEC); - } - return _dispatch_narrow_check_interval_cache; -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, - dispatch_priority_t pri) -{ - if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { - dic->dic_next_narrow_check = _dispatch_approximate_time() + - _dispatch_narrow_check_interval(); - } -} - -DISPATCH_NOINLINE -static bool -_dispatch_queue_drain_should_narrow_slow(uint64_t now, - dispatch_invoke_context_t dic) -{ - if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_qos_t qos = _dispatch_qos_from_pp(pp); - if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { - DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); - } - size_t idx = DISPATCH_QOS_BUCKET(qos); - os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; - uint64_t oldval, newval = now + 
_dispatch_narrow_check_interval();
-
-		dic->dic_next_narrow_check = newval;
-		os_atomic_rmw_loop(deadline, oldval, newval, relaxed, {
-			if (now < oldval) {
-				os_atomic_rmw_loop_give_up(return false);
-			}
-		});
-
-		if (!_pthread_workqueue_should_narrow(pp)) {
-			return false;
-		}
-		dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING;
-	}
-	return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic)
-{
-	uint64_t next_check = dic->dic_next_narrow_check;
-	if (unlikely(next_check)) {
-		uint64_t now = _dispatch_approximate_time();
-		if (unlikely(next_check < now)) {
-			return _dispatch_queue_drain_should_narrow_slow(now, dic);
-		}
-	}
-	return false;
-}
-#else
-#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0)
-#define _dispatch_queue_drain_should_narrow(dic) false
-#endif
-
-/*
- * Drain comes in 2 flavours (serial/concurrent) and 2 modes
- * (redirecting or not).
- *
- * Serial
- * ~~~~~~
- * Serial drain is about serial queues (width == 1). It doesn't support
- * the redirecting mode, which doesn't make sense, and treats all continuations
- * as barriers. Bookkeeping is minimal in the serial flavour, most of the loop
- * is optimized away.
- *
- * Serial drain stops if the width of the queue grows larger than 1.
- * Going through a serial drain prevents any recursive drain from
- * redirecting.
- *
- * Concurrent
- * ~~~~~~~~~~
- * When in non-redirecting mode (meaning one of the target queues is serial),
- * non-barriers and barriers alike run in the context of the drain thread.
- * Slow non-barrier items are still all signaled so that they can make progress
- * toward the dispatch_sync() that will serialize them all.
- *
- * In redirecting mode, non-barrier work items are redirected downward.
- *
- * Concurrent drain stops if the width of the queue becomes 1, so that the
- * queue drain moves to the more efficient serial mode.
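- *
- * For example (illustrative): a width-8 queue that targets a root queue
- * drains in redirecting mode and pushes its non-barrier items down to the
- * root queue, while the same queue targeting an intermediate serial queue
- * drains in non-redirecting mode and runs them on the drain thread.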
-DISPATCH_ALWAYS_INLINE
-static dispatch_queue_wakeup_target_t
-_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic,
-		dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain)
-{
-	dispatch_queue_t orig_tq = dq->do_targetq;
-	dispatch_thread_frame_s dtf;
-	struct dispatch_object_s *dc = NULL, *next_dc;
-	uint64_t dq_state, owned = *owned_ptr;
-
-	if (unlikely(!dq->dq_items_tail)) return NULL;
-
-	_dispatch_thread_frame_push(&dtf, dq);
-	if (serial_drain || _dq_state_is_in_barrier(owned)) {
-		// we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL`
-		// but width can change while draining barrier work items, so we only
-		// convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER`
-		owned = DISPATCH_QUEUE_IN_BARRIER;
-	} else {
-		owned &= DISPATCH_QUEUE_WIDTH_MASK;
-	}
-
-	dc = _dispatch_queue_get_head(dq);
-	goto first_iteration;
-
-	for (;;) {
-		dispatch_assert(dic->dic_barrier_waiter == NULL);
-		dc = next_dc;
-		if (unlikely(!dc)) {
-			if (!dq->dq_items_tail) {
-				break;
-			}
-			dc = _dispatch_queue_get_head(dq);
-		}
-		if (unlikely(_dispatch_needs_to_return_to_kernel())) {
-			_dispatch_return_to_kernel();
-		}
-		if (unlikely(serial_drain != (dq->dq_width == 1))) {
-			break;
-		}
-		if (unlikely(_dispatch_queue_drain_should_narrow(dic))) {
-			break;
-		}
-		if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) {
-			dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh();
-			if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) {
-				break;
-			}
-		}
-
-first_iteration:
-		dq_state = os_atomic_load(&dq->dq_state, relaxed);
-		if (unlikely(_dq_state_is_suspended(dq_state))) {
-			break;
-		}
-		if (unlikely(orig_tq != dq->do_targetq)) {
-			break;
-		}
-
-		if (serial_drain || _dispatch_object_is_barrier(dc)) {
-			if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) {
-				if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) {
-					goto out_with_no_width;
-				}
-				owned = DISPATCH_QUEUE_IN_BARRIER;
-			}
-			if (_dispatch_object_is_sync_waiter(dc) &&
-					!(flags & DISPATCH_INVOKE_THREAD_BOUND)) {
-				dic->dic_barrier_waiter = dc;
-				goto out_with_barrier_waiter;
-			}
-			next_dc = _dispatch_queue_pop_head(dq, dc);
-		} else {
-			if (owned == DISPATCH_QUEUE_IN_BARRIER) {
-				// we just ran barrier work items, we have to make their
-				// effect visible to other sync work items on other threads
-				// that may start coming in after this point, hence the
-				// release barrier
-				os_atomic_xor2o(dq, dq_state, owned, release);
-				owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-			} else if (unlikely(owned == 0)) {
-				if (_dispatch_object_is_waiter(dc)) {
-					// sync "readers" don't observe the limit
-					_dispatch_queue_reserve_sync_width(dq);
-				} else if (!_dispatch_queue_try_acquire_async(dq)) {
-					goto out_with_no_width;
-				}
-				owned = DISPATCH_QUEUE_WIDTH_INTERVAL;
-			}
-
-			next_dc = _dispatch_queue_pop_head(dq, dc);
-			if (_dispatch_object_is_waiter(dc)) {
-				owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-				_dispatch_non_barrier_waiter_redirect_or_wake(dq, dc);
-				continue;
-			}
-
-			if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) {
-				owned -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-				// This is a re-redirect, overrides have already been applied by
-				// _dispatch_continuation_async*
-				// However we want to end up on the root queue matching `dc`
-				// qos, so pick up the current override of `dq` which includes
-				// dc's override (and maybe more)
-				_dispatch_continuation_redirect_push(dq, dc,
-						_dispatch_queue_max_qos(dq));
-				continue;
-			}
-		}
-
-		_dispatch_continuation_pop_inline(dc, dic, flags, dq);
-	}
-
-	if (owned == DISPATCH_QUEUE_IN_BARRIER) {
-		// if we're IN_BARRIER we really own the full width too
-		owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
-	}
-	if (dc) {
-		owned = _dispatch_queue_adjust_owned(dq, owned, dc);
-	}
-	*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-	*owned_ptr |= owned;
-	_dispatch_thread_frame_pop(&dtf);
-	return dc ? dq->do_targetq : NULL;
-
-out_with_no_width:
-	*owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-	_dispatch_thread_frame_pop(&dtf);
-	return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
-
-out_with_barrier_waiter:
-	if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) {
-		DISPATCH_INTERNAL_CRASH(0,
-				"Deferred continuation on source, mach channel or mgr");
-	}
-	_dispatch_thread_frame_pop(&dtf);
-	return dq->do_targetq;
-}
-
-DISPATCH_NOINLINE
-static dispatch_queue_wakeup_target_t
-_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
-		uint64_t *owned)
-{
-	return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false);
-}
-
-DISPATCH_NOINLINE
-dispatch_queue_wakeup_target_t
-_dispatch_lane_serial_drain(dispatch_lane_class_t dqu,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
-		uint64_t *owned)
-{
-	flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
-	return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true);
-}
-
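/*
 * Illustrative sketch (editor's addition): the shape of the throttled check
 * that _dispatch_queue_drain_should_narrow_slow() implements above. A shared
 * deadline is advanced with a CAS loop so that at most one thread per
 * interval pays for the expensive narrowing query. All names are invented.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t example_deadline;

static bool
example_should_check(uint64_t now, uint64_t interval)
{
	uint64_t old = atomic_load_explicit(&example_deadline, memory_order_relaxed);
	do {
		if (now < old) {
			return false; // another thread checked recently: give up
		}
	} while (!atomic_compare_exchange_weak_explicit(&example_deadline, &old,
			now + interval, memory_order_relaxed, memory_order_relaxed));
	return true; // we won the race and own this interval's check
}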
-void
-_dispatch_queue_invoke_finish(dispatch_queue_t dq,
-		dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned)
-{
-	struct dispatch_object_s *dc = dic->dic_barrier_waiter;
-	dispatch_qos_t qos = dic->dic_barrier_waiter_bucket;
-	if (dc) {
-		dic->dic_barrier_waiter = NULL;
-		dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED;
-		owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-#if DISPATCH_INTROSPECTION
-		dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
-		dsc->dsc_from_async = true;
-#endif
-		if (qos) {
-			return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl,
-					dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned);
-		}
-		return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc,
-				DISPATCH_WAKEUP_CONSUME_2, owned);
-	}
-
-	uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED;
-	if (tq == DISPATCH_QUEUE_WAKEUP_MGR) {
-		enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-	}
-	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-		new_state = old_state - owned;
-		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-		new_state |= DISPATCH_QUEUE_DIRTY;
-		if (_dq_state_is_runnable(new_state) &&
-				!_dq_state_is_enqueued(new_state)) {
-			// drain was not interrupted for suspension
-			// we will reenqueue right away, just put ENQUEUED back
-			new_state |= enqueued;
-		}
-	});
-	old_state -= owned;
-	if (_dq_state_received_override(old_state)) {
-		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state));
-	}
-	if ((old_state ^ new_state) & enqueued) {
-		dispatch_assert(_dq_state_is_enqueued(new_state));
-		return _dispatch_queue_push_queue(tq, dq, new_state);
-	}
-	return _dispatch_release_2_tailcall(dq);
-}
-
-void
-_dispatch_lane_activate(dispatch_lane_class_t dq)
-{
-	dispatch_queue_t tq = dq._dl->do_targetq;
-	dispatch_priority_t pri = dq._dl->dq_priority;
-
-	// Normalize priority: keep the fallback only when higher than the floor
-	if (_dispatch_priority_fallback_qos(pri) <= _dispatch_priority_qos(pri) ||
-			(_dispatch_priority_qos(pri) &&
-			!(pri & DISPATCH_PRIORITY_FLAG_FLOOR))) {
-		pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK;
-		pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK;
-		dq._dl->dq_priority = pri;
-	}
-	tq = _dispatch_queue_priority_inherit_from_target(dq, tq);
-	_dispatch_lane_inherit_wlh_from_target(dq._dl, tq);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_wakeup_target_t
-_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic,
-		dispatch_invoke_flags_t flags, uint64_t *owned)
-{
-	dispatch_queue_t otq = dq->do_targetq;
-	dispatch_queue_t cq = _dispatch_queue_get_current();
-
-	if (unlikely(cq != otq)) {
-		return otq;
-	}
-	if (dq->dq_width == 1) {
-		return _dispatch_lane_serial_drain(dq, dic, flags, owned);
-	}
-	return _dispatch_lane_concurrent_drain(dq, dic, flags, owned);
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic,
-		dispatch_invoke_flags_t flags)
-{
-	_dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2);
-}
-
-#pragma mark -
-#pragma mark dispatch_workloop_t
-
-#define _dispatch_wl(dwl, qos) os_mpsc(dwl, dwl, s[DISPATCH_QOS_BUCKET(qos)])
-#define _dispatch_workloop_looks_empty(dwl, qos) \
-		os_mpsc_looks_empty(_dispatch_wl(dwl, qos))
-#define _dispatch_workloop_get_head(dwl, qos) \
-		os_mpsc_get_head(_dispatch_wl(dwl, qos))
-#define _dispatch_workloop_pop_head(dwl, qos, dc) \
-		os_mpsc_pop_head(_dispatch_wl(dwl, qos), dc, do_next)
-#define _dispatch_workloop_push_update_tail(dwl, qos, dou) \
-		os_mpsc_push_update_tail(_dispatch_wl(dwl, qos), dou, do_next)
-#define _dispatch_workloop_push_update_prev(dwl, qos, prev, dou) \
-		os_mpsc_push_update_prev(_dispatch_wl(dwl, qos), prev, dou, do_next)
-
-dispatch_workloop_t
-dispatch_workloop_copy_current(void)
-{
-	dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh());
-	if (likely(dwl)) {
-		_os_object_retain_with_resurrect(dwl->_as_os_obj);
-		return dwl;
-	}
-	return NULL;
-}
-
-bool
-dispatch_workloop_is_current(dispatch_workloop_t dwl)
-{
-	return _dispatch_get_wlh() == (dispatch_wlh_t)dwl;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline uint64_t
-_dispatch_workloop_role_bits(void)
-{
-#if DISPATCH_USE_KEVENT_WORKLOOP
-	if (likely(_dispatch_kevent_workqueue_enabled)) {
-		return DISPATCH_QUEUE_ROLE_BASE_WLH;
-	}
-#endif
-	return DISPATCH_QUEUE_ROLE_BASE_ANON;
-}
-
-bool
-_dispatch_workloop_should_yield_4NW(void)
-{
-	dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh());
-	if (likely(dwl)) {
-		return _dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos;
-	}
-	return false;
-}
-
-DISPATCH_NOINLINE
-static dispatch_workloop_t
-_dispatch_workloop_create(const char *label, uint64_t dq_state)
-{
-	dispatch_queue_flags_t dqf = DQF_AUTORELEASE_ALWAYS;
-	dispatch_workloop_t dwl;
-
-	if (label) {
-		const char *tmp = _dispatch_strdup_if_mutable(label);
-		if (tmp != label) {
-			dqf |= DQF_LABEL_NEEDS_FREE;
-			label = tmp;
-		}
-	}
-
-	dq_state |= _dispatch_workloop_role_bits();
-
-	dwl = _dispatch_queue_alloc(workloop, dqf, 1, dq_state)._dwl;
-	dwl->dq_label = label;
-	dwl->do_targetq = _dispatch_get_default_queue(true);
-	if (!(dq_state & DISPATCH_QUEUE_INACTIVE)) {
-		dwl->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT |
-				_dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT);
-	}
-	_dispatch_object_debug(dwl, "%s", __func__);
-	return _dispatch_introspection_queue_create(dwl)._dwl;
-}
-
-dispatch_workloop_t
-dispatch_workloop_create(const char *label)
-{
-	return _dispatch_workloop_create(label, 0);
-}
-
-dispatch_workloop_t
-dispatch_workloop_create_inactive(const char *label)
-{
-	return _dispatch_workloop_create(label, DISPATCH_QUEUE_INACTIVE);
-}
-
-void
-dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t dwl,
-		dispatch_autorelease_frequency_t frequency)
-{
-	if (frequency == DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) {
-		_dispatch_queue_atomic_flags_set_and_clear(dwl,
-				DQF_AUTORELEASE_ALWAYS, DQF_AUTORELEASE_NEVER);
-	} else {
-		_dispatch_queue_atomic_flags_set_and_clear(dwl,
-				DQF_AUTORELEASE_NEVER, DQF_AUTORELEASE_ALWAYS);
-	}
-	_dispatch_queue_setter_assert_inactive(dwl);
-}
-
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl)
-{
-	if (dwl->dwl_attr) {
-		free(dwl->dwl_attr);
-	}
-}
-
-#if TARGET_OS_MAC
-DISPATCH_ALWAYS_INLINE
-static bool
-_dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl)
-{
-	return dwl->dwl_attr && (dwl->dwl_attr->dwla_flags &
-			(DISPATCH_WORKLOOP_ATTR_HAS_SCHED |
-			DISPATCH_WORKLOOP_ATTR_HAS_POLICY |
-			DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT));
-}
-
-void
-dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority,
-		uint64_t flags)
-{
-	_dispatch_queue_setter_assert_inactive(dwl);
-	_dispatch_workloop_attributes_alloc_if_needed(dwl);
-
-	if (priority) {
-		dwl->dwl_attr->dwla_sched.sched_priority = priority;
-		dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_SCHED;
-	} else {
-		dwl->dwl_attr->dwla_sched.sched_priority = 0;
-		dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_SCHED;
-	}
-
-	if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) {
-		dwl->dwl_attr->dwla_policy = POLICY_RR;
-		dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY;
-	} else {
-		dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY;
-	}
-}
-#endif // TARGET_OS_MAC
-
-void
-dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl,
-		qos_class_t cls, int relpri, uint64_t flags)
-{
-	_dispatch_queue_setter_assert_inactive(dwl);
-	_dispatch_workloop_attributes_alloc_if_needed(dwl);
-
-	dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls);
-
-	if (qos) {
-		dwl->dwl_attr->dwla_pri = _dispatch_priority_make(qos, relpri);
-		dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS;
-	} else {
-		dwl->dwl_attr->dwla_pri = 0;
-		dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS;
-	}
-
-#if TARGET_OS_MAC
-	if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) {
-		dwl->dwl_attr->dwla_policy = POLICY_RR;
-		dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY;
-	} else {
-		dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY;
-	}
-#else // TARGET_OS_MAC
-	(void)flags;
-#endif // TARGET_OS_MAC
-}
-
-void
-dispatch_workloop_set_qos_class(dispatch_workloop_t dwl,
-		qos_class_t cls, uint64_t flags)
-{
-	dispatch_workloop_set_qos_class_floor(dwl, cls, 0, flags);
-}
-
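/*
 * Illustrative sketch (editor's addition): how the inactive-workloop setters
 * above are meant to be combined, per the workloop headers added in this
 * series. The label and QoS choice are examples, not requirements.
 */
#include <dispatch/dispatch.h>

static dispatch_workloop_t
example_make_workloop(void)
{
	dispatch_workloop_t wl = dispatch_workloop_create_inactive("com.example.wl");
	dispatch_workloop_set_qos_class_floor(wl, QOS_CLASS_USER_INITIATED, 0, 0);
	dispatch_activate(wl); // attributes are registered with the kernel here
	return wl;
}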
-void
-dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent,
-		uint32_t refillms)
-{
-	_dispatch_queue_setter_assert_inactive(dwl);
-	_dispatch_workloop_attributes_alloc_if_needed(dwl);
-
-	if ((dwl->dwl_attr->dwla_flags & (DISPATCH_WORKLOOP_ATTR_HAS_SCHED |
-			DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS)) == 0) {
-		DISPATCH_CLIENT_CRASH(0, "workloop qos class or priority must be "
-				"set before cpupercent");
-	}
-
-	dwl->dwl_attr->dwla_cpupercent.percent = percent;
-	dwl->dwl_attr->dwla_cpupercent.refillms = refillms;
-	dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT;
-}
-
-#if DISPATCH_IOHID_SPI
-void
-_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl,
-		dispatch_pthread_root_queue_observer_hooks_t observer_hooks)
-{
-	_dispatch_queue_setter_assert_inactive(dwl);
-	_dispatch_workloop_attributes_alloc_if_needed(dwl);
-
-	dwl->dwl_attr->dwla_observers = *observer_hooks;
-	dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS;
-}
-#endif
-
-#if TARGET_OS_MAC
-static void
-_dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl,
-		pthread_attr_t *attr)
-{
-	uint64_t old_state, new_state;
-	dispatch_queue_global_t dprq;
-
-	dprq = dispatch_pthread_root_queue_create(
-			"com.apple.libdispatch.workloop_fallback", 0, attr, NULL);
-
-	dwl->do_targetq = dprq->_as_dq;
-	_dispatch_retain(dprq);
-	dispatch_release(dprq);
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, {
-		new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK;
-		new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON;
-	});
-}
-
-static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = {
-	DISPATCH_GLOBAL_OBJECT_HEADER(queue_global),
-	.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
-	.do_ctxt = NULL,
-	.dq_label = "com.apple.root.workloop-custom",
-	.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL),
-	.dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) |
-			DISPATCH_PRIORITY_SATURATED_OVERRIDE,
-	.dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF,
-	.dgq_thread_pool_size = 1,
-};
-#endif // TARGET_OS_MAC
-
-static void
-_dispatch_workloop_activate_attributes(dispatch_workloop_t dwl)
-{
-#if defined(_POSIX_THREADS)
-	dispatch_workloop_attr_t dwla = dwl->dwl_attr;
-	pthread_attr_t attr;
-
-	pthread_attr_init(&attr);
-	if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) {
-		dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR;
-	}
-#if TARGET_OS_MAC
-	if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) {
-		pthread_attr_setschedparam(&attr, &dwla->dwla_sched);
-		// _dispatch_async_and_wait_should_always_async detects when a queue
-		// targets a root queue that is not part of the root queues array in
-		// order to force async_and_wait to async. We want this path to always
-		// be taken on workloops that have a scheduler priority set.
-		dwl->do_targetq =
-				(dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq;
-	}
-	if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) {
-		pthread_attr_setschedpolicy(&attr, dwla->dwla_policy);
-	}
-#endif // TARGET_OS_MAC
-#if HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP
-	if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) {
-		pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent,
-				(unsigned long)dwla->dwla_cpupercent.refillms);
-	}
-#endif // HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP
-#if TARGET_OS_MAC
-	if (_dispatch_workloop_has_kernel_attributes(dwl)) {
-		int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr);
-		switch (rv) {
-		case 0:
-			dwla->dwla_flags |= DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY;
-			break;
-		case ENOTSUP:
-			/* simulator fallback */
-			_dispatch_workloop_activate_simulator_fallback(dwl, &attr);
-			break;
-		default:
-			dispatch_assert_zero(rv);
-		}
-	}
-#endif // TARGET_OS_MAC
-	pthread_attr_destroy(&attr);
-#endif // defined(_POSIX_THREADS)
-}
-
-void
-_dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free)
-{
-	uint64_t dq_state = os_atomic_load2o(dwl, dq_state, relaxed);
-	uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1);
-
-	initial_state |= _dispatch_workloop_role_bits();
-
-	if (unlikely(dq_state != initial_state)) {
-		if (_dq_state_drain_locked(dq_state)) {
-			DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
-					"Release of a locked workloop");
-		}
-#if DISPATCH_SIZEOF_PTR == 4
-		dq_state >>= 32;
-#endif
-		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
-				"Release of a workloop with corrupt state");
-	}
-
-	_dispatch_object_debug(dwl, "%s", __func__);
-	_dispatch_introspection_queue_dispose(dwl);
-
-	for (size_t i = 0; i < countof(dwl->dwl_tails); i++) {
-		if (unlikely(dwl->dwl_tails[i])) {
-			DISPATCH_CLIENT_CRASH(dwl->dwl_tails[i],
-					"Release of a workloop while items are enqueued");
-		}
-		// trash the queue so that use after free will crash
-		dwl->dwl_tails[i] = (void *)0x200;
-		dwl->dwl_heads[i] = (void *)0x200;
-	}
-
-	if (dwl->dwl_timer_heap) {
-		for (size_t i = 0; i < DISPATCH_TIMER_WLH_COUNT; i++) {
-			dispatch_assert(dwl->dwl_timer_heap[i].dth_count == 0);
-		}
-		free(dwl->dwl_timer_heap);
-		dwl->dwl_timer_heap = NULL;
-	}
-
-#if TARGET_OS_MAC
-	if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags &
-			DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) {
-		(void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl));
-	}
-#endif // TARGET_OS_MAC
-	_dispatch_workloop_attributes_dispose(dwl);
-	_dispatch_queue_dispose(dwl, allow_free);
-}
-
-void
-_dispatch_workloop_activate(dispatch_workloop_t dwl)
-{
-	// This transitions either:
-	// - from INACTIVE to ACTIVATING
-	// - or from ACTIVE to ACTIVE
-	uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state,
-			~DISPATCH_QUEUE_ACTIVATED, relaxed);
-
-	if (likely(_dq_state_is_inactive(old_state))) {
-		if (dwl->dwl_attr) {
-			// Activation of a workloop with attributes forces us to create
-			// the workloop up front and register the attributes with the
-			// kernel.
-			_dispatch_workloop_activate_attributes(dwl);
-		}
-		if (!dwl->dq_priority) {
-			dwl->dq_priority =
-					_dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT);
-		}
-		dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
-		os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed);
-		return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2);
-	}
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl,
-		dispatch_qos_t qos)
-{
-	uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos);
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, {
-		if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) {
-			os_atomic_rmw_loop_give_up(return true);
-		}
-
-		if (unlikely(_dq_state_is_dirty(old_state))) {
-			os_atomic_rmw_loop_give_up({
-				os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
-				return false;
-			});
-		}
-
-		new_state = old_state;
-		new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-		new_state |= qos_bits;
-	});
-
-#if DISPATCH_USE_KEVENT_WORKQUEUE
-	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
-	if (likely(ddi)) {
-		ddi->ddi_wlh_needs_update = true;
-		_dispatch_return_to_kernel();
-	}
-#endif // DISPATCH_USE_KEVENT_WORKQUEUE
-	return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_wakeup_target_t
-_dispatch_workloop_invoke2(dispatch_workloop_t dwl,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
-		uint64_t *owned)
-{
-	dispatch_workloop_attr_t dwl_attr = dwl->dwl_attr;
-	dispatch_thread_frame_s dtf;
-	struct dispatch_object_s *dc = NULL, *next_dc;
-
-	if (dwl_attr &&
-			(dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS)) {
-		_dispatch_set_pthread_root_queue_observer_hooks(
-				&dwl_attr->dwla_observers);
-	}
-	_dispatch_thread_frame_push(&dtf, dwl);
-
-	for (;;) {
-		dispatch_qos_t qos;
-		for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) {
-			if (!_dispatch_workloop_looks_empty(dwl, qos)) break;
-		}
-		if (qos < DISPATCH_QOS_MIN) {
-			break;
-		}
-		if (unlikely(!_dispatch_workloop_try_lower_max_qos(dwl, qos))) {
-			continue;
-		}
-		dwl->dwl_drained_qos = (uint8_t)qos;
-
-		dc = _dispatch_workloop_get_head(dwl, qos);
-		do {
-			if (_dispatch_object_is_sync_waiter(dc)) {
-				dic->dic_barrier_waiter_bucket = qos;
-				dic->dic_barrier_waiter = dc;
-				dwl->dwl_drained_qos = DISPATCH_QOS_UNSPECIFIED;
-				goto out_with_barrier_waiter;
-			}
-			next_dc = _dispatch_workloop_pop_head(dwl, qos, dc);
-			if (unlikely(_dispatch_needs_to_return_to_kernel())) {
-				_dispatch_return_to_kernel();
-			}
-
-			_dispatch_continuation_pop_inline(dc, dic, flags, dwl);
-			qos = dwl->dwl_drained_qos;
-		} while ((dc = next_dc) && (_dispatch_queue_max_qos(dwl) <= qos));
-	}
-
-	*owned = (*owned & DISPATCH_QUEUE_ENQUEUED) +
-			DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL;
-	_dispatch_thread_frame_pop(&dtf);
-	_dispatch_set_pthread_root_queue_observer_hooks(NULL);
-	return NULL;
-
-out_with_barrier_waiter:
-	_dispatch_thread_frame_pop(&dtf);
-	_dispatch_set_pthread_root_queue_observer_hooks(NULL);
-	return dwl->do_targetq;
-}
-
-void
-_dispatch_workloop_invoke(dispatch_workloop_t dwl,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
-{
-	flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN;
-	flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN;
-	_dispatch_queue_class_invoke(dwl, dic, flags, 0, _dispatch_workloop_invoke2);
-}
-
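/*
 * Illustrative sketch (editor's addition): the bucket scan that
 * _dispatch_workloop_invoke2() and _dispatch_workloop_probe() perform above,
 * with invented stand-in types. The workloop keeps one MPSC queue per QoS
 * bucket and always serves the highest non-empty bucket first.
 */
#include <stddef.h>

#define EXAMPLE_NBUCKETS 6

struct example_item { struct example_item *next; };

static struct example_item *
example_pop_highest(struct example_item *heads[EXAMPLE_NBUCKETS])
{
	for (size_t qos = EXAMPLE_NBUCKETS; qos-- > 0;) { // highest bucket first
		if (heads[qos]) {
			struct example_item *item = heads[qos];
			heads[qos] = item->next;
			return item;
		}
	}
	return NULL; // every bucket empty: the drain loop stops
}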
-DISPATCH_ALWAYS_INLINE
-static bool
-_dispatch_workloop_probe(dispatch_workloop_t dwl)
-{
-	dispatch_qos_t qos;
-	for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) {
-		if (!_dispatch_workloop_looks_empty(dwl, qos)) return true;
-	}
-	return false;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl,
-		struct dispatch_object_s *dc, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags, uint64_t enqueued_bits)
-{
-	dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
-	uint64_t next_owner = 0, old_state, new_state;
-	bool has_more_work;
-
-	next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter);
-	has_more_work = (_dispatch_workloop_pop_head(dwl, qos, dc) != NULL);
-
-transfer_lock_again:
-	if (!has_more_work) {
-		has_more_work = _dispatch_workloop_probe(dwl);
-	}
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, {
-		new_state = old_state;
-		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-		new_state &= ~DISPATCH_QUEUE_DIRTY;
-		new_state |= next_owner;
-
-		if (likely(_dq_state_is_base_wlh(old_state))) {
-			new_state |= DISPATCH_QUEUE_SYNC_TRANSFER;
-			if (has_more_work) {
-				// we know there's a next item, keep the enqueued bit if any
-			} else if (unlikely(_dq_state_is_dirty(old_state))) {
-				os_atomic_rmw_loop_give_up({
-					os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
-					goto transfer_lock_again;
-				});
-			} else {
-				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-				new_state &= ~DISPATCH_QUEUE_ENQUEUED;
-			}
-		} else {
-			new_state -= enqueued_bits;
-		}
-	});
-
-	return _dispatch_barrier_waiter_redirect_or_wake(dwl, dc, flags,
-			old_state, new_state);
-}
-
-static void
-_dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags)
-{
-	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
-	dispatch_qos_t wl_qos;
-
-again:
-	for (wl_qos = DISPATCH_QOS_MAX; wl_qos >= DISPATCH_QOS_MIN; wl_qos--) {
-		struct dispatch_object_s *dc;
-
-		if (_dispatch_workloop_looks_empty(dwl, wl_qos)) continue;
-		dc = _dispatch_workloop_get_head(dwl, wl_qos);
-
-		if (_dispatch_object_is_waiter(dc)) {
-			return _dispatch_workloop_drain_barrier_waiter(dwl, dc, wl_qos,
-					flags, 0);
-		}
-
-		// We have work to do, we need to wake up
-		target = DISPATCH_QUEUE_WAKEUP_TARGET;
-	}
-
-	if (unlikely(target && !(flags & DISPATCH_WAKEUP_CONSUME_2))) {
-		_dispatch_retain_2(dwl);
-		flags |= DISPATCH_WAKEUP_CONSUME_2;
-	}
-
-	uint64_t old_state, new_state;
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, {
-		new_state = _dq_state_merge_qos(old_state, qos);
-		new_state -= DISPATCH_QUEUE_IN_BARRIER;
-		new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL;
-		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
-		if (target) {
-			new_state |= DISPATCH_QUEUE_ENQUEUED;
-		} else if (unlikely(_dq_state_is_dirty(old_state))) {
-			os_atomic_rmw_loop_give_up({
-				// just renew the drain lock with an acquire barrier, to see
-				// what the enqueuer that set DIRTY has done.
-				// the xor generates better assembly as DISPATCH_QUEUE_DIRTY
-				// is already in a register
-				os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
-				goto again;
-			});
-		} else if (likely(_dq_state_is_base_wlh(old_state))) {
-			new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-			new_state &= ~DISPATCH_QUEUE_ENQUEUED;
-		} else {
-			new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
-		}
-	});
-	dispatch_assert(_dq_state_drain_locked_by_self(old_state));
-	dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state));
-
-	if (_dq_state_is_enqueued(new_state)) {
-		_dispatch_trace_runtime_event(sync_async_handoff, dwl, 0);
-	}
-
-#if DISPATCH_USE_KEVENT_WORKLOOP
-	if (_dq_state_is_base_wlh(old_state)) {
-		// - Only non-"du_is_direct" sources & mach channels can be enqueued
-		//   on the manager.
-		//
-		// - Only dispatch_source_cancel_and_wait() and
-		//   dispatch_source_set_*_handler() use the barrier complete codepath,
-		//   none of which are used by mach channels.
-		//
-		// Hence no source-ish object can both be a workloop and need to use the
-		// manager at the same time.
-		dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state));
-		if (_dq_state_is_enqueued_on_target(old_state) ||
-				_dq_state_is_enqueued_on_target(new_state) ||
-				_dq_state_received_sync_wait(old_state) ||
-				_dq_state_in_sync_transfer(old_state)) {
-			return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl,
-					old_state, new_state, flags);
-		}
-		_dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dwl);
-		goto done;
-	}
-#endif
-
-	if (_dq_state_received_override(old_state)) {
-		// Ensure that the root queue sees that this thread was overridden.
-		_dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state));
-	}
-
-	if (target) {
-		if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) {
-			dispatch_assert(_dq_state_is_enqueued(new_state));
-			dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
-			return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state);
-		}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-		// when doing sync to async handoff
-		// if the queue received an override we have to forcefully redrive
-		// the same override so that a new stealer is enqueued because
-		// the previous one may be gone already
-		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dwl, new_state, flags);
-		}
-#endif
-	}
-
-#if DISPATCH_USE_KEVENT_WORKLOOP
-done:
-#endif
-	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
-		return _dispatch_release_2_tailcall(dwl);
-	}
-}
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-static void
-_dispatch_workloop_stealer_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
-{
-	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION;
-	_dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, {
-		dispatch_queue_t dq = dc->dc_data;
-		dx_invoke(dq, dic, flags | DISPATCH_INVOKE_STEALING);
-	});
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_workloop_push_stealer(dispatch_workloop_t dwl, dispatch_queue_t dq,
-		dispatch_qos_t qos)
-{
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-
-	dc->do_vtable = DC_VTABLE(WORKLOOP_STEALING);
-	_dispatch_retain_2(dq);
-	dc->dc_func = NULL;
-	dc->dc_ctxt = dc;
-	dc->dc_other = NULL;
-	dc->dc_data = dq;
-	dc->dc_priority = DISPATCH_NO_PRIORITY;
-	dc->dc_voucher = DISPATCH_NO_VOUCHER;
-	_dispatch_workloop_push(dwl, dc, qos);
-}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-
-void
-_dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags)
-{
-	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
-		return _dispatch_workloop_barrier_complete(dwl, qos, flags);
-	}
-
-	if (unlikely(!(flags & DISPATCH_WAKEUP_CONSUME_2))) {
-		DISPATCH_INTERNAL_CRASH(flags, "Invalid way to wake up a workloop");
-	}
-
-	if (unlikely(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
-		goto done;
-	}
-
-	uint64_t old_state, new_state;
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, {
-		new_state = _dq_state_merge_qos(old_state, qos);
-		if (_dq_state_max_qos(new_state)) {
-			new_state |= DISPATCH_QUEUE_ENQUEUED;
-		}
-		if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
-			new_state |= DISPATCH_QUEUE_DIRTY;
-		} else if (new_state == old_state) {
-			os_atomic_rmw_loop_give_up(goto done);
-		}
-	});
-
-	if (unlikely(_dq_state_is_suspended(old_state))) {
-#if DISPATCH_SIZEOF_PTR == 4
-		old_state >>= 32;
-#endif
-		DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop");
-	}
-	if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) {
-		return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state);
-	}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	if (likely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
-		return _dispatch_queue_wakeup_with_override(dwl, new_state, flags);
-	}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-done:
-	return _dispatch_release_2_tailcall(dwl);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_workloop_push_waiter(dispatch_workloop_t dwl,
-		dispatch_sync_context_t dsc, dispatch_qos_t qos)
-{
-	struct dispatch_object_s *prev, *dc = (struct dispatch_object_s *)dsc;
-
-	dispatch_priority_t p = _dispatch_priority_from_pp(dsc->dc_priority);
-	if (qos < _dispatch_priority_qos(p)) {
-		qos = _dispatch_priority_qos(p);
-	}
-	if (qos == DISPATCH_QOS_UNSPECIFIED) {
-		qos = DISPATCH_QOS_DEFAULT;
-	}
-
-	prev = _dispatch_workloop_push_update_tail(dwl, qos, dc);
-	_dispatch_workloop_push_update_prev(dwl, qos, prev, dc);
-	if (likely(!os_mpsc_push_was_empty(prev))) return;
-
-	uint64_t set_owner_and_set_full_width_and_in_barrier =
-			_dispatch_lock_value_for_self() |
-			DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
-	uint64_t old_state, new_state;
-
-	os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, {
-		new_state = _dq_state_merge_qos(old_state, qos);
-		new_state |= DISPATCH_QUEUE_DIRTY;
-		if (unlikely(_dq_state_drain_locked(old_state))) {
-			// not runnable, so we should just handle overrides
-		} else if (_dq_state_is_enqueued(old_state)) {
-			// 32123779 let the event thread redrive since it's out already
-		} else {
-			// see _dispatch_queue_drain_try_lock
-			new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-			new_state |= set_owner_and_set_full_width_and_in_barrier;
-		}
-	});
-
-	if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) &&
-			_dispatch_async_and_wait_should_always_async(dwl, new_state)) {
-		dsc->dc_other = dwl;
-		dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT;
-	}
-
-	dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self());
-
-	if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
-		return _dispatch_workloop_barrier_complete(dwl, qos, 0);
-	}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-	if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
-		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dwl, new_state, 0);
-		}
-	}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-}
-
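/*
 * Illustrative sketch (editor's addition): the os_atomic_rmw_loop2o() pattern
 * that the wakeup and push-waiter paths above are built on, expressed with
 * C11 atomics. The bit layout is invented; the real dq_state packs many more
 * fields.
 */
#include <stdatomic.h>
#include <stdint.h>

#define EXAMPLE_DIRTY     0x1ull
#define EXAMPLE_QOS_MASK  0xf0ull
#define EXAMPLE_QOS_SHIFT 4

static void
example_merge_qos(_Atomic uint64_t *state, uint64_t qos)
{
	uint64_t old = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t new_state;
	do {
		// recompute the whole word from the freshly observed value
		new_state = old | EXAMPLE_DIRTY;
		if (((new_state & EXAMPLE_QOS_MASK) >> EXAMPLE_QOS_SHIFT) < qos) {
			new_state = (new_state & ~EXAMPLE_QOS_MASK) |
					(qos << EXAMPLE_QOS_SHIFT);
		}
	} while (!atomic_compare_exchange_weak_explicit(state, &old, new_state,
			memory_order_release, memory_order_relaxed));
}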
-void
-_dispatch_workloop_push(dispatch_workloop_t dwl, dispatch_object_t dou,
-		dispatch_qos_t qos)
-{
-	struct dispatch_object_s *prev;
-
-	if (unlikely(_dispatch_object_is_waiter(dou))) {
-		return _dispatch_workloop_push_waiter(dwl, dou._dsc, qos);
-	}
-
-	if (qos < _dispatch_priority_qos(dwl->dq_priority)) {
-		qos = _dispatch_priority_qos(dwl->dq_priority);
-	}
-	if (qos == DISPATCH_QOS_UNSPECIFIED) {
-		qos = _dispatch_priority_fallback_qos(dwl->dq_priority);
-	}
-	prev = _dispatch_workloop_push_update_tail(dwl, qos, dou._do);
-	if (unlikely(os_mpsc_push_was_empty(prev))) {
-		_dispatch_retain_2_unsafe(dwl);
-	}
-	_dispatch_workloop_push_update_prev(dwl, qos, prev, dou._do);
-	if (unlikely(os_mpsc_push_was_empty(prev))) {
-		return _dispatch_workloop_wakeup(dwl, qos, DISPATCH_WAKEUP_CONSUME_2 |
-				DISPATCH_WAKEUP_MAKE_DIRTY);
-	}
-}
-
-#pragma mark -
-#pragma mark dispatch queue/lane push & wakeup
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-static void
-_dispatch_queue_override_invoke(dispatch_continuation_t dc,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
-{
-	dispatch_queue_t old_rq = _dispatch_queue_get_current();
-	dispatch_queue_global_t assumed_rq = dc->dc_other;
-	dispatch_priority_t old_dp;
-	dispatch_object_t dou;
-	uintptr_t dc_flags = DC_FLAG_CONSUME;
-
-	dou._do = dc->dc_data;
-	old_dp = _dispatch_root_queue_identity_assume(assumed_rq);
-	if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) {
-		flags |= DISPATCH_INVOKE_STEALING;
-		dc_flags |= DC_FLAG_NO_INTROSPECTION;
-	}
-	_dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, {
-		if (_dispatch_object_has_vtable(dou._do)) {
-			dx_invoke(dou._dq, dic, flags);
-		} else {
-			_dispatch_continuation_invoke_inline(dou, flags, assumed_rq);
-		}
-	});
-	_dispatch_reset_basepri(old_dp);
-	_dispatch_queue_set_current(old_rq);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_root_queue_push_needs_override(dispatch_queue_global_t rq,
-		dispatch_qos_t qos)
-{
-	dispatch_qos_t fallback = _dispatch_priority_fallback_qos(rq->dq_priority);
-	if (fallback) {
-		return qos && qos != fallback;
-	}
-
-	dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority);
-	return rqos && qos > rqos;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq,
-		dispatch_object_t dou, dispatch_qos_t qos)
-{
-	bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
-	dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit);
-	dispatch_continuation_t dc = dou._dc;
-
-	if (_dispatch_object_is_redirection(dc)) {
-		// no double-wrap is needed, _dispatch_async_redirect_invoke will do
-		// the right thing
-		dc->dc_func = (void *)orig_rq;
-	} else {
-		dc = _dispatch_continuation_alloc();
-		dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING);
-		dc->dc_ctxt = dc;
-		dc->dc_other = orig_rq;
-		dc->dc_data = dou._do;
-		dc->dc_priority = DISPATCH_NO_PRIORITY;
-		dc->dc_voucher = DISPATCH_NO_VOUCHER;
-	}
-	_dispatch_root_queue_push_inline(rq, dc, dc, 1);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq,
-		dispatch_queue_t dq, dispatch_qos_t qos)
-{
-	bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
-	dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit);
-	dispatch_continuation_t dc = _dispatch_continuation_alloc();
-
-	dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING);
-	_dispatch_retain_2(dq);
-	dc->dc_func = NULL;
-	dc->dc_ctxt = dc;
-	dc->dc_other = orig_rq;
-	dc->dc_data = dq;
-	dc->dc_priority = DISPATCH_NO_PRIORITY;
-	dc->dc_voucher = DISPATCH_NO_VOUCHER;
-	_dispatch_root_queue_push_inline(rq, dc, dc, 1);
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_queue_wakeup_with_override_slow(dispatch_queue_t dq,
-		uint64_t dq_state, dispatch_wakeup_flags_t flags)
-{
-	dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state);
-	dispatch_queue_t tq = dq->do_targetq;
-	mach_port_t owner;
-	bool locked;
-
-	if (_dq_state_is_base_anon(dq_state)) {
-		if (!_dispatch_is_in_root_queues_array(tq)) {
-			// Do not try to override pthread root
-			// queues, it isn't supported and can cause things to run
-			// on the wrong hierarchy if we enqueue a stealer by accident
-			goto out;
-		} else if ((owner = _dq_state_drain_owner(dq_state))) {
-			(void)_dispatch_wqthread_override_start_check_owner(owner, qos,
-					&dq->dq_state_lock);
-			goto out;
-		}
-
-		// avoid locking when we recognize the target queue as a global root
-		// queue: it is gross, but it is a very common case. The locking isn't
-		// needed because these target queues cannot go away.
-		locked = false;
-	} else if (likely(!_dispatch_queue_is_mutable(dq))) {
-		locked = false;
-	} else if (_dispatch_queue_sidelock_trylock(upcast(dq)._dl, qos)) {
-		// to traverse the tq chain safely we must
-		// lock it to ensure it cannot change
-		locked = true;
-		tq = dq->do_targetq;
-		_dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq);
-	} else {
-		//
-		// To get here, the current thread has:
-		// 1. enqueued an object on `dq`
-		// 2. raised the max_qos value, set RECEIVED_OVERRIDE on `dq`
-		//    and didn't see an owner
-		// 3. tried and failed to acquire the side lock
-		//
-		// The side lock owner can only be one of three things:
-		//
-		// - The suspend/resume side count code. Besides being unlikely,
-		//   it means that at this moment the queue is actually suspended,
-		//   which transfers the responsibility of applying the override to
-		//   the eventual dispatch_resume().
-		//
-		// - A dispatch_set_target_queue() call. The fact that we saw no `owner`
-		//   means that the trysync it does wasn't being drained when (2)
-		//   happened which can only be explained by one of these interleavings:
-		//
-		//   o `dq` became idle between when the object queued in (1) ran and
-		//     the set_target_queue call and we were unlucky enough that our
-		//     step (2) happened while this queue was idle. There is no reason
-		//     to override anything anymore, the queue drained to completion
-		//     while we were preempted, our job is done.
-		//
-		//   o `dq` is queued but not draining during (1-2), then when we try
-		//     to lock at (3) the queue is now draining a set_target_queue.
-		//     This drainer must have seen the effects of (2) and that guy has
-		//     applied our override. Our job is done.
-		//
-		// - Another instance of _dispatch_queue_wakeup_with_override_slow(),
-		//   which is fine because trylock leaves a hint that we failed our
-		//   trylock, causing the tryunlock below to fail and reassess whether
-		//   a better override needs to be applied.
-		//
-		_dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq);
-		goto out;
-	}
-
-apply_again:
-	if (dx_hastypeflag(tq, QUEUE_ROOT)) {
-		dispatch_queue_global_t rq = upcast(tq)._dgq;
-		if (qos > _dispatch_priority_qos(rq->dq_priority)) {
-			_dispatch_root_queue_push_override_stealer(rq, dq, qos);
-		}
-	} else if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) {
-		_dispatch_workloop_push_stealer(upcast(tq)._dwl, dq, qos);
-	} else if (_dispatch_queue_need_override(tq, qos)) {
-		dx_wakeup(tq, qos, 0);
-	}
-	if (likely(!locked)) {
-		goto out;
-	}
-	while (unlikely(!_dispatch_queue_sidelock_tryunlock(upcast(dq)._dl))) {
-		// rdar://problem/24081326
-		//
-		// Another instance of _dispatch_queue_wakeup_with_override() tried
-		// to acquire the side lock while we were running, and could have
-		// had a better override than ours to apply.
-		//
-		oqos = _dispatch_queue_max_qos(dq);
-		if (oqos > qos) {
-			qos = oqos;
-			// The other instance had a better priority than ours, override
-			// our thread, and apply the override that wasn't applied to `dq`
-			// because of us.
-			goto apply_again;
-		}
-	}
-
-out:
-	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
-		return _dispatch_release_2_tailcall(dq);
-	}
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_queue_wakeup_with_override(dispatch_queue_class_t dq,
-		uint64_t dq_state, dispatch_wakeup_flags_t flags)
-{
-	dispatch_assert(_dq_state_should_override(dq_state));
-
-#if DISPATCH_USE_KEVENT_WORKLOOP
-	if (likely(_dq_state_is_base_wlh(dq_state))) {
-		_dispatch_trace_runtime_event(worker_request, dq._dq, 1);
-		return _dispatch_event_loop_poke((dispatch_wlh_t)dq._dq, dq_state,
-				flags | DISPATCH_EVENT_LOOP_OVERRIDE);
-	}
-#endif // DISPATCH_USE_KEVENT_WORKLOOP
-	return _dispatch_queue_wakeup_with_override_slow(dq._dq, dq_state, flags);
-}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-
-DISPATCH_NOINLINE
-void
-_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
-{
-	dispatch_queue_t dq = dqu._dq;
-	dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);
-
-	if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
-		_dispatch_retain_2(dq);
-		flags |= DISPATCH_WAKEUP_CONSUME_2;
-	}
-
-	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
-		//
-		// _dispatch_lane_class_barrier_complete() is about what both regular
-		// queues and sources need to evaluate, but the former can have sync
-		// handoffs to perform which _dispatch_lane_class_barrier_complete()
-		// doesn't handle, only _dispatch_lane_barrier_complete() does.
-		//
-		// _dispatch_lane_wakeup() is the one for plain queues that calls
-		// _dispatch_lane_barrier_complete(), and this is only taken for
-		// non-queue types.
-		//
-		dispatch_assert(dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
-		qos = _dispatch_queue_wakeup_qos(dq, qos);
-		return _dispatch_lane_class_barrier_complete(upcast(dq)._dl, qos,
-				flags, target, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED);
-	}
-
-	if (target) {
-		uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
-		if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
-			enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-		}
-		qos = _dispatch_queue_wakeup_qos(dq, qos);
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-			new_state = _dq_state_merge_qos(old_state, qos);
-			if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) {
-				// When an event is being delivered to a source because its
-				// unote was being registered before the ACTIVATING state
-				// had a chance to be cleared, we don't want to fail the wakeup
-				// which could lead to a priority inversion.
-				//
-				// Instead, these wakeups are allowed to finish the pending
-				// activation.
-				if (_dq_state_is_activating(old_state)) {
-					new_state &= ~DISPATCH_QUEUE_ACTIVATING;
-				}
-			}
-			if (likely(!_dq_state_is_suspended(new_state) &&
-					!_dq_state_is_enqueued(old_state) &&
-					(!_dq_state_drain_locked(old_state) ||
-					(enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR &&
-					_dq_state_is_base_wlh(old_state))))) {
-				new_state |= enqueue;
-			}
-			if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
-				new_state |= DISPATCH_QUEUE_DIRTY;
-			} else if (new_state == old_state) {
-				os_atomic_rmw_loop_give_up(goto done);
-			}
-		});
-
-		if (likely((old_state ^ new_state) & enqueue)) {
-			dispatch_queue_t tq;
-			if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
-				// the rmw_loop above has no acquire barrier, as the last block
-				// of a queue asyncing to that queue is not an uncommon pattern
-				// and in that case the acquire would be completely useless
-				//
-				// so instead use dependency ordering to read
-				// the targetq pointer.
-				os_atomic_thread_fence(dependency);
-				tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
-						(long)new_state);
-			} else {
-				tq = target;
-			}
-			dispatch_assert(_dq_state_is_enqueued(new_state));
-			return _dispatch_queue_push_queue(tq, dq, new_state);
-		}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-		if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
-			if (_dq_state_should_override(new_state)) {
-				return _dispatch_queue_wakeup_with_override(dq, new_state,
-						flags);
-			}
-		}
-	} else if (qos) {
-		//
-		// Someone is trying to override the last work item of the queue.
-		//
-		uint64_t old_state, new_state;
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-			// Avoid spurious override if the item was drained before we could
-			// apply an override
-			if (!_dq_state_drain_locked(old_state) &&
-					!_dq_state_is_enqueued(old_state)) {
-				os_atomic_rmw_loop_give_up(goto done);
-			}
-			new_state = _dq_state_merge_qos(old_state, qos);
-			if (new_state == old_state) {
-				os_atomic_rmw_loop_give_up(goto done);
-			}
-		});
-		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dq, new_state, flags);
-		}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-	}
-done:
-	if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) {
-		return _dispatch_release_2_tailcall(dq);
-	}
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags)
-{
-	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
-
-	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
-		return _dispatch_lane_barrier_complete(dqu, qos, flags);
-	}
-	if (_dispatch_queue_class_probe(dqu)) {
-		target = DISPATCH_QUEUE_WAKEUP_TARGET;
-	}
-	return _dispatch_queue_wakeup(dqu, qos, flags, target);
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline bool
-_dispatch_lane_push_waiter_should_wakeup(dispatch_lane_t dq,
-		dispatch_sync_context_t dsc)
-{
-	if (_dispatch_queue_is_thread_bound(dq)) {
-		return true;
-	}
-	if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) {
-		uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed);
-		return _dispatch_async_and_wait_should_always_async(dq, dq_state);
-	}
-	return false;
-}
-
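/*
 * Illustrative sketch (editor's addition): the DC_FLAG_ASYNC_AND_WAIT logic
 * above backs the dispatch_async_and_wait() API that this series' queue.h
 * changes introduce; from the caller's side it looks like this. Whether the
 * block runs on the caller's thread or on a worker is an implementation
 * choice made by the checks above.
 */
#include <dispatch/dispatch.h>

static void
example_async_and_wait(dispatch_queue_t q)
{
	dispatch_async_and_wait(q, ^{
		// ordered like dispatch_async(), but the caller blocks until done
	});
}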
-DISPATCH_NOINLINE
-static void
-_dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc,
-		dispatch_qos_t qos)
-{
-	uint64_t old_state, new_state;
-
-	if (dsc->dc_data != DISPATCH_WLH_ANON) {
-		// The kernel will handle all the overrides / priorities on our behalf.
-		qos = 0;
-	}
-
-	if (unlikely(_dispatch_queue_push_item(dq, dsc))) {
-		if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) {
-			// If this returns true, we know that we are pushing onto the base
-			// queue
-			dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT;
-			dsc->dc_other = dq;
-			return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY);
-		}
-
-		uint64_t pending_barrier_width =
-				(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
-		uint64_t set_owner_and_set_full_width_and_in_barrier =
-				_dispatch_lock_value_for_self() |
-				DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
-			new_state = _dq_state_merge_qos(old_state, qos);
-			new_state |= DISPATCH_QUEUE_DIRTY;
-			if (unlikely(_dq_state_drain_locked(old_state) ||
-					!_dq_state_is_runnable(old_state))) {
-				// not runnable, so we should just handle overrides
-			} else if (_dq_state_is_base_wlh(old_state) &&
-					_dq_state_is_enqueued(old_state)) {
-				// 32123779 let the event thread redrive since it's out already
-			} else if (_dq_state_has_pending_barrier(old_state) ||
-					new_state + pending_barrier_width <
-					DISPATCH_QUEUE_WIDTH_FULL_BIT) {
-				// see _dispatch_queue_drain_try_lock
-				new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK;
-				new_state |= set_owner_and_set_full_width_and_in_barrier;
-			}
-		});
-
-		if (_dq_state_is_base_wlh(old_state)) {
-			dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self());
-		}
-
-		if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
-			return _dispatch_lane_barrier_complete(dq, qos, 0);
-		}
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-		if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
-			if (_dq_state_should_override(new_state)) {
-				return _dispatch_queue_wakeup_with_override(dq, new_state, 0);
-			}
-		}
-	} else if (unlikely(qos)) {
-		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
-			new_state = _dq_state_merge_qos(old_state, qos);
-			if (old_state == new_state) {
-				os_atomic_rmw_loop_give_up(return);
-			}
-		});
-		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dq, new_state, 0);
-		}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-	}
-}
-
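/*
 * Illustrative sketch (editor's addition): the two-phase MPSC push that
 * _dispatch_lane_push() below relies on, with invented types. Only the
 * pusher that observes an empty queue (prev == NULL) retains the queue and
 * issues the wakeup, which is why the retain must happen between the two
 * updates.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct example_node { _Atomic(struct example_node *) next; };

struct example_mpsc {
	_Atomic(struct example_node *) tail;
	_Atomic(struct example_node *) head;
};

// returns true when this push made the queue non-empty
static bool
example_mpsc_push(struct example_mpsc *q, struct example_node *node)
{
	atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
	struct example_node *prev = atomic_exchange_explicit(&q->tail, node,
			memory_order_release);
	if (prev == NULL) {
		atomic_store_explicit(&q->head, node, memory_order_release);
		return true; // caller must wake the drainer up
	}
	atomic_store_explicit(&prev->next, node, memory_order_release);
	return false;
}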
-DISPATCH_NOINLINE
-void
-_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
-		dispatch_qos_t qos)
-{
-	dispatch_wakeup_flags_t flags = 0;
-	struct dispatch_object_s *prev;
-
-	if (unlikely(_dispatch_object_is_waiter(dou))) {
-		return _dispatch_lane_push_waiter(dq, dou._dsc, qos);
-	}
-
-	dispatch_assert(!_dispatch_object_is_global(dq));
-	qos = _dispatch_queue_push_qos(dq, qos);
-
-	// If we are going to call dx_wakeup(), the queue must be retained before
-	// the item we're pushing can be dequeued, which means:
-	// - before we exchange the tail if we have to override
-	// - before we set the head if we made the queue non-empty.
-	// Otherwise, if preempted between one of these and the call to dx_wakeup()
-	// the blocks submitted to the queue may release the last reference to the
-	// queue when invoked by _dispatch_lane_drain.
-
-	prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
-	if (unlikely(os_mpsc_push_was_empty(prev))) {
-		_dispatch_retain_2_unsafe(dq);
-		flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
-	} else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
-		// There's a race here, _dispatch_queue_need_override may read a stale
-		// dq_state value.
-		//
-		// If it's a stale load from the same drain streak, given that
-		// the max qos is monotonic, too old a read can only cause an
-		// unnecessary attempt at overriding which is harmless.
-		//
-		// We'll assume here that a stale load from a previous drain streak
-		// never happens in practice.
-		_dispatch_retain_2_unsafe(dq);
-		flags = DISPATCH_WAKEUP_CONSUME_2;
-	}
-	os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
-	if (flags) {
-		return dx_wakeup(dq, qos, flags);
-	}
-}
-
-DISPATCH_NOINLINE
-void
-_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
-		dispatch_qos_t qos)
-{
-	// reserving non barrier width
-	// doesn't fail if only the ENQUEUED bit is set (unlike its barrier
-	// width equivalent), so we have to check that this thread hasn't
-	// enqueued anything ahead of this call or we can break ordering
-	if (dq->dq_items_tail == NULL &&
-			!_dispatch_object_is_waiter(dou) &&
-			!_dispatch_object_is_barrier(dou) &&
-			_dispatch_queue_try_acquire_async(dq)) {
-		return _dispatch_continuation_redirect_push(dq, dou, qos);
-	}
-
-	_dispatch_lane_push(dq, dou, qos);
-}
-
-#pragma mark -
-#pragma mark dispatch_channel_t
-
-void
-_dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free)
-{
-	dch->dch_callbacks = NULL;
-	_dispatch_lane_class_dispose(dch, allow_free);
-}
-
-void
-_dispatch_channel_xref_dispose(dispatch_channel_t dch)
-{
-	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
-	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch->_as_dq);
-	if (callbacks->dcc_acknowledge_cancel && !(dqf & DSF_CANCELED)) {
-		DISPATCH_CLIENT_CRASH(dch, "Release of a channel that has not been "
-				"cancelled, but has a cancel acknowledgement callback");
-	}
-	dx_wakeup(dch, 0, DISPATCH_WAKEUP_MAKE_DIRTY);
-}
-
-typedef struct dispatch_channel_invoke_ctxt_s {
-	dispatch_channel_t dcic_dch;
-	dispatch_thread_frame_s dcic_dtf;
-	dispatch_invoke_context_t dcic_dic;
-	dispatch_invoke_flags_t dcic_flags;
-	dispatch_queue_wakeup_target_t dcic_tq;
-	struct dispatch_object_s *dcic_next_dc;
-	bool dcic_called_drain;
-} dispatch_channel_invoke_ctxt_s;
-
-static bool
-_dispatch_channel_invoke_cancel_check(dispatch_channel_t dch,
-		dispatch_channel_invoke_ctxt_t ctxt,
-		dispatch_channel_callbacks_t callbacks)
-{
-	bool rc = true;
-	if (!dch->dm_cancel_handler_called) {
-		if (_dispatch_queue_atomic_flags(dch) & DSF_CANCELED) {
-			dispatch_invoke_with_autoreleasepool(ctxt->dcic_flags, {
-				rc = callbacks->dcc_acknowledge_cancel(dch, dch->do_ctxt);
-			});
-			if (rc) {
-				dch->dm_cancel_handler_called = true;
-				_dispatch_release_no_dispose(dch);
-			} else {
-				ctxt->dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
-			}
-		}
-	}
-	return rc;
-}
-
-static bool
-_dispatch_channel_invoke_checks(dispatch_channel_t dch,
-		dispatch_channel_invoke_ctxt_t dcic,
-		dispatch_channel_callbacks_t callbacks)
-{
-	if (!_dispatch_channel_invoke_cancel_check(dch, dcic, callbacks)) {
-		return false;
-	}
-	if (unlikely(_dispatch_needs_to_return_to_kernel())) {
-		_dispatch_return_to_kernel();
-	}
-	if (likely(dcic->dcic_flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) {
-		dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh();
-		if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) {
-			dcic->dcic_tq = dch->do_targetq;
-			return false;
-		}
-	}
-	if (unlikely(_dispatch_queue_drain_should_narrow(dcic->dcic_dic))) {
-		dcic->dcic_tq = dch->do_targetq;
-		return false;
-	}
-	uint64_t dq_state = os_atomic_load(&dch->dq_state, relaxed);
-	if (unlikely(_dq_state_is_suspended(dq_state))) {
-		dcic->dcic_tq = dch->do_targetq;
-		return false;
-	}
-	return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline dispatch_queue_wakeup_target_t
-_dispatch_channel_invoke2(dispatch_channel_t dch, dispatch_invoke_context_t dic,
-		dispatch_invoke_flags_t flags, uint64_t *owned DISPATCH_UNUSED)
-{
-	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
-	dispatch_channel_invoke_ctxt_s dcic = {
-		.dcic_dch = dch,
-		.dcic_dic = dic,
-		.dcic_flags = flags &
-				~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN,
-		.dcic_tq = DISPATCH_QUEUE_WAKEUP_NONE,
-	};
-
-	_dispatch_thread_frame_push(&dcic.dcic_dtf, dch);
-
-	if (!_dispatch_channel_invoke_cancel_check(dch, &dcic, callbacks)) {
-		goto out;
-	}
-
-	do {
-		struct dispatch_object_s *dc = dcic.dcic_next_dc;
-
-		if (unlikely(!dc)) {
-			if (!dch->dq_items_tail) {
-				break;
-			}
-			dc = _dispatch_queue_get_head(dch);
-		}
-
-		if (unlikely(_dispatch_object_is_sync_waiter(dc))) {
-			DISPATCH_CLIENT_CRASH(0, "sync waiter found on channel");
-		}
-
-		if (_dispatch_object_is_channel_item(dc)) {
-			dcic.dcic_next_dc = dc;
-			dcic.dcic_called_drain = false;
-			dispatch_invoke_with_autoreleasepool(dcic.dcic_flags, {
-				if (callbacks->dcc_invoke(dch, &dcic, dch->do_ctxt)) {
-					if (unlikely(!dcic.dcic_called_drain)) {
-						DISPATCH_CLIENT_CRASH(0, "Channel didn't call "
-								"dispatch_channel_drain");
-					}
-				} else {
-					dcic.dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
-				}
-			});
-		} else {
-			dcic.dcic_next_dc = _dispatch_queue_pop_head(dch, dc);
-			_dispatch_continuation_pop_inline(dc, dic, flags, dch);
-			if (!_dispatch_channel_invoke_checks(dch, &dcic, callbacks)) {
-				break;
-			}
-		}
-	} while (dcic.dcic_tq == DISPATCH_QUEUE_WAKEUP_NONE);
-
-out:
-	_dispatch_thread_frame_pop(&dcic.dcic_dtf);
-	return dcic.dcic_tq;
-}
-
-void
-_dispatch_channel_invoke(dispatch_channel_t dch,
-		dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags)
-{
-	_dispatch_queue_class_invoke(dch, dic, flags,
-			DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_channel_invoke2);
-}
-
-void
-dispatch_channel_foreach_work_item_peek_f(
-		dispatch_channel_invoke_ctxt_t dcic,
-		void *ctxt, dispatch_channel_enumerator_handler_t f)
-{
-	if (dcic->dcic_called_drain) {
-		DISPATCH_CLIENT_CRASH(0, "Called peek after drain");
-	}
-
-	dispatch_channel_t dch = dcic->dcic_dch;
-	struct dispatch_object_s *dc = dcic->dcic_next_dc;
-
-	for (;;) {
-		dispatch_continuation_t dci = (dispatch_continuation_t)dc;
-		if (!_dispatch_object_is_channel_item(dc)) {
-			break;
-		}
-		if (!f(ctxt, dci->dc_ctxt)) {
-			break;
-		}
-		if (dc == dch->dq_items_tail) {
-			break;
-		}
-		dc = os_mpsc_get_next(dc, do_next);
-	}
-}
-
-void
-dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t dcic,
-		void *_Nullable ctxt, dispatch_channel_drain_handler_t f)
-{
-	dispatch_channel_t dch = dcic->dcic_dch;
-	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
-	struct dispatch_object_s *dc;
-	uintptr_t dcf = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM;
-	void *unpop_item = NULL;
-	bool stop_invoke = false;
-
-	if (dcic->dcic_called_drain) {
-		DISPATCH_CLIENT_CRASH(0, "Called drain twice in the same invoke");
-	}
-	dcic->dcic_called_drain = true;
-
-	do {
-		dc = dcic->dcic_next_dc;
-		if (unlikely(!dc)) {
-			if (!dch->dq_items_tail) {
-				break;
-			}
-			dc = _dispatch_queue_get_head(dch);
-		}
-		if (!_dispatch_object_is_channel_item(dc)) {
-			break;
-		}
-
-		dcic->dcic_next_dc = _dispatch_queue_pop_head(dch, dc);
-
-		_dispatch_continuation_pop_forwarded(upcast(dc)._dc, dcf, dch, {
-			dispatch_invoke_with_autoreleasepool(dcic->dcic_flags, {
-				stop_invoke = !f(ctxt, upcast(dc)._dc->dc_ctxt, &unpop_item);
-			});
-		});
-		if (unlikely(stop_invoke)) {
-			break;
-		}
-	} while (_dispatch_channel_invoke_checks(dch, dcic, callbacks));
-
-	if (unlikely(unpop_item)) {
-		dispatch_continuation_t dci = _dispatch_continuation_alloc();
-		_dispatch_continuation_init_f(dci, dch, unpop_item, NULL, 0, dcf);
-		os_mpsc_undo_pop_head(os_mpsc(dch, dq_items), upcast(dci)._do,
-				dcic->dcic_next_dc, do_next);
-		dcic->dcic_next_dc = upcast(dci)._do;
-	}
-}
-
-#ifdef __BLOCKS__
-void
-dispatch_channel_foreach_work_item_peek(
-		dispatch_channel_invoke_ctxt_t dcic,
-		dispatch_channel_enumerator_block_t block)
-{
-	dispatch_channel_enumerator_handler_t f;
-	f = (dispatch_channel_enumerator_handler_t)_dispatch_Block_invoke(block);
-	dispatch_channel_foreach_work_item_peek_f(dcic, block, f);
-}
-
-void
-dispatch_channel_drain(dispatch_channel_invoke_ctxt_t dcic,
-		dispatch_channel_drain_block_t block)
-{
-	dispatch_channel_drain_handler_t f;
-	f = (dispatch_channel_drain_handler_t)_dispatch_Block_invoke(block);
-	dispatch_channel_drain_f(dcic, block, f);
-}
-#endif // __BLOCKS__
-
-DISPATCH_NOINLINE
-void
-_dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos,
-		dispatch_wakeup_flags_t flags)
-{
-	dispatch_channel_callbacks_t callbacks = dch->dch_callbacks;
-	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
-	dispatch_queue_t dq = dch->_as_dq;
-
-	if (unlikely(!callbacks->dcc_probe(dch, dch->do_ctxt))) {
-		target = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT;
-	} else if (_dispatch_queue_class_probe(dch)) {
-		target = DISPATCH_QUEUE_WAKEUP_TARGET;
-	} else if (_dispatch_queue_atomic_flags(dq) & DSF_CANCELED) {
-		if (!dch->dm_cancel_handler_called) {
-			target = DISPATCH_QUEUE_WAKEUP_TARGET;
-		}
-	}
-
-	return _dispatch_queue_wakeup(dch, qos, flags, target);
-}
-
-size_t
-_dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz)
-{
-	dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch);
-	size_t offset = 0;
-
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
-			_dispatch_object_class_name(dch), dch);
-	offset += _dispatch_object_debug_attr(dch, &buf[offset], bufsiz - offset);
-	offset += _dispatch_queue_debug_attr(dch->_as_dq, &buf[offset], bufsiz - offset);
-	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s%s%s",
-			(dqf & DSF_CANCELED) ? "cancelled, " : "",
-			(dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "",
-			(dqf & DSF_DELETED) ? "deleted, " : "");
-
-	return offset;
-}
-
-dispatch_channel_t
-dispatch_channel_create(const char *label, dispatch_queue_t tq,
-		void *ctxt, dispatch_channel_callbacks_t callbacks)
-{
-	dispatch_channel_t dch;
-	dispatch_queue_flags_t dqf = DSF_STRICT;
-
-	if (callbacks->dcc_version < 1) {
-		DISPATCH_CLIENT_CRASH(callbacks->dcc_version,
-				"Unsupported callbacks version");
-	}
-
-	if (label) {
-		const char *tmp = _dispatch_strdup_if_mutable(label);
-		if (tmp != label) {
-			dqf |= DQF_LABEL_NEEDS_FREE;
-			label = tmp;
-		}
-	}
-
-	if (unlikely(!tq)) {
-		tq = _dispatch_get_default_queue(true);
-	} else {
-		_dispatch_retain((dispatch_queue_t _Nonnull)tq);
-	}
-
-	dch = _dispatch_queue_alloc(channel, dqf, 1,
-			DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dch;
-	dch->dq_label = label;
-	dch->do_targetq = tq;
-	dch->dch_callbacks = callbacks;
-	dch->do_ctxt = ctxt;
-	if (!callbacks->dcc_acknowledge_cancel) {
-		dch->dm_cancel_handler_called = true;
-		dch->do_ref_cnt--;
-	}
-	return dch;
-}
-
-DISPATCH_NOINLINE
-static void
-_dispatch_channel_enqueue_slow(dispatch_channel_t dch, void *ctxt)
-{
-	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM;
-	dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap();
-	dispatch_qos_t qos;
-
-	qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags);
-	_dispatch_continuation_async(dch, dc, qos, dc->dc_flags);
-}
-
-DISPATCH_NOINLINE
-void
-dispatch_channel_enqueue(dispatch_channel_t dch, void *ctxt)
-{
-	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM;
-	dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly();
-	dispatch_qos_t qos;
-
-	if (unlikely(!dc)) {
-		return _dispatch_channel_enqueue_slow(dch, ctxt);
-	}
-	qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags);
-	_dispatch_continuation_async(dch, dc, qos, dc->dc_flags);
-}
-
-#ifndef __APPLE__
-#if __BLOCKS__
-void typeof(dispatch_channel_async) dispatch_channel_async
-		__attribute__((__alias__("dispatch_async")));
-#endif
-
-void typeof(dispatch_channel_async_f) dispatch_channel_async_f
-		__attribute__((__alias__("dispatch_async_f")));
-#endif
-
-void
-dispatch_channel_wakeup(dispatch_channel_t dch, qos_class_t qos_class)
-{
-	dispatch_qos_t oqos = _dispatch_qos_from_qos_class(qos_class);
-	dx_wakeup(dch, oqos, DISPATCH_WAKEUP_MAKE_DIRTY);
-}
-
-#pragma mark -
-#pragma mark dispatch_mgr_queue
-
-#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE
-struct _dispatch_mgr_sched_s {
-	volatile int prio;
-	volatile qos_class_t qos;
-	int default_prio;
-	int policy;
-#if defined(_WIN32)
-	HANDLE hThread;
-#else
-	pthread_t tid;
-#endif
-};
-
-DISPATCH_STATIC_GLOBAL(struct _dispatch_mgr_sched_s _dispatch_mgr_sched);
-DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred);
-
-#if HAVE_PTHREAD_WORKQUEUE_QOS
-// TODO: switch to "event-reflector thread" property
-// Must be kept in sync with list of qos classes in sys/qos.h
-static int
-_dispatch_mgr_sched_qos2prio(qos_class_t qos)
-{
-	if (qos == QOS_CLASS_MAINTENANCE) return 4;
-	switch (qos) {
-	case QOS_CLASS_BACKGROUND: return 4;
-	case QOS_CLASS_UTILITY: return 20;
-	case QOS_CLASS_DEFAULT: return 31;
-	case QOS_CLASS_USER_INITIATED: return 37;
-	case QOS_CLASS_USER_INTERACTIVE: return 47;
-	default: return 0;
-	}
-}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
-
-static void
-_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED)
-{
-#if !defined(_WIN32)
-	struct sched_param param;
-#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
-
dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; - pthread_attr_t *attr = &pqc->dpq_thread_attr; -#else - pthread_attr_t a, *attr = &a; -#endif - (void)dispatch_assume_zero(pthread_attr_init(attr)); - (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, - &_dispatch_mgr_sched.policy)); - (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = qos_class_main(); - if (qos == QOS_CLASS_DEFAULT) { - qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 - } - if (qos) { - _dispatch_mgr_sched.qos = qos; - param.sched_priority = _dispatch_mgr_sched_qos2prio(qos); - } -#endif - _dispatch_mgr_sched.default_prio = param.sched_priority; -#else // defined(_WIN32) - _dispatch_mgr_sched.policy = 0; - _dispatch_mgr_sched.default_prio = THREAD_PRIORITY_NORMAL; -#endif // defined(_WIN32) - _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; -} -#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE - -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES -#if DISPATCH_USE_MGR_THREAD -#if !defined(_WIN32) -DISPATCH_NOINLINE -static pthread_t * -_dispatch_mgr_root_queue_init(void) -{ - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; - pthread_attr_t *attr = &pqc->dpq_thread_attr; - struct sched_param param; - (void)dispatch_assume_zero(pthread_attr_setdetachstate(attr, - PTHREAD_CREATE_DETACHED)); -#if !DISPATCH_DEBUG - (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = _dispatch_mgr_sched.qos; - if (qos) { - if (_dispatch_set_qos_class_enabled) { - (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, - qos, 0)); - } - } -#endif - param.sched_priority = _dispatch_mgr_sched.prio; - if (param.sched_priority > _dispatch_mgr_sched.default_prio) { - (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); - } - return &_dispatch_mgr_sched.tid; -} -#else // defined(_WIN32) -DISPATCH_NOINLINE -static PHANDLE -_dispatch_mgr_root_queue_init(void) -{ - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - return &_dispatch_mgr_sched.hThread; -} -#endif // defined(_WIN32) - -static inline void -_dispatch_mgr_priority_apply(void) -{ -#if !defined(_WIN32) - struct sched_param param; - do { - param.sched_priority = _dispatch_mgr_sched.prio; - if (param.sched_priority > _dispatch_mgr_sched.default_prio) { - (void)dispatch_assume_zero(pthread_setschedparam( - _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, - ¶m)); - } - } while (_dispatch_mgr_sched.prio > param.sched_priority); -#else // defined(_WIN32) - int nPriority = _dispatch_mgr_sched.prio; - do { - if (nPriority > _dispatch_mgr_sched.default_prio) { - // TODO(compnerd) set thread scheduling policy - dispatch_assume_zero(SetThreadPriority(_dispatch_mgr_sched.hThread, nPriority)); - nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread); - } - } while (_dispatch_mgr_sched.prio > nPriority); -#endif // defined(_WIN32) -} - -DISPATCH_NOINLINE -static void -_dispatch_mgr_priority_init(void) -{ -#if !defined(_WIN32) - dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; - pthread_attr_t *attr = &pqc->dpq_thread_attr; - struct sched_param param; - (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = 0; - 
(void)pthread_attr_get_qos_class_np(attr, &qos, NULL);
- if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) {
- (void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0);
- int p = _dispatch_mgr_sched_qos2prio(_dispatch_mgr_sched.qos);
- if (p > param.sched_priority) {
- param.sched_priority = p;
- }
- }
-#endif
- if (unlikely(_dispatch_mgr_sched.prio > param.sched_priority)) {
- return _dispatch_mgr_priority_apply();
- }
-#else // defined(_WIN32)
- int nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread);
- if (slowpath(_dispatch_mgr_sched.prio > nPriority)) {
- return _dispatch_mgr_priority_apply();
- }
-#endif // defined(_WIN32)
-}
-#endif // DISPATCH_USE_MGR_THREAD
-
-#if !defined(_WIN32)
-DISPATCH_NOINLINE
-static void
-_dispatch_mgr_priority_raise(const pthread_attr_t *attr)
-{
- dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init);
- struct sched_param param;
- (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param));
-#if HAVE_PTHREAD_WORKQUEUE_QOS
- qos_class_t q, qos = 0;
- (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL);
- if (qos) {
- param.sched_priority = _dispatch_mgr_sched_qos2prio(qos);
- os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, {
- if (q >= qos) os_atomic_rmw_loop_give_up(break);
- });
- }
-#endif
- int p, prio = param.sched_priority;
- os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, {
- if (p >= prio) os_atomic_rmw_loop_give_up(return);
- });
-#if DISPATCH_USE_KEVENT_WORKQUEUE
- _dispatch_root_queues_init();
- if (_dispatch_kevent_workqueue_enabled) {
- pthread_priority_t pp = 0;
- if (prio > _dispatch_mgr_sched.default_prio) {
- // The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and
- // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, but that is not
- // problematic in this case, since the second one is only ever
- // used on dq_priority fields.
- // We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a syscall;
- // it is meaningful to libdispatch only.
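
The two os_atomic_rmw_loop2o() loops above implement a monotonic maximum: a caller only publishes its qos/prio into _dispatch_mgr_sched if no other thread has already recorded a higher value, so concurrent raises can never lower the manager priority. A minimal sketch of the same pattern in portable C11 atomics (raise_to_max and its names are illustrative, not libdispatch API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Raise *target to at least `candidate`, never lowering it. Mirrors the
     * rmw loops above: give up as soon as a higher value is already there. */
    static bool
    raise_to_max(_Atomic int *target, int candidate)
    {
        int current = atomic_load_explicit(target, memory_order_relaxed);
        do {
            if (current >= candidate) {
                return false;   /* another thread already raised it further */
            }
        } while (!atomic_compare_exchange_weak_explicit(target, &current,
                candidate, memory_order_relaxed, memory_order_relaxed));
        return true;
    }
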
- pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; - } else if (qos) { - pp = _pthread_qos_class_encode(qos, 0, 0); - } - if (pp) { - int r = _pthread_workqueue_set_event_manager_priority(pp); - (void)dispatch_assume_zero(r); - } - return; - } -#endif -#if DISPATCH_USE_MGR_THREAD - if (_dispatch_mgr_sched.tid) { - return _dispatch_mgr_priority_apply(); - } -#endif -} -#endif // !defined(_WIN32) -#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) -{ - uint64_t old_state, new_state, set_owner_and_set_full_width = - _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { - new_state = old_state; - if (unlikely(!_dq_state_is_runnable(old_state) || - _dq_state_drain_locked(old_state))) { - DISPATCH_INTERNAL_CRASH((uintptr_t)old_state, - "Locking the manager should not fail"); - } - new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state |= set_owner_and_set_full_width; - }); -} - -#if DISPATCH_USE_KEVENT_WORKQUEUE -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) -{ - uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - }); - return _dq_state_is_dirty(old_state); -} -#endif // DISPATCH_USE_KEVENT_WORKQUEUE - -static void -_dispatch_mgr_queue_drain(void) -{ - const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; - dispatch_invoke_context_s dic = { }; - struct dispatch_queue_static_s *dq = &_dispatch_mgr_q; - uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - - if (dq->dq_items_tail) { - _dispatch_perfmon_start(); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - if (unlikely(_dispatch_lane_serial_drain(dq, &dic, flags, &owned))) { - DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); - } - _dispatch_voucher_debug("mgr queue clear", NULL); - _voucher_clear(); - _dispatch_reset_basepri_override(); - _dispatch_perfmon_end(perfmon_thread_manager); - } - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunreachable-code" -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (!_dispatch_kevent_workqueue_enabled) -#endif - { - _dispatch_force_cache_cleanup(); - } -#pragma clang diagnostic pop -} - -void -_dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou, - DISPATCH_UNUSED dispatch_qos_t qos) -{ - uint64_t dq_state; - - if (unlikely(_dispatch_object_is_waiter(dou))) { - DISPATCH_CLIENT_CRASH(0, "Waiter pushed onto manager"); - } - - if (unlikely(_dispatch_queue_push_item(dq, dou))) { - dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); - if (!_dq_state_drain_locked_by_self(dq_state)) { - _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1); - _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); - } - } -} - -DISPATCH_NORETURN -void -_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_lane_t dq, - DISPATCH_UNUSED dispatch_qos_t qos, - DISPATCH_UNUSED dispatch_wakeup_flags_t flags) -{ - DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager"); -} - -#if DISPATCH_USE_MGR_THREAD -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_mgr_invoke(void) -{ -#if DISPATCH_EVENT_BACKEND_KEVENT - dispatch_kevent_s 
evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; -#endif - dispatch_deferred_items_s ddi = { - .ddi_wlh = DISPATCH_WLH_ANON, -#if DISPATCH_EVENT_BACKEND_KEVENT - .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - .ddi_eventlist = evbuf, -#endif - }; - - _dispatch_deferred_items_set(&ddi); - for (;;) { - bool poll = false; - _dispatch_mgr_queue_drain(); - _dispatch_event_loop_drain_anon_timers(); - poll = _dispatch_queue_class_probe(&_dispatch_mgr_q); - _dispatch_event_loop_drain(poll ? KEVENT_FLAG_IMMEDIATE : 0); - } -} - -DISPATCH_NORETURN -void -_dispatch_mgr_thread(dispatch_lane_t dq DISPATCH_UNUSED, - dispatch_invoke_context_t dic DISPATCH_UNUSED, - dispatch_invoke_flags_t flags DISPATCH_UNUSED) -{ -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " - "kevent workqueue enabled"); - } -#endif - _dispatch_queue_set_current(&_dispatch_mgr_q); -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES - _dispatch_mgr_priority_init(); -#endif - _dispatch_queue_mgr_lock(&_dispatch_mgr_q); - // never returns, so burn bridges behind us & clear stack 2k ahead - _dispatch_clear_stack(2048); - _dispatch_mgr_invoke(); -} -#endif // DISPATCH_USE_MGR_THREAD - -#if DISPATCH_USE_KEVENT_WORKQUEUE - -dispatch_static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= - DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - "our list should not be longer than the kernel's"); - -static void _dispatch_root_queue_drain_deferred_item( - dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); -static void _dispatch_root_queue_drain_deferred_wlh( - dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); - -void -_dispatch_kevent_workqueue_init(void) -{ - // Initialize kevent workqueue support - _dispatch_root_queues_init(); - if (!_dispatch_kevent_workqueue_enabled) return; - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - qos_class_t qos = _dispatch_mgr_sched.qos; - int prio = _dispatch_mgr_sched.prio; - pthread_priority_t pp = 0; - if (qos) { - pp = _pthread_qos_class_encode(qos, 0, 0); - } - if (prio > _dispatch_mgr_sched.default_prio) { - pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; - } - if (pp) { - int r = _pthread_workqueue_set_event_manager_priority(pp); - (void)dispatch_assume_zero(r); - } -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) -{ - dispatch_assert(ddi->ddi_wlh); - - pthread_priority_t pp = _dispatch_get_priority(); - if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { - // If this thread does not have the event manager flag set, don't setup - // as the dispatch manager and let the caller know to only process - // the delivered events. 
- //
- // Also add the NEEDS_UNBIND flag so that
- // _dispatch_priority_compute_update knows it has to unbind
- pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
- if (ddi->ddi_wlh == DISPATCH_WLH_ANON) {
- pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
- } else {
- // pthread sets the flag when it is an event delivery thread
- // so we need to explicitly clear it
- pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
- }
- _dispatch_thread_setspecific(dispatch_priority_key,
- (void *)(uintptr_t)pp);
- if (ddi->ddi_wlh != DISPATCH_WLH_ANON) {
- _dispatch_debug("wlh[%p]: handling events", ddi->ddi_wlh);
- } else {
- ddi->ddi_can_stash = true;
- }
- return false;
- }
-
- if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) ||
- !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) {
- // When the pthread kext is delivering kevents to us, and pthread
- // root queues are in use, then the pthread priority TSD is set
- // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set.
- //
- // Given that this isn't a valid QoS we need to fix up the TSD,
- // and the best option is to clear the qos/priority bits which tells
- // us to not do any QoS related calls on this thread.
- //
- // However, in that case the manager thread is opted out of QoS,
- // as far as pthread is concerned, and can't be turned into
- // something else, so we can't stash.
- pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK;
- }
- // Managers always park without mutating to a regular worker thread, and
- // hence never need to unbind from userland, and when draining a manager,
- // the NEEDS_UNBIND flag would cause the mutation to happen.
- // So we need to strip this flag
- pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
- _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
-
- // ensure kevents registered from this thread are registered at manager QoS
- _dispatch_init_basepri_wlh(DISPATCH_PRIORITY_FLAG_MANAGER);
- _dispatch_queue_set_current(&_dispatch_mgr_q);
- _dispatch_queue_mgr_lock(&_dispatch_mgr_q);
- return true;
-}
-
-DISPATCH_ALWAYS_INLINE
-static inline void
-_dispatch_wlh_worker_thread_reset(void)
-{
- bool needs_poll = _dispatch_queue_mgr_unlock(&_dispatch_mgr_q);
- _dispatch_clear_basepri();
- _dispatch_queue_set_current(NULL);
- if (needs_poll) {
- _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1);
- _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0);
- }
-}
-
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events,
- int *nevents)
-{
- _dispatch_introspection_thread_add();
-
- DISPATCH_PERF_MON_VAR_INIT
-
- dispatch_deferred_items_s ddi = {
- .ddi_wlh = wlh,
- .ddi_eventlist = events,
- };
- bool is_manager;
-
- is_manager = _dispatch_wlh_worker_thread_init(&ddi);
- if (!is_manager) {
- _dispatch_trace_runtime_event(worker_event_delivery,
- wlh == DISPATCH_WLH_ANON ?
NULL : wlh, (uint64_t)*nevents); - _dispatch_perfmon_start_impl(true); - } else { - _dispatch_trace_runtime_event(worker_event_delivery, - &_dispatch_mgr_q, (uint64_t)*nevents); - ddi.ddi_wlh = DISPATCH_WLH_ANON; - } - _dispatch_deferred_items_set(&ddi); - _dispatch_event_loop_merge(events, *nevents); - - if (is_manager) { - _dispatch_trace_runtime_event(worker_unpark, &_dispatch_mgr_q, 0); - _dispatch_mgr_queue_drain(); - _dispatch_event_loop_drain_anon_timers(); - _dispatch_wlh_worker_thread_reset(); - } else if (ddi.ddi_stashed_dou._do) { - _dispatch_debug("wlh[%p]: draining deferred item %p", ddi.ddi_wlh, - ddi.ddi_stashed_dou._do); - if (ddi.ddi_wlh == DISPATCH_WLH_ANON) { - dispatch_assert(ddi.ddi_nevents == 0); - _dispatch_deferred_items_set(NULL); - _dispatch_trace_runtime_event(worker_unpark, ddi.ddi_stashed_rq, 0); - _dispatch_root_queue_drain_deferred_item(&ddi - DISPATCH_PERF_MON_ARGS); - } else { - _dispatch_trace_runtime_event(worker_unpark, wlh, 0); - _dispatch_root_queue_drain_deferred_wlh(&ddi - DISPATCH_PERF_MON_ARGS); - } - } - - _dispatch_deferred_items_set(NULL); - if (!is_manager && !ddi.ddi_stashed_dou._do) { - _dispatch_perfmon_end(perfmon_thread_event_no_steal); - } - _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); - _dispatch_clear_return_to_kernel(); - *nevents = ddi.ddi_nevents; - - _dispatch_trace_runtime_event(worker_park, NULL, 0); -} - -DISPATCH_NOINLINE -static void -_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) -{ - if (!dispatch_assume(events && nevents)) { - return; - } - if (*nevents == 0 || *events == NULL) { - // events for worker thread request have already been delivered earlier - // or got cancelled before point of no return concurrently - return; - } - _dispatch_adopt_wlh_anon(); - _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); - _dispatch_reset_wlh(); -} - -#if DISPATCH_USE_KEVENT_WORKLOOP -DISPATCH_NOINLINE -static void -_dispatch_workloop_worker_thread(uint64_t *workloop_id, - dispatch_kevent_t *events, int *nevents) -{ - if (!dispatch_assume(workloop_id && events && nevents)) { - return; - } - if (!dispatch_assume(*workloop_id != 0)) { - return _dispatch_kevent_worker_thread(events, nevents); - } - if (*nevents == 0 || *events == NULL) { - // events for worker thread request have already been delivered earlier - // or got cancelled before point of no return concurrently - return; - } - dispatch_wlh_t wlh = (dispatch_wlh_t)*workloop_id; - _dispatch_adopt_wlh(wlh); - _dispatch_wlh_worker_thread(wlh, *events, nevents); - _dispatch_preserve_wlh_storage_reference(wlh); -} -#endif // DISPATCH_USE_KEVENT_WORKLOOP -#endif // DISPATCH_USE_KEVENT_WORKQUEUE -#pragma mark - -#pragma mark dispatch_root_queue - -#if DISPATCH_USE_PTHREAD_POOL -static void *_dispatch_worker_thread(void *context); -#if defined(_WIN32) -static unsigned WINAPI _dispatch_worker_thread_thunk(LPVOID lpParameter); -#endif -#endif // DISPATCH_USE_PTHREAD_POOL - -#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG -#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) -static void -_dispatch_debug_root_queue(dispatch_queue_class_t dqu, const char *str) -{ - if (likely(dqu._dq)) { - _dispatch_object_debug(dqu._dq, "%s", str); - } else { - _dispatch_log("queue[NULL]: %s", str); - } -} -#else -#define _dispatch_root_queue_debug(...) -#define _dispatch_debug_root_queue(...) 
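
These no-op fallbacks follow the usual pattern of compiling debug logging away entirely outside DISPATCH_DEBUG builds, so call sites cost nothing in release code. A generic version of the same idiom (MYLIB_DEBUG and mylib_debug are invented names; the `##__VA_ARGS__` comma-swallowing is a GNU/clang extension that this codebase's toolchains support):

    #include <stdio.h>

    #if MYLIB_DEBUG
    /* Debug builds: forward to stderr with a printf-style interface. */
    #define mylib_debug(fmt, ...) \
            fprintf(stderr, "debug: " fmt "\n", ##__VA_ARGS__)
    #else
    /* Release builds: the macro expands to a harmless no-op. */
    #define mylib_debug(fmt, ...) ((void)0)
    #endif
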
-#endif // DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG - -DISPATCH_NOINLINE -static void -_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) -{ - int remaining = n; -#if !defined(_WIN32) - int r = ENOSYS; -#endif - - _dispatch_root_queues_init(); - _dispatch_debug_root_queue(dq, __func__); - _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n); - -#if !DISPATCH_USE_INTERNAL_WORKQUEUE -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) -#endif - { - _dispatch_root_queue_debug("requesting new worker thread for global " - "queue: %p", dq); - r = _pthread_workqueue_addthreads(remaining, - _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); - (void)dispatch_assume_zero(r); - return; - } -#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE -#if DISPATCH_USE_PTHREAD_POOL - dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - if (likely(pqc->dpq_thread_mediator.do_vtable)) { - while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { - _dispatch_root_queue_debug("signaled sleeping worker for " - "global queue: %p", dq); - if (!--remaining) { - return; - } - } - } - - bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - if (overcommit) { - os_atomic_add2o(dq, dgq_pending, remaining, relaxed); - } else { - if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) { - _dispatch_root_queue_debug("worker thread request still pending for " - "global queue: %p", dq); - return; - } - } - - int can_request, t_count; - // seq_cst with atomic store to tail - t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered); - do { - can_request = t_count < floor ? 0 : t_count - floor; - if (remaining > can_request) { - _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", - remaining, can_request); - os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed); - remaining = can_request; - } - if (remaining == 0) { - _dispatch_root_queue_debug("pthread pool is full for root queue: " - "%p", dq); - return; - } - } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count, - t_count - remaining, &t_count, acquire)); - -#if !defined(_WIN32) - pthread_attr_t *attr = &pqc->dpq_thread_attr; - pthread_t tid, *pthr = &tid; -#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES - if (unlikely(dq == &_dispatch_mgr_root_queue)) { - pthr = _dispatch_mgr_root_queue_init(); - } -#endif - do { - _dispatch_retain(dq); // released in _dispatch_worker_thread - while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { - if (r != EAGAIN) { - (void)dispatch_assume_zero(r); - } - _dispatch_temporary_resource_shortage(); - } - } while (--remaining); -#else // defined(_WIN32) -#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES - if (unlikely(dq == &_dispatch_mgr_root_queue)) { - _dispatch_mgr_root_queue_init(); - } -#endif - do { - _dispatch_retain(dq); // released in _dispatch_worker_thread -#if DISPATCH_DEBUG - unsigned dwStackSize = 0; -#else - unsigned dwStackSize = 64 * 1024; -#endif - uintptr_t hThread = 0; - while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { - if (errno != EAGAIN) { - (void)dispatch_assume(hThread); - } - _dispatch_temporary_resource_shortage(); - } -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES - if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { - (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); 
- } -#endif - CloseHandle((HANDLE)hThread); - } while (--remaining); -#endif // defined(_WIN32) -#else - (void)floor; -#endif // DISPATCH_USE_PTHREAD_POOL -} - -DISPATCH_NOINLINE -void -_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) -{ - if (!_dispatch_queue_class_probe(dq)) { - return; - } -#if !DISPATCH_USE_INTERNAL_WORKQUEUE -#if DISPATCH_USE_PTHREAD_POOL - if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) -#endif - { - if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { - _dispatch_root_queue_debug("worker thread request still pending " - "for global queue: %p", dq); - return; - } - } -#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE - return _dispatch_root_queue_poke_slow(dq, n, floor); -} - -#define DISPATCH_ROOT_QUEUE_MEDIATOR ((struct dispatch_object_s *)~0ul) - -enum { - DISPATCH_ROOT_QUEUE_DRAIN_WAIT, - DISPATCH_ROOT_QUEUE_DRAIN_READY, - DISPATCH_ROOT_QUEUE_DRAIN_ABORT, -}; - -static int -_dispatch_root_queue_mediator_is_gone(dispatch_queue_global_t dq) -{ - return os_atomic_load2o(dq, dq_items_head, relaxed) != - DISPATCH_ROOT_QUEUE_MEDIATOR; -} - -static int -_dispatch_root_queue_head_tail_quiesced(dispatch_queue_global_t dq) -{ - // Wait for queue head and tail to be both non-empty or both empty - struct dispatch_object_s *head, *tail; - head = os_atomic_load2o(dq, dq_items_head, relaxed); - tail = os_atomic_load2o(dq, dq_items_tail, relaxed); - if ((head == NULL) == (tail == NULL)) { - if (tail == NULL) { // - return DISPATCH_ROOT_QUEUE_DRAIN_ABORT; - } - return DISPATCH_ROOT_QUEUE_DRAIN_READY; - } - return DISPATCH_ROOT_QUEUE_DRAIN_WAIT; -} - -DISPATCH_NOINLINE -static bool -__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, - int (*predicate)(dispatch_queue_global_t dq)) -{ - unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; - int status = DISPATCH_ROOT_QUEUE_DRAIN_READY; - bool pending = false; - - do { - // Spin for a short while in case the contention is temporary -- e.g. - // when starting up after dispatch_apply, or when executing a few - // short continuations in a row. - if (_dispatch_contention_wait_until(status = predicate(dq))) { - goto out; - } - // Since we have serious contention, we need to back off. - if (!pending) { - // Mark this queue as pending to avoid requests for further threads - (void)os_atomic_inc2o(dq, dgq_pending, relaxed); - pending = true; - } - _dispatch_contention_usleep(sleep_time); - if (likely(status = predicate(dq))) goto out; - sleep_time *= 2; - } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); - - // The ratio of work to libdispatch overhead must be bad. This - // scenario implies that there are too many threads in the pool. - // Create a new pending thread and then exit this thread. - // The kernel will grant a new thread when the load subsides. 
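
The wait loop above combines a short spin with exponentially growing sleeps, capped so that a persistently contended queue hands off to a fresh thread instead of burning CPU. Roughly, in standalone form (the names and constants here are invented, not the real tunables):

    #include <stdbool.h>
    #include <unistd.h>

    #define BACKOFF_USLEEP_START 1u
    #define BACKOFF_USLEEP_MAX   100000u

    /* Re-test `predicate` with exponential backoff; true means it eventually
     * held, false means the caller should give up and request a new thread. */
    static bool
    backoff_until(bool (*predicate)(void *), void *arg)
    {
        unsigned sleep_us = BACKOFF_USLEEP_START;
        do {
            if (predicate(arg)) {
                return true;
            }
            usleep(sleep_us);
            sleep_us *= 2;      /* double the nap after every failed probe */
        } while (sleep_us < BACKOFF_USLEEP_MAX);
        return false;
    }
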
- _dispatch_debug("contention on global queue: %p", dq); -out: - if (pending) { - (void)os_atomic_dec2o(dq, dgq_pending, relaxed); - } - if (status == DISPATCH_ROOT_QUEUE_DRAIN_WAIT) { - _dispatch_root_queue_poke(dq, 1, 0); - } - return status == DISPATCH_ROOT_QUEUE_DRAIN_READY; -} - -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline struct dispatch_object_s * -_dispatch_root_queue_drain_one(dispatch_queue_global_t dq) -{ - struct dispatch_object_s *head, *next; - -start: - // The MEDIATOR value acts both as a "lock" and a signal - head = os_atomic_xchg2o(dq, dq_items_head, - DISPATCH_ROOT_QUEUE_MEDIATOR, relaxed); - - if (unlikely(head == NULL)) { - // The first xchg on the tail will tell the enqueueing thread that it - // is safe to blindly write out to the head pointer. A cmpxchg honors - // the algorithm. - if (unlikely(!os_atomic_cmpxchg2o(dq, dq_items_head, - DISPATCH_ROOT_QUEUE_MEDIATOR, NULL, relaxed))) { - goto start; - } - if (unlikely(dq->dq_items_tail)) { // - if (__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, - _dispatch_root_queue_head_tail_quiesced)) { - goto start; - } - } - _dispatch_root_queue_debug("no work on global queue: %p", dq); - return NULL; - } - - if (unlikely(head == DISPATCH_ROOT_QUEUE_MEDIATOR)) { - // This thread lost the race for ownership of the queue. - if (likely(__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, - _dispatch_root_queue_mediator_is_gone))) { - goto start; - } - return NULL; - } - - // Restore the head pointer to a sane value before returning. - // If 'next' is NULL, then this item _might_ be the last item. - next = head->do_next; - - if (unlikely(!next)) { - os_atomic_store2o(dq, dq_items_head, NULL, relaxed); - // 22708742: set tail to NULL with release, so that NULL write to head - // above doesn't clobber head from concurrent enqueuer - if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { - // both head and tail are NULL now - goto out; - } - // There must be a next item now. - next = os_mpsc_get_next(head, do_next); - } - - os_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_root_queue_poke(dq, 1, 0); -out: - return head; -} - -static void -_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO) -{ - dispatch_queue_global_t rq = ddi->ddi_stashed_rq; - dispatch_queue_t dq = ddi->ddi_stashed_dou._dq; - _dispatch_queue_set_current(rq); - - dispatch_invoke_context_s dic = { }; - dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | - DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH; - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); - uint64_t dq_state; - - _dispatch_init_basepri_wlh(rq->dq_priority); - ddi->ddi_wlh_servicing = true; -retry: - dispatch_assert(ddi->ddi_wlh_needs_delete); - _dispatch_trace_item_pop(rq, dq); - - if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { - dx_invoke(dq, &dic, flags); -#if DISPATCH_USE_KEVENT_WORKLOOP - // - // dx_invoke() will always return `dq` unlocked or locked by another - // thread, and either have consumed the +2 or transferred it to the - // other thread. 
- // -#endif - if (!ddi->ddi_wlh_needs_delete) { -#if DISPATCH_USE_KEVENT_WORKLOOP - // - // The fate of the workloop thread request has already been dealt - // with, which can happen for 4 reasons, for which we just want - // to go park and skip trying to unregister the thread request: - // - the workloop target has been changed - // - the workloop has been re-enqueued because of narrowing - // - the workloop has been re-enqueued on the manager queue - // - the workloop ownership has been handed off to a sync owner - // -#endif - goto park; - } -#if DISPATCH_USE_KEVENT_WORKLOOP - // - // The workloop has been drained to completion or suspended. - // dx_invoke() has cleared the enqueued bit before it returned. - // - // Since a dispatch_set_target_queue() could occur between the unlock - // and our reload of `dq_state` (rdar://32671286) we need to re-assess - // the workloop-ness of the queue. If it's not a workloop anymore, - // _dispatch_event_loop_leave_immediate() will have handled the kevent - // deletion already. - // - // Then, we check one last time that the queue is still not enqueued, - // in which case we attempt to quiesce it. - // - // If we find it enqueued again, it means someone else has been - // enqueuing concurrently and has made a thread request that coalesced - // with ours, but since dx_invoke() cleared the enqueued bit, - // the other thread didn't realize that and added a +1 ref count. - // Take over that +1, and add our own to make the +2 this loop expects, - // and drain again. - // -#endif // DISPATCH_USE_KEVENT_WORKLOOP - dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 - goto park; - } - if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { - _dispatch_retain(dq); - _dispatch_trace_item_push(dq->do_targetq, dq); - goto retry; - } - } else { -#if DISPATCH_USE_KEVENT_WORKLOOP - // - // The workloop enters this function with a +2 refcount, however we - // couldn't acquire the lock due to suspension or discovering that - // the workloop was locked by a sync owner. - // - // We need to give up, and _dispatch_event_loop_leave_deferred() - // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to - // tell the kernel to stop driving this thread request. We leave - // a +1 with the thread request, and consume the extra +1 we have. 
- // -#endif - if (_dq_state_is_suspended(dq_state)) { - dispatch_assert(!_dq_state_is_enqueued(dq_state)); - _dispatch_release_2_no_dispose(dq); - } else { - dispatch_assert(_dq_state_is_enqueued(dq_state)); - dispatch_assert(_dq_state_drain_locked(dq_state)); - _dispatch_release_no_dispose(dq); - } - } - - _dispatch_event_loop_leave_deferred(ddi, dq_state); - -park: - // event thread that could steal - _dispatch_perfmon_end(perfmon_thread_event_steal); - _dispatch_clear_basepri(); - _dispatch_queue_set_current(NULL); - - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); -} - -static void -_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO) -{ - dispatch_queue_global_t rq = ddi->ddi_stashed_rq; - _dispatch_queue_set_current(rq); - _dispatch_trace_runtime_event(worker_unpark, NULL, 0); - - dispatch_invoke_context_s dic = { }; - dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | - DISPATCH_INVOKE_REDIRECTING_DRAIN; -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_push(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); - _dispatch_init_basepri(rq->dq_priority); - - _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq); - - // event thread that could steal - _dispatch_perfmon_end(perfmon_thread_event_steal); -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_clear_basepri(); - _dispatch_queue_set_current(NULL); - - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); -} -#endif - -DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) -static void -_dispatch_root_queue_drain(dispatch_queue_global_t dq, - dispatch_priority_t pri, dispatch_invoke_flags_t flags) -{ -#if DISPATCH_DEBUG - dispatch_queue_t cq; - if (unlikely(cq = _dispatch_queue_get_current())) { - DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); - } -#endif - _dispatch_queue_set_current(dq); - _dispatch_init_basepri(pri); - _dispatch_adopt_wlh_anon(); - - struct dispatch_object_s *item; - bool reset = false; - dispatch_invoke_context_s dic = { }; -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_push(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); - _dispatch_perfmon_start(); - while (likely(item = _dispatch_root_queue_drain_one(dq))) { - if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop_inline(item, &dic, flags, dq); - reset = _dispatch_reset_basepri_override(); - if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { - break; - } - } - - // overcommit or not. 
worker thread - if (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { - _dispatch_perfmon_end(perfmon_thread_worker_oc); - } else { - _dispatch_perfmon_end(perfmon_thread_worker_non_oc); - } - -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_wlh(); - _dispatch_clear_basepri(); - _dispatch_queue_set_current(NULL); -} - -#if !DISPATCH_USE_INTERNAL_WORKQUEUE -static void -_dispatch_worker_thread2(pthread_priority_t pp) -{ - bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_global_t dq; - - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); - - _dispatch_introspection_thread_add(); - _dispatch_trace_runtime_event(worker_unpark, dq, 0); - - int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); - dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq, dq->dq_priority, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); - _dispatch_trace_runtime_event(worker_park, NULL, 0); -} -#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE - -#if DISPATCH_USE_PTHREAD_POOL -static inline void -_dispatch_root_queue_init_pthread_pool(dispatch_queue_global_t dq, - int pool_size, dispatch_priority_t pri) -{ - dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - int thread_pool_size = DISPATCH_WORKQ_MAX_PTHREAD_COUNT; - if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { - thread_pool_size = (int32_t)dispatch_hw_config(active_cpus); - } - if (pool_size && pool_size < thread_pool_size) thread_pool_size = pool_size; - dq->dgq_thread_pool_size = thread_pool_size; - qos_class_t cls = _dispatch_qos_to_qos_class(_dispatch_priority_qos(pri) ?: - _dispatch_priority_fallback_qos(pri)); - if (cls) { -#if !defined(_WIN32) - pthread_attr_t *attr = &pqc->dpq_thread_attr; - int r = pthread_attr_init(attr); - dispatch_assume_zero(r); - r = pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED); - dispatch_assume_zero(r); -#endif // !defined(_WIN32) -#if HAVE_PTHREAD_WORKQUEUE_QOS - r = pthread_attr_set_qos_class_np(attr, cls, 0); - dispatch_assume_zero(r); -#endif // HAVE_PTHREAD_WORKQUEUE_QOS - } - _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; - pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); - _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); - _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -static void * -_dispatch_worker_thread(void *context) -{ - dispatch_queue_global_t dq = context; - dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - - int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); - if (unlikely(pending < 0)) { - DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); - } - - if (pqc->dpq_observer_hooks.queue_will_execute) { - _dispatch_set_pthread_root_queue_observer_hooks( - &pqc->dpq_observer_hooks); - } - if (pqc->dpq_thread_configure) { - pqc->dpq_thread_configure(); - } - -#if !defined(_WIN32) - // workaround tweaks the kernel workqueue does for us - _dispatch_sigmask(); -#endif - _dispatch_introspection_thread_add(); - - const int64_t timeout = 5ull * NSEC_PER_SEC; - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_priority_t pri = 
dq->dq_priority; - - // If the queue is neither - // - the manager - // - with a fallback set - // - with a requested QoS or QoS floor - // then infer the basepri from the current priority. - if ((pri & (DISPATCH_PRIORITY_FLAG_MANAGER | - DISPATCH_PRIORITY_FLAG_FALLBACK | - DISPATCH_PRIORITY_FLAG_FLOOR | - DISPATCH_PRIORITY_REQUESTED_MASK)) == 0) { - pri &= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { - pri |= _dispatch_priority_from_pp(pp); - } else { - pri |= _dispatch_priority_make_override(DISPATCH_QOS_SATURATED); - } - } - -#if DISPATCH_USE_INTERNAL_WORKQUEUE - bool monitored = ((pri & (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | - DISPATCH_PRIORITY_FLAG_MANAGER)) == 0); - if (monitored) _dispatch_workq_worker_register(dq); -#endif - - do { - _dispatch_trace_runtime_event(worker_unpark, dq, 0); - _dispatch_root_queue_drain(dq, pri, DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_reset_priority_and_voucher(pp, NULL); - _dispatch_trace_runtime_event(worker_park, NULL, 0); - } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, - dispatch_time(0, timeout)) == 0); - -#if DISPATCH_USE_INTERNAL_WORKQUEUE - if (monitored) _dispatch_workq_worker_unregister(dq); -#endif - (void)os_atomic_inc2o(dq, dgq_thread_pool_size, release); - _dispatch_root_queue_poke(dq, 1, 0); - _dispatch_release(dq); // retained in _dispatch_root_queue_poke_slow - return NULL; -} -#if defined(_WIN32) -static unsigned WINAPI -_dispatch_worker_thread_thunk(LPVOID lpParameter) -{ - _dispatch_worker_thread(lpParameter); - return 0; -} -#endif // defined(_WIN32) -#endif // DISPATCH_USE_PTHREAD_POOL - -DISPATCH_NOINLINE -void -_dispatch_root_queue_wakeup(dispatch_queue_global_t dq, - DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) -{ - if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { - DISPATCH_INTERNAL_CRASH(dq->dq_priority, - "Don't try to wake up or override a root queue"); - } - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } -} - -DISPATCH_NOINLINE -void -_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, - dispatch_qos_t qos) -{ -#if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (unlikely(ddi && ddi->ddi_can_stash)) { - dispatch_object_t old_dou = ddi->ddi_stashed_dou; - dispatch_priority_t rq_overcommit; - rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - - if (likely(!old_dou._do || rq_overcommit)) { - dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; - dispatch_qos_t old_qos = ddi->ddi_stashed_qos; - ddi->ddi_stashed_rq = rq; - ddi->ddi_stashed_dou = dou; - ddi->ddi_stashed_qos = qos; - _dispatch_debug("deferring item %p, rq %p, qos %d", - dou._do, rq, qos); - if (rq_overcommit) { - ddi->ddi_can_stash = false; - } - if (likely(!old_dou._do)) { - return; - } - // push the previously stashed item - qos = old_qos; - rq = old_rq; - dou = old_dou; - } - } -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (_dispatch_root_queue_push_needs_override(rq, qos)) { - return _dispatch_root_queue_push_override(rq, dou, qos); - } -#else - (void)qos; -#endif - _dispatch_root_queue_push_inline(rq, dou, dou, 1); -} - -#pragma mark - -#pragma mark dispatch_pthread_root_queue -#if DISPATCH_USE_PTHREAD_ROOT_QUEUES - -static dispatch_queue_global_t -_dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks) -{ - 
dispatch_queue_pthread_root_t dpq; - dispatch_queue_flags_t dqf = 0; - int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? - (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; - - if (label) { - const char *tmp = _dispatch_strdup_if_mutable(label); - if (tmp != label) { - dqf |= DQF_LABEL_NEEDS_FREE; - label = tmp; - } - } - - dpq = _dispatch_queue_alloc(queue_pthread_root, dqf, - DISPATCH_QUEUE_WIDTH_POOL, 0)._dpq; - dpq->dq_label = label; - dpq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; - dpq->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dpq->do_ctxt = &dpq->dpq_ctxt; - - dispatch_pthread_root_queue_context_t pqc = &dpq->dpq_ctxt; - _dispatch_root_queue_init_pthread_pool(dpq->_as_dgq, pool_size, - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); - -#if !defined(_WIN32) - if (attr) { - memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); - _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); - } else { - (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); - } - (void)dispatch_assume_zero(pthread_attr_setdetachstate( - &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); -#else // defined(_WIN32) - dispatch_assert(attr == NULL); -#endif // defined(_WIN32) - if (configure) { - pqc->dpq_thread_configure = _dispatch_Block_copy(configure); - } - if (observer_hooks) { - pqc->dpq_observer_hooks = *observer_hooks; - } - _dispatch_object_debug(dpq, "%s", __func__); - return _dispatch_trace_queue_create(dpq)._dgq; -} - -dispatch_queue_global_t -dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure) -{ - return _dispatch_pthread_root_queue_create(label, flags, attr, configure, - NULL); -} - -#if DISPATCH_IOHID_SPI -dispatch_queue_global_t -_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label, - unsigned long flags, const pthread_attr_t *attr, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks, - dispatch_block_t configure) -{ - if (!observer_hooks->queue_will_execute || - !observer_hooks->queue_did_execute) { - DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks"); - } - return _dispatch_pthread_root_queue_create(label, flags, attr, configure, - observer_hooks); -} - -bool -_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( - dispatch_queue_t dq) // rdar://problem/18033810 -{ - if (dq->dq_width != 1) { - DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); - } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - return _dq_state_drain_locked_by_self(dq_state); -} -#endif - -dispatch_queue_global_t -dispatch_pthread_root_queue_copy_current(void) -{ - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (!dq) return NULL; - while (unlikely(dq->do_targetq)) { - dq = dq->do_targetq; - } - if (dx_type(dq) != DISPATCH_QUEUE_PTHREAD_ROOT_TYPE) { - return NULL; - } - _os_object_retain_with_resurrect(dq->_as_os_obj); - return upcast(dq)._dgq; -} - -void -_dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, - bool *allow_free) -{ - dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_trace_queue_dispose(dq); - -#if !defined(_WIN32) - pthread_attr_destroy(&pqc->dpq_thread_attr); -#endif - _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); - if (pqc->dpq_thread_configure) { - Block_release(pqc->dpq_thread_configure); - } - dq->do_targetq = _dispatch_get_default_queue(false); - 
_dispatch_lane_class_dispose(dq, allow_free); -} - -#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES -#pragma mark - -#pragma mark dispatch_runloop_queue - -DISPATCH_STATIC_GLOBAL(bool _dispatch_program_is_probably_callback_driven); - -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) -DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_main_q_handle_pred); - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) -{ -#if TARGET_OS_MAC - return MACH_PORT_VALID(handle); -#elif defined(__linux__) - return handle >= 0; -#elif defined(_WIN32) - return handle != NULL; -#else -#error "runloop support not implemented on this platform" -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_runloop_handle_t -_dispatch_runloop_queue_get_handle(dispatch_lane_t dq) -{ -#if TARGET_OS_MAC - return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); -#elif defined(__linux__) - // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL - return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; -#elif defined(_WIN32) - return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); -#else -#error "runloop support not implemented on this platform" -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_runloop_queue_set_handle(dispatch_lane_t dq, - dispatch_runloop_handle_t handle) -{ -#if TARGET_OS_MAC - dq->do_ctxt = (void *)(uintptr_t)handle; -#elif defined(__linux__) - // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL - dq->do_ctxt = (void *)(uintptr_t)(handle + 1); -#elif defined(_WIN32) - dq->do_ctxt = (void *)(uintptr_t)handle; -#else -#error "runloop support not implemented on this platform" -#endif -} - -static void -_dispatch_runloop_queue_handle_init(void *ctxt) -{ - dispatch_lane_t dq = (dispatch_lane_t)ctxt; - dispatch_runloop_handle_t handle; - - _dispatch_fork_becomes_unsafe(); - -#if TARGET_OS_MAC - mach_port_options_t opts = { - .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT, - }; - mach_port_context_t guard = (uintptr_t)dq; - kern_return_t kr; - mach_port_t mp; - - if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { - opts.flags |= MPO_QLIMIT; - opts.mpl.mpl_qlimit = 1; - } - - kr = mach_port_construct(mach_task_self(), &opts, guard, &mp); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - - handle = mp; -#elif defined(__linux__) - int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - if (fd == -1) { - int err = errno; - switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "kernel is out of memory"); - break; - default: - DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); - break; - } - } - handle = fd; -#elif defined(_WIN32) - HANDLE hEvent; - hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, - /*bInitialState=*/FALSE, NULL); - if (hEvent == NULL) { - DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); - } - handle = hEvent; -#else -#error "runloop support not implemented on this platform" -#endif - _dispatch_runloop_queue_set_handle(dq, handle); - - _dispatch_program_is_probably_callback_driven = true; -} - -static void -_dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) -{ - dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); - if (!_dispatch_runloop_handle_is_valid(handle)) { - 
return; - } - dq->do_ctxt = NULL; -#if TARGET_OS_MAC - mach_port_t mp = (mach_port_t)handle; - mach_port_context_t guard = (uintptr_t)dq; - kern_return_t kr; - kr = mach_port_destruct(mach_task_self(), mp, -1, guard); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -#elif defined(__linux__) - int rc = close(handle); - (void)dispatch_assume_zero(rc); -#elif defined(_WIN32) - BOOL bSuccess; - bSuccess = CloseHandle(handle); - (void)dispatch_assume(bSuccess); -#else -#error "runloop support not implemented on this platform" -#endif -} -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) - -#if DISPATCH_COCOA_COMPAT -static inline void -_dispatch_runloop_queue_class_poke(dispatch_lane_t dq) -{ - dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); - if (!_dispatch_runloop_handle_is_valid(handle)) { - return; - } - - _dispatch_trace_runtime_event(worker_request, dq, 1); -#if HAVE_MACH - mach_port_t mp = handle; - kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); - switch (kr) { - case MACH_SEND_TIMEOUT: - case MACH_SEND_TIMED_OUT: - case MACH_SEND_INVALID_DEST: - break; - default: - (void)dispatch_assume_zero(kr); - break; - } -#elif defined(__linux__) - int result; - do { - result = eventfd_write(handle, 1); - } while (result == -1 && errno == EINTR); - (void)dispatch_assume_zero(result); -#elif defined(_WIN32) - BOOL bSuccess; - bSuccess = SetEvent(handle); - (void)dispatch_assume(bSuccess); -#else -#error "runloop support not implemented on this platform" -#endif -} - -DISPATCH_NOINLINE -static void -_dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) -{ - // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have - // a release barrier and that when runloop queues stop being thread-bound - // they have a non optional wake-up to start being a "normal" queue - // either in _dispatch_runloop_queue_xref_dispose, - // or in _dispatch_queue_cleanup2() for the main thread. 
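
On each platform, _dispatch_runloop_queue_class_poke() above reduces to one small "kick the handle" primitive: a zero-timeout mach message, an eventfd write, or SetEvent(). The Linux branch, isolated as a standalone helper (runloop_poke is an invented name):

    #include <sys/eventfd.h>
    #include <errno.h>

    /* Bump the eventfd counter so a runloop blocked on the handle wakes up;
     * retry when a signal interrupts the write, as the branch above does. */
    static int
    runloop_poke(int evfd)
    {
        int rc;
        do {
            rc = eventfd_write(evfd, 1);
        } while (rc == -1 && errno == EINTR);
        return rc;  /* 0 on success, -1 with errno set otherwise */
    }
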
- uint64_t old_state, new_state; - - if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - } - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = _dq_state_merge_qos(old_state, qos); - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(goto no_change); - } - }); - - dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); - if (qos > dq_qos) { - mach_port_t owner = _dq_state_drain_owner(new_state); - pthread_priority_t pp = _dispatch_qos_to_pp(qos); - _dispatch_thread_override_start(owner, pp, dq); - if (_dq_state_max_qos(old_state) > dq_qos) { - _dispatch_thread_override_end(owner, dq); - } - } -no_change: - _dispatch_runloop_queue_class_poke(dq); - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } -} - -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) - -DISPATCH_ALWAYS_INLINE -static inline dispatch_qos_t -_dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) -{ - uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | - DISPATCH_QUEUE_RECEIVED_OVERRIDE; - old_state = os_atomic_and_orig2o(dq, dq_state, ~clear_bits, relaxed); - return _dq_state_max_qos(old_state); -} - -void -_dispatch_runloop_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) -{ - if (unlikely(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { - // - return _dispatch_lane_wakeup(dq, qos, flags); - } - - if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { - os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); - } - if (_dispatch_queue_class_probe(dq)) { - return _dispatch_runloop_queue_poke(dq, qos, flags); - } - - qos = _dispatch_runloop_queue_reset_max_qos(dq); - if (qos) { - mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); - if (_dispatch_queue_class_probe(dq)) { - _dispatch_runloop_queue_poke(dq, qos, flags); - } - _dispatch_thread_override_end(owner, dq); - return; - } - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_main_queue_update_priority_from_thread(void) -{ - dispatch_queue_main_t dq = &_dispatch_main_q; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - mach_port_t owner = _dq_state_drain_owner(dq_state); - - dispatch_priority_t main_pri = - _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); - dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); - dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); - dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); - - // the main thread QoS was adjusted by someone else, learn the new QoS - // and reinitialize _dispatch_main_q.dq_priority - dq->dq_priority = main_pri; - - if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { - // main thread is opted out of QoS and we had an override - return _dispatch_thread_override_end(owner, dq); - } - - if (old_qos < max_qos && max_qos <= main_qos) { - // main QoS was raised, and we had an override which is now useless - return _dispatch_thread_override_end(owner, dq); - } - - if (main_qos < max_qos && max_qos <= old_qos) { - // main thread QoS was lowered, and we actually need an override - pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); - return _dispatch_thread_override_start(owner, pp, dq); - } -} - -static void -_dispatch_main_queue_drain(dispatch_queue_main_t dq) -{ - dispatch_thread_frame_s dtf; - - if (!dq->dq_items_tail) { - return; - } - - 
_dispatch_perfmon_start_notrace(); - if (unlikely(!_dispatch_queue_is_thread_bound(dq))) { - DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" - " after dispatch_main()"); - } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "_dispatch_main_queue_callback_4CF called" - " from the wrong thread"); - } - - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - - // hide the frame chaining when CFRunLoop - // drains the main runloop, as this should not be observable that way - _dispatch_adopt_wlh_anon(); - _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); - - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_priority_t pri = _dispatch_priority_from_pp(pp); - dispatch_qos_t qos = _dispatch_priority_qos(pri); - voucher_t voucher = _voucher_copy(); - - if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) { - _dispatch_main_queue_update_priority_from_thread(); - } - dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - - dispatch_invoke_context_s dic = { }; - struct dispatch_object_s *dc, *next_dc, *tail; - dc = os_mpsc_capture_snapshot(os_mpsc(dq, dq_items), &tail); - do { - next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); - _dispatch_continuation_pop_inline(dc, &dic, - DISPATCH_INVOKE_THREAD_BOUND, dq); - } while ((dc = next_dc)); - - dx_wakeup(dq->_as_dq, 0, 0); - _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_reset_priority_and_voucher(pp, voucher); - _dispatch_thread_frame_pop(&dtf); - _dispatch_reset_wlh(); - _dispatch_force_cache_cleanup(); - _dispatch_perfmon_end_notrace(); -} - -static bool -_dispatch_runloop_queue_drain_one(dispatch_lane_t dq) -{ - if (!dq->dq_items_tail) { - return false; - } - _dispatch_perfmon_start_notrace(); - dispatch_thread_frame_s dtf; - bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse(); - _dispatch_thread_frame_push(&dtf, dq); - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_priority_t pri = _dispatch_priority_from_pp(pp); - voucher_t voucher = _voucher_copy(); - dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - - dispatch_invoke_context_s dic = { }; - struct dispatch_object_s *dc, *next_dc; - dc = _dispatch_queue_get_head(dq); - next_dc = _dispatch_queue_pop_head(dq, dc); - _dispatch_continuation_pop_inline(dc, &dic, - DISPATCH_INVOKE_THREAD_BOUND, dq); - - if (!next_dc) { - dx_wakeup(dq, 0, 0); - } - - _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_reset_priority_and_voucher(pp, voucher); - _dispatch_thread_frame_pop(&dtf); - if (should_reset_wlh) _dispatch_reset_wlh(); - _dispatch_force_cache_cleanup(); - _dispatch_perfmon_end_notrace(); - return next_dc; -} - -dispatch_queue_serial_t -_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) -{ - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_lane_t dq; - - if (unlikely(flags)) { - return DISPATCH_BAD_INPUT; - } - dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), - sizeof(struct dispatch_lane_s)); - _dispatch_queue_init(dq, DQF_THREAD_BOUND, 1, - DISPATCH_QUEUE_ROLE_BASE_ANON); - dq->do_targetq = 
_dispatch_get_default_queue(true); - dq->dq_label = label ? label : "runloop-queue"; // no-copy contract - if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { - dq->dq_priority = _dispatch_priority_from_pp_strip_flags(pp); - } - _dispatch_runloop_queue_handle_init(dq); - _dispatch_queue_set_bound_thread(dq); - _dispatch_object_debug(dq, "%s", __func__); - return _dispatch_trace_queue_create(dq)._dl; -} - -void -_dispatch_runloop_queue_xref_dispose(dispatch_lane_t dq) -{ - _dispatch_object_debug(dq, "%s", __func__); - - dispatch_qos_t qos = _dispatch_runloop_queue_reset_max_qos(dq); - _dispatch_queue_clear_bound_thread(dq); - dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); - if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); -} - -void -_dispatch_runloop_queue_dispose(dispatch_lane_t dq, bool *allow_free) -{ - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_trace_queue_dispose(dq); - _dispatch_runloop_queue_handle_dispose(dq); - _dispatch_lane_class_dispose(dq, allow_free); -} - -bool -_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) -{ - if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { - DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); - } - dispatch_retain(dq); - bool r = _dispatch_runloop_queue_drain_one(upcast(dq)._dl); - dispatch_release(dq); - return r; -} - -void -_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) -{ - if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { - DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); - } - _dispatch_runloop_queue_wakeup(upcast(dq)._dl, 0, false); -} - -#if TARGET_OS_MAC || defined(_WIN32) -dispatch_runloop_handle_t -_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) -{ - if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { - DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); - } - return _dispatch_runloop_queue_get_handle(upcast(dq)._dl); -} -#endif - -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) -#pragma mark - -#pragma mark dispatch_main_queue -#if DISPATCH_COCOA_COMPAT || defined(_WIN32) - -dispatch_runloop_handle_t -_dispatch_get_main_queue_handle_4CF(void) -{ - dispatch_queue_main_t dq = &_dispatch_main_q; - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - return _dispatch_runloop_queue_get_handle(dq->_as_dl); -} - -dispatch_runloop_handle_t -_dispatch_get_main_queue_port_4CF(void) -{ - return _dispatch_get_main_queue_handle_4CF(); -} - -void -_dispatch_main_queue_callback_4CF( - void *ignored DISPATCH_UNUSED) -{ - // the main queue cannot be suspended and no-one looks at this bit - // so abuse it to avoid dirtying more memory - - if (_dispatch_main_q.dq_side_suspend_cnt) { - return; - } - _dispatch_main_q.dq_side_suspend_cnt = true; - _dispatch_main_queue_drain(&_dispatch_main_q); - _dispatch_main_q.dq_side_suspend_cnt = false; -} - -#endif // DISPATCH_COCOA_COMPAT || defined(_WIN32) - -DISPATCH_NOINLINE -void -_dispatch_main_queue_push(dispatch_queue_main_t dq, dispatch_object_t dou, - dispatch_qos_t qos) -{ - // Same as _dispatch_lane_push() but without the refcounting due to being - // a global object - if (_dispatch_queue_push_item(dq, dou)) { - return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); - } - - qos = _dispatch_queue_push_qos(dq, qos); - if (_dispatch_queue_need_override(dq, qos)) { - return dx_wakeup(dq, qos, 0); - } -} - -void -_dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) -{ -#if DISPATCH_COCOA_COMPAT 
- if (_dispatch_queue_is_thread_bound(dq)) { - return _dispatch_runloop_queue_wakeup(dq->_as_dl, qos, flags); - } -#endif - return _dispatch_lane_wakeup(dq, qos, flags); -} - -#if !defined(_WIN32) -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_sigsuspend(void) -{ - static const sigset_t mask; - pthread_sigmask(SIG_SETMASK, &mask, NULL); - for (;;) { - sigsuspend(&mask); - } -} -#endif // !defined(_WIN32) - -DISPATCH_NORETURN -static void -_dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) -{ - // never returns, so burn bridges behind us - _dispatch_clear_stack(0); -#if defined(_WIN32) - Sleep(INFINITE); -#else - _dispatch_sigsuspend(); -#endif -} - -void -dispatch_main(void) -{ - _dispatch_root_queues_init(); -#if HAVE_PTHREAD_MAIN_NP - if (pthread_main_np()) { -#endif - _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); - _dispatch_program_is_probably_callback_driven = true; - _dispatch_ktrace0(ARIADNE_ENTER_DISPATCH_MAIN_CODE); -#ifdef __linux__ - // On Linux, if the main thread calls pthread_exit, the process becomes a zombie. - // To avoid that, just before calling pthread_exit we register a TSD destructor - // that will call _dispatch_sig_thread -- thus capturing the main thread in sigsuspend. - // This relies on an implementation detail (currently true in glibc) that TSD destructors - // will be called in the order of creation to cause all the TSD cleanup functions to - // run before the thread becomes trapped in sigsuspend. - pthread_key_t dispatch_main_key; - pthread_key_create(&dispatch_main_key, _dispatch_sig_thread); - pthread_setspecific(dispatch_main_key, &dispatch_main_key); - _dispatch_sigmask(); -#endif -#if !defined(_WIN32) - pthread_exit(NULL); -#else - _endthreadex(0); -#endif // defined(_WIN32) - DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); -#if HAVE_PTHREAD_MAIN_NP - } - DISPATCH_CLIENT_CRASH(0, "dispatch_main() must be called on the main thread"); -#endif -} - -DISPATCH_NOINLINE -static void -_dispatch_queue_cleanup2(void) -{ - dispatch_queue_main_t dq = &_dispatch_main_q; - uint64_t old_state, new_state; - - // Turning the main queue from a runloop queue into an ordinary serial queue - // is a 3 steps operation: - // 1. finish taking the main queue lock the usual way - // 2. clear the THREAD_BOUND flag - // 3. do a handoff - // - // If an enqueuer executes concurrently, he may do the wakeup the runloop - // way, because he still believes the queue to be thread-bound, but the - // dirty bit will force this codepath to notice the enqueue, and the usual - // lock transfer will do the proper wakeup. - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { - new_state = old_state & ~DISPATCH_QUEUE_DIRTY; - new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; - new_state += DISPATCH_QUEUE_IN_BARRIER; - }); - _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND); - _dispatch_lane_barrier_complete(dq, 0, 0); - - // overload the "probably" variable to mean that dispatch_main() or - // similar non-POSIX API was called - // this has to run before the DISPATCH_COCOA_COMPAT below - // See dispatch_main for call to _dispatch_sig_thread on linux. 
-#ifndef __linux__ - if (_dispatch_program_is_probably_callback_driven) { - pthread_attr_t attr; - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - pthread_t tid; - int r = pthread_create(&tid, &attr, (void*)_dispatch_sig_thread, NULL); - if (unlikely(r)) { - DISPATCH_CLIENT_CRASH(r, "Unable to create signal thread"); - } - pthread_attr_destroy(&attr); - // this used to be here as a workaround for 6778970 - // but removing it had bincompat fallouts :'( - sleep(1); - } -#endif - -#if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - _dispatch_runloop_queue_handle_dispose(dq->_as_dl); -#endif -} - -static void DISPATCH_TSD_DTOR_CC -_dispatch_queue_cleanup(void *ctxt) -{ - if (ctxt == &_dispatch_main_q) { - return _dispatch_queue_cleanup2(); - } - // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_INTERNAL_CRASH(ctxt, - "Premature thread exit while a dispatch queue is running"); -} - -static void DISPATCH_TSD_DTOR_CC -_dispatch_wlh_cleanup(void *ctxt) -{ - // POSIX defines that destructors are only called if 'ctxt' is non-null - dispatch_queue_t wlh; - wlh = (dispatch_queue_t)((uintptr_t)ctxt & ~DISPATCH_WLH_STORAGE_REF); - _dispatch_queue_release_storage(wlh); -} - -DISPATCH_NORETURN -static void DISPATCH_TSD_DTOR_CC -_dispatch_deferred_items_cleanup(void *ctxt) -{ - // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_INTERNAL_CRASH(ctxt, - "Premature thread exit with unhandled deferred items"); -} - -DISPATCH_NORETURN -static DISPATCH_TSD_DTOR_CC void -_dispatch_frame_cleanup(void *ctxt) -{ - // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_INTERNAL_CRASH(ctxt, - "Premature thread exit while a dispatch frame is active"); -} - -DISPATCH_NORETURN -static void DISPATCH_TSD_DTOR_CC -_dispatch_context_cleanup(void *ctxt) -{ - // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_INTERNAL_CRASH(ctxt, - "Premature thread exit while a dispatch context is set"); -} -#pragma mark - -#pragma mark dispatch_init - -static void -_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) -{ - _dispatch_fork_becomes_unsafe(); -#if DISPATCH_USE_INTERNAL_WORKQUEUE - size_t i; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0, - _dispatch_root_queues[i].dq_priority); - } -#else - int wq_supported = _pthread_workqueue_supported(); - int r = ENOTSUP; - - if (!(wq_supported & WORKQ_FEATURE_MAINTENANCE)) { - DISPATCH_INTERNAL_CRASH(wq_supported, - "QoS Maintenance support required"); - } - -#if DISPATCH_USE_KEVENT_SETUP - struct pthread_workqueue_config cfg = { - .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, - .flags = 0, - .workq_cb = 0, - .kevent_cb = 0, - .workloop_cb = 0, - .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, -#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2 - .queue_label_offs = dispatch_queue_offsets.dqo_label, -#endif - }; -#endif - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunreachable-code" - if (unlikely(!_dispatch_kevent_workqueue_enabled)) { -#if DISPATCH_USE_KEVENT_SETUP - cfg.workq_cb = _dispatch_worker_thread2; - r = pthread_workqueue_setup(&cfg, sizeof(cfg)); -#else - r = _pthread_workqueue_init(_dispatch_worker_thread2, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP -#if DISPATCH_USE_KEVENT_WORKLOOP - } 
else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { -#if DISPATCH_USE_KEVENT_SETUP - cfg.workq_cb = _dispatch_worker_thread2; - cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; - cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; - r = pthread_workqueue_setup(&cfg, sizeof(cfg)); -#else - r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, - (pthread_workqueue_function_kevent_t) - _dispatch_kevent_worker_thread, - (pthread_workqueue_function_workloop_t) - _dispatch_workloop_worker_thread, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP -#endif // DISPATCH_USE_KEVENT_WORKLOOP -#if DISPATCH_USE_KEVENT_WORKQUEUE - } else if (wq_supported & WORKQ_FEATURE_KEVENT) { -#if DISPATCH_USE_KEVENT_SETUP - cfg.workq_cb = _dispatch_worker_thread2; - cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; - r = pthread_workqueue_setup(&cfg, sizeof(cfg)); -#else - r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, - (pthread_workqueue_function_kevent_t) - _dispatch_kevent_worker_thread, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP -#endif - } else { - DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); - } -#pragma clang diagnostic pop - - if (r != 0) { - DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, - "Root queue initialization failed"); - } -#endif // DISPATCH_USE_INTERNAL_WORKQUEUE -} - -DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_root_queues_pred); -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_root_queues_init(void) -{ - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -} - -DISPATCH_EXPORT DISPATCH_NOTHROW -void -libdispatch_init(void) -{ - dispatch_assert(sizeof(struct dispatch_apply_s) <= - DISPATCH_CONTINUATION_SIZE); - - if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) { - _dispatch_mode |= DISPATCH_MODE_STRICT; - } -#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; - } else if (getpid() == 1 || - !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; - } -#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - - -#if DISPATCH_DEBUG || DISPATCH_PROFILE -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")) { - _dispatch_kevent_workqueue_enabled = false; - } -#endif -#endif - -#if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main()); - _dispatch_main_q.dq_priority = _dispatch_priority_make(qos, 0); -#if DISPATCH_DEBUG - if (!getenv("LIBDISPATCH_DISABLE_SET_QOS")) { - _dispatch_set_qos_class_enabled = 1; - } -#endif -#endif - -#if DISPATCH_USE_THREAD_LOCAL_STORAGE - _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); -#else - _dispatch_thread_key_create(&dispatch_priority_key, NULL); - _dispatch_thread_key_create(&dispatch_r2k_key, NULL); - _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); - _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); - _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); - _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); - 
_dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, - NULL); - _dispatch_thread_key_create(&dispatch_basepri_key, NULL); -#if DISPATCH_INTROSPECTION - _dispatch_thread_key_create(&dispatch_introspection_key , NULL); -#elif DISPATCH_PERF_MON - _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); -#endif - _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); - _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); - _dispatch_thread_key_create(&dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); -#endif - -#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 - _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); -#endif - - _dispatch_queue_set_current(&_dispatch_main_q); - _dispatch_queue_set_bound_thread(&_dispatch_main_q); - -#if DISPATCH_USE_PTHREAD_ATFORK - (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, - dispatch_atfork_parent, dispatch_atfork_child)); -#endif - _dispatch_hw_config_init(); - _dispatch_time_init(); - _dispatch_vtable_init(); - _os_object_init(); - _voucher_init(); - _dispatch_introspection_init(); -} - -#if DISPATCH_USE_THREAD_LOCAL_STORAGE -#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) -#include -#endif -#if !defined(_WIN32) -#include -#endif - -#ifdef SYS_gettid -DISPATCH_ALWAYS_INLINE -static inline pid_t -_gettid(void) -{ - return (pid_t)syscall(SYS_gettid); -} -#elif defined(__FreeBSD__) -DISPATCH_ALWAYS_INLINE -static inline pid_t -_gettid(void) -{ - return (pid_t)pthread_getthreadid_np(); -} -#elif defined(_WIN32) -DISPATCH_ALWAYS_INLINE -static inline DWORD -_gettid(void) -{ - return GetCurrentThreadId(); -} -#else -#error "SYS_gettid unavailable on this system" -#endif /* SYS_gettid */ - -#define _tsd_call_cleanup(k, f) do { \ - if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ - } while (0) - -#ifdef __ANDROID__ -static void (*_dispatch_thread_detach_callback)(void); - -void -_dispatch_install_thread_detach_callback(void (*cb)(void)) -{ - if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { - DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); - } -} -#endif - -#if defined(_WIN32) -static bool -_dispatch_process_is_exiting(void) -{ - // The goal here is to detect if the current thread is executing cleanup - // code (e.g. FLS destructors) as a result of calling ExitProcess(). Windows - // doesn't provide an official method of getting this information, so we - // take advantage of how ExitProcess() works internally. The first thing - // that it does (according to MSDN) is terminate every other thread in the - // process. Logically, it should not be possible to create more threads - // after this point, and Windows indeed enforces this. Try to create a - // lightweight suspended thread, and if access is denied, assume that this - // is because the process is exiting. - // - // We aren't worried about any race conditions here during process exit. - // Cleanup code is only run on the thread that already called ExitProcess(), - // and every other thread will have been forcibly terminated by the time - // that happens. Additionally, while CreateThread() could conceivably fail - // due to resource exhaustion, the process would already be in a bad state - // if that happens. This is only intended to prevent unwanted cleanup code - // from running, so the worst case is that a thread doesn't clean up after - // itself when the process is about to die anyway. 
- const size_t stack_size = 1; // As small as possible - HANDLE thread = CreateThread(NULL, stack_size, NULL, NULL, - CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, NULL); - if (thread) { - // Although Microsoft recommends against using TerminateThread, it's - // safe to use it here because we know that the thread is suspended and - // it has not executed any code due to a NULL lpStartAddress. There was - // a bug in Windows Server 2003 and Windows XP where the initial stack - // would not be freed, but libdispatch does not support them anyway. - TerminateThread(thread, 0); - CloseHandle(thread); - return false; - } - return GetLastError() == ERROR_ACCESS_DENIED; -} -#endif // defined(_WIN32) - - -void DISPATCH_TSD_DTOR_CC -_libdispatch_tsd_cleanup(void *ctx) -{ -#if defined(_WIN32) - // On Windows, exiting a process will still call FLS destructors for the - // thread that called ExitProcess(). pthreads-based platforms don't call key - // destructors on exit, so be consistent. - if (_dispatch_process_is_exiting()) { - return; - } -#endif // defined(_WIN32) - - struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; - - _tsd_call_cleanup(dispatch_priority_key, NULL); - _tsd_call_cleanup(dispatch_r2k_key, NULL); - - _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); - _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); - _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); - _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); - _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, - NULL); - _tsd_call_cleanup(dispatch_basepri_key, NULL); -#if DISPATCH_INTROSPECTION - _tsd_call_cleanup(dispatch_introspection_key, NULL); -#elif DISPATCH_PERF_MON - _tsd_call_cleanup(dispatch_bcounter_key, NULL); -#endif - _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); - _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); - _tsd_call_cleanup(dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); -#ifdef __ANDROID__ - if (_dispatch_thread_detach_callback) { - _dispatch_thread_detach_callback(); - } -#endif - tsd->tid = 0; -} - -DISPATCH_NOINLINE -void -libdispatch_tsd_init(void) -{ -#if !defined(_WIN32) - pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); -#else - FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); -#endif // defined(_WIN32) - __dispatch_tsd.tid = _gettid(); -} - -DISPATCH_NOTHROW -void -_dispatch_queue_atfork_child(void) -{ - dispatch_queue_main_t main_q = &_dispatch_main_q; - void *crash = (void *)0x100; - size_t i; - - if (_dispatch_queue_is_thread_bound(main_q)) { - _dispatch_queue_set_bound_thread(main_q); - } - - if (!_dispatch_is_multithreaded_inline()) return; - - main_q->dq_items_head = crash; - main_q->dq_items_tail = crash; - - _dispatch_mgr_q.dq_items_head = crash; - _dispatch_mgr_q.dq_items_tail = crash; - - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - _dispatch_root_queues[i].dq_items_head = crash; - _dispatch_root_queues[i].dq_items_tail = crash; - } -} - -DISPATCH_NOINLINE -void -_dispatch_fork_becomes_unsafe_slow(void) -{ - uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, - _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { - DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); - } -} -#endif // TARGET_OS_MAC - -DISPATCH_NOINLINE -void -_dispatch_prohibit_transition_to_multithreaded(bool prohibit) -{ - if (prohibit) { - uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, - 
_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { - DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); - } - } else { - os_atomic_and(&_dispatch_unsafe_fork, - (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - } -} From 82fefaf0a3c9346c282b41ec47b53f61e927ccd6 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Tue, 20 Aug 2019 11:47:29 -0700 Subject: [PATCH 204/249] event: implement source muxing on Windows Implement the cases in `_dispatch_unote_register_muxed()` and `_dispatch_unote_unregister_muxed()` for when multiple event sources are open on a handle and we need to combine them. The test suite doesn't hit these codepaths anywhere and we haven't run into issues with Foundation yet, so I added a dispatch_io_muxed test which opens multiple sources on a file/pipe/socket and checks that events fire correctly. Signed-off-by: Rokhini Prabhu --- src/event/event_windows.c | 108 +++++++++++++++++++++++++++++--------- 1 file changed, 82 insertions(+), 26 deletions(-) diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 3576774b2..ce322258a 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -174,27 +174,62 @@ _dispatch_muxnote_create(dispatch_unote_t du, } static void -_dispatch_muxnote_stop(dispatch_muxnote_t dmn) +_dispatch_muxnote_disarm_events(dispatch_muxnote_t dmn, + enum _dispatch_muxnote_events events) { - if (dmn->dmn_thread) { - // Keep trying to cancel ReadFile() until the thread exits - os_atomic_store(&dmn->dmn_stop, true, relaxed); - SetEvent(dmn->dmn_event); - do { - CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); - } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); - CloseHandle(dmn->dmn_thread); - dmn->dmn_thread = NULL; - } - if (dmn->dmn_threadpool_wait) { - SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); - WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, - /* fCancelPendingCallbacks */ FALSE); - CloseThreadpoolWait(dmn->dmn_threadpool_wait); - dmn->dmn_threadpool_wait = NULL; - } - if (dmn->dmn_handle_type == DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET) { - WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); + long lNetworkEvents; + dmn->dmn_events &= ~events; + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + if ((events & DISPATCH_MUXNOTE_EVENT_READ) && dmn->dmn_thread) { + // Keep trying to cancel ReadFile() until the thread exits + os_atomic_store(&dmn->dmn_stop, true, relaxed); + SetEvent(dmn->dmn_event); + do { + CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); + } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); + CloseHandle(dmn->dmn_thread); + dmn->dmn_thread = NULL; + } + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + lNetworkEvents = dmn->dmn_network_events; + if (events & DISPATCH_MUXNOTE_EVENT_READ) { + lNetworkEvents &= ~FD_READ; + } + if (events & DISPATCH_MUXNOTE_EVENT_WRITE) { + lNetworkEvents &= ~FD_WRITE; + } + if (lNetworkEvents == dmn->dmn_network_events) { + break; + } + int iResult; + if (lNetworkEvents & (FD_READ | FD_WRITE)) { + iResult = WSAEventSelect((SOCKET)dmn->dmn_ident, + (WSAEVENT)dmn->dmn_event, lNetworkEvents); + } else { + lNetworkEvents = 0; + iResult = WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); + } + if (iResult != 0) { + DISPATCH_INTERNAL_CRASH(WSAGetLastError(), "WSAEventSelect"); 
+ } + dmn->dmn_network_events = lNetworkEvents; + if (!lNetworkEvents && dmn->dmn_threadpool_wait) { + SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); + WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, + /* fCancelPendingCallbacks */ FALSE); + CloseThreadpoolWait(dmn->dmn_threadpool_wait); + dmn->dmn_threadpool_wait = NULL; + } + break; } } @@ -389,8 +424,16 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) } if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { _dispatch_muxnote_retain(dmn); - DWORD available = + DWORD available; + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) { + // We can't query a pipe which has a read source open on it + // because the ReadFile() in the background thread might cause + // NtQueryInformationFile() to block + available = 1; + } else { + available = _dispatch_pipe_write_availability((HANDLE)dmn->dmn_ident); + } bSuccess = PostQueuedCompletionStatus(hPort, available, (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_WRITE, (LPOVERLAPPED)dmn); @@ -487,8 +530,12 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) dmn = _dispatch_unote_muxnote_find(dmb, du._du->du_ident, du._du->du_filter); if (dmn) { - WIN_PORT_ERROR(); - DISPATCH_INTERNAL_CRASH(0, "muxnote updating is not supported"); + if (events & ~dmn->dmn_events) { + dmn->dmn_events |= events; + if (_dispatch_io_trigger(dmn) == FALSE) { + return false; + } + } } else { dmn = _dispatch_muxnote_create(du, events); if (!dmn) { @@ -551,9 +598,18 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) } dul->du_muxnote = NULL; - LIST_REMOVE(dmn, dmn_list); - _dispatch_muxnote_stop(dmn); - _dispatch_muxnote_release(dmn); + enum _dispatch_muxnote_events disarmed = 0; + if (LIST_EMPTY(&dmn->dmn_readers_head)) { + disarmed |= DISPATCH_MUXNOTE_EVENT_READ; + } + if (LIST_EMPTY(&dmn->dmn_writers_head)) { + disarmed |= DISPATCH_MUXNOTE_EVENT_WRITE; + } + _dispatch_muxnote_disarm_events(dmn, disarmed); + if (!dmn->dmn_events) { + LIST_REMOVE(dmn, dmn_list); + _dispatch_muxnote_release(dmn); + } _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); return true; From 56dc31bbd1ca66f33e6b2f02bbb04e45ff631adf Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Thu, 22 Aug 2019 06:57:55 -0700 Subject: [PATCH 205/249] Merge pull request #512 from adierking/cross-the-streams event: implement source muxing on Windows Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 10277a45c..374a48bd9 100644 --- a/PATCHES +++ b/PATCHES @@ -522,3 +522,4 @@ github commits starting with 29bdc2f from [68875cb] APPLIED rdar://54572081 [fc73866] APPLIED rdar://54572081 [3cf1bf3] APPLIED rdar://54572081 +[3da29dd] APPLIED rdar://81276248 From d42204403299b05c097bdb79aa4cd69b9c8a3f47 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Sat, 17 Aug 2019 17:40:29 -0700 Subject: [PATCH 206/249] build: remove unnecessary flag to control the linker The `SWIFT_LINKER_FLAGS` option allows the user to specify the linker they want for Swift, and C/C++ have their own flags which permit control of the C/C++ linker. This removes the unnecessary logic for controlling the linker. This simplification also paves the way for migrating further to CMake 3.15+.
Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 13 ------------- cmake/modules/DispatchUtilities.cmake | 19 ------------------- src/CMakeLists.txt | 16 ---------------- 3 files changed, 48 deletions(-) delete mode 100644 cmake/modules/DispatchUtilities.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f3f221e6..ab5d2abde 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,6 @@ include(CheckLibraryExists) include(CheckSymbolExists) include(GNUInstallDirs) include(SwiftSupport) -include(DispatchUtilities) set(SWIFT_LIBDIR "lib" CACHE PATH "Library folder name, defined by swift main buildscript") set(INSTALL_LIBDIR "${SWIFT_LIBDIR}" CACHE PATH "Path where the libraries should be installed") @@ -83,18 +82,6 @@ option(ENABLE_DTRACE "enable dtrace support" "") option(ENABLE_TESTING "build libdispatch tests" ON) -option(USE_LLD_LINKER "use the lld linker" FALSE) - -if(NOT USE_LLD_LINKER AND - (CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR - CMAKE_SYSTEM_NAME STREQUAL Android)) - set(USE_GOLD_LINKER_DEFAULT TRUE) -else() - set(USE_GOLD_LINKER_DEFAULT FALSE) -endif() -option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT}) - option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) diff --git a/cmake/modules/DispatchUtilities.cmake b/cmake/modules/DispatchUtilities.cmake deleted file mode 100644 index fea3622ec..000000000 --- a/cmake/modules/DispatchUtilities.cmake +++ /dev/null @@ -1,19 +0,0 @@ - -function(dispatch_set_linker target) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(CMAKE_HOST_EXECUTABLE_SUFFIX .exe) - endif() - - if(USE_GOLD_LINKER) - set_property(TARGET ${target} - APPEND_STRING - PROPERTY LINK_FLAGS - -fuse-ld=gold${CMAKE_HOST_EXECUTABLE_SUFFIX}) - endif() - if(USE_LLD_LINKER) - set_property(TARGET ${target} - APPEND_STRING - PROPERTY LINK_FLAGS - -fuse-ld=lld${CMAKE_HOST_EXECUTABLE_SUFFIX}) - endif() -endfunction() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f71b68f45..9c4ae4c9c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -116,20 +116,6 @@ if(ENABLE_SWIFT) PROPERTIES POSITION_INDEPENDENT_CODE YES) - if(USE_LLD_LINKER) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(use_ld_flag -use-ld=lld.exe) - else() - set(use_ld_flag -use-ld=lld) - endif() - elseif(USE_GOLD_LINKER) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(use_ld_flag -use-ld=gold.exe) - else() - set(use_ld_flag -use-ld=gold) - endif() - endif() - add_swift_library(swiftDispatch CFLAGS -fblocks @@ -138,7 +124,6 @@ if(ENABLE_SWIFT) module-maps DispatchStubs LINK_FLAGS - ${use_ld_flag} -lDispatchStubs -L $ -lBlocksRuntime @@ -260,7 +245,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) "-Xlinker -dead_strip" "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") endif() -dispatch_set_linker(dispatch) install(TARGETS dispatch From d8aa3c93da97661e9b2434563a76f61228da76d6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 26 Aug 2019 13:23:33 -0700 Subject: [PATCH 207/249] Merge pull request #513 from compnerd/linker-be-gone build: remove unnecessary flag to control the linker Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 374a48bd9..f2cb63d50 100644 --- a/PATCHES +++ b/PATCHES @@ -523,3 +523,4 @@ github commits starting with 29bdc2f from [fc73866] APPLIED rdar://54572081 [3cf1bf3] APPLIED rdar://54572081 
[3da29dd] APPLIED rdar://81276248 +[90a45ce] APPLIED rdar://81276248 From e5c17ba18621e267c0ed09a9d9424b1ea046903d Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 29 Aug 2019 11:04:16 -0700 Subject: [PATCH 208/249] build: place executable content into the root of the build Adjust the output location of the generated executable content to the root of the build tree. This is needed primarily on Windows where there is no concept of a RPATH, and the current directory is scanned for the dependent libraries. Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 6 ++++++ src/CMakeLists.txt | 1 + 2 files changed, 7 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index ab5d2abde..0377b0b39 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,6 +19,12 @@ set(CMAKE_CXX_STANDARD 11) set(CMAKE_C_VISIBILITY_PRESET hidden) +# NOTE(compnerd) this is a horrible workaround for Windows to ensure that the +# tests can run as there is no rpath equivalent and `PATH` is used to lookup the +# libraries. +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) + set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 9c4ae4c9c..2809c11c9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -124,6 +124,7 @@ if(ENABLE_SWIFT) module-maps DispatchStubs LINK_FLAGS + -L $ -lDispatchStubs -L $ -lBlocksRuntime From d8986aad2b7cebc79e86b8905158c4cf88008ac9 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 3 Sep 2019 11:22:41 -0700 Subject: [PATCH 209/249] Merge pull request #515 from compnerd/output-directory build: place executable content into the root of the build Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index f2cb63d50..34e66f9fc 100644 --- a/PATCHES +++ b/PATCHES @@ -524,3 +524,4 @@ github commits starting with 29bdc2f from [3cf1bf3] APPLIED rdar://54572081 [3da29dd] APPLIED rdar://81276248 [90a45ce] APPLIED rdar://81276248 +[37c8c28] APPLIED rdar://81276248 From 7910811868523e4e5818e68311761ea83e44b73c Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 9 Sep 2019 22:27:36 -0700 Subject: [PATCH 210/249] build: sink BlocksRuntime into its own directory This follows the CMake recommendations for the tree layout. It makes it easier to follow the build infrastructure, and prepares the tree for migration to CMake 3.15 to enable Swift support. Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 47 -------------------------------- src/BlocksRuntime/CMakeLists.txt | 34 +++++++++++++++++++++++ src/CMakeLists.txt | 2 ++ 3 files changed, 36 insertions(+), 47 deletions(-) create mode 100644 src/BlocksRuntime/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 0377b0b39..6471c9781 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,53 +116,6 @@ endif() option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) -if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) - set(BlocksRuntime_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/src/BlocksRuntime) - - # NOTE(compnerd) use the `BUILD_SHARED_LIBS` variable to determine what type - # of library to build. If it is true, we will generate shared libraries, - # otherwise we will generate static libraries. 
- add_library(BlocksRuntime - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_sources(BlocksRuntime - PRIVATE - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/BlocksRuntime.def) - if(NOT BUILD_SHARED_LIBS) - target_compile_definitions(BlocksRuntime - PRIVATE - BlocksRuntime_STATIC) - endif() - endif() - set_target_properties(BlocksRuntime - PROPERTIES - POSITION_INDEPENDENT_CODE TRUE) - if(HAVE_OBJC AND CMAKE_DL_LIBS) - target_link_libraries(BlocksRuntime - PUBLIC - ${CMAKE_DL_LIBS}) - endif() - - add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) - - install(FILES - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block.h - DESTINATION - "${INSTALL_BLOCK_HEADERS_DIR}") - if(INSTALL_PRIVATE_HEADERS) - install(FILES - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block_private.h - DESTINATION - "${INSTALL_BLOCK_HEADERS_DIR}") - endif() - install(TARGETS - BlocksRuntime - ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} - LIBRARY DESTINATION ${INSTALL_TARGET_DIR} - RUNTIME DESTINATION bin) -endif() - check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) if(_GNU_SOURCE) set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE) diff --git a/src/BlocksRuntime/CMakeLists.txt b/src/BlocksRuntime/CMakeLists.txt new file mode 100644 index 000000000..a5388d6eb --- /dev/null +++ b/src/BlocksRuntime/CMakeLists.txt @@ -0,0 +1,34 @@ + +add_library(BlocksRuntime + data.c + runtime.c) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(BlocksRuntime PRIVATE + BlocksRuntime.def) + + if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(BlocksRuntime PRIVATE + BlocksRuntime_STATIC) + endif() +endif() + +set_target_properties(BlocksRuntime PROPERTIES + POSITION_INDEPENDENT_CODE TRUE + INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}) +if(HAVE_OBJC AND CMAKE_DL_LIBS) + target_link_libraries(BlocksRuntime PUBLIC + ${CMAKE_DL_LIBS}) +endif() + +add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) + +install(FILES Block.h + DESTINATION ${INSTALL_BLOCK_HEADERS_DIR}) +if(INSTALL_PRIVATE_HEADERS) + install(FILES Block_private.h + DESTINATION ${INSTALL_BLOCK_HEADERS_DIR}) +endif() +install(TARGETS BlocksRuntime + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2809c11c9..58419d430 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -3,6 +3,8 @@ include(CheckCCompilerFlag) include(SwiftSupport) include(DTrace) +add_subdirectory(BlocksRuntime) + add_library(dispatch allocator.c apply.c From f47dc3b41f09530768bc5862ade1dd1caf1892a6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 10 Sep 2019 09:59:48 -0700 Subject: [PATCH 211/249] Merge pull request #517 from compnerd/blocks build: sink BlocksRuntime into its own directory Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 34e66f9fc..825111e64 100644 --- a/PATCHES +++ b/PATCHES @@ -525,3 +525,4 @@ github commits starting with 29bdc2f from [3da29dd] APPLIED rdar://81276248 [90a45ce] APPLIED rdar://81276248 [37c8c28] APPLIED rdar://81276248 +[c023edd] APPLIED rdar://81276248 From 19fefedb5dc74b9c04b84703587bce80ea06167f Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 11 Sep 2019 19:23:06 -0700 Subject: [PATCH 212/249] build: migrate to standard mechanism for testing Use `CTest` module rather than the custom `ENABLE_TESTING` 
option. The inclusion of `CTest` will default `BUILD_TESTING` to true, which preserves the existing behaviour. Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6471c9781..46e3aa5ee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,7 +6,6 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") project(dispatch VERSION 1.3 LANGUAGES C CXX) -enable_testing() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") include(ClangClCompileRules) @@ -36,6 +35,7 @@ include(CheckLibraryExists) include(CheckSymbolExists) include(GNUInstallDirs) include(SwiftSupport) +include(CTest) set(SWIFT_LIBDIR "lib" CACHE PATH "Library folder name, defined by swift main buildscript") set(INSTALL_LIBDIR "${SWIFT_LIBDIR}" CACHE PATH "Path where the libraries should be installed") @@ -86,8 +86,6 @@ option(DISPATCH_ENABLE_ASSERTS "enable debug assertions" FALSE) option(ENABLE_DTRACE "enable dtrace support" "") -option(ENABLE_TESTING "build libdispatch tests" ON) - option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) @@ -286,7 +284,7 @@ add_subdirectory(man) add_subdirectory(os) add_subdirectory(private) add_subdirectory(src) -if(ENABLE_TESTING) +if(BUILD_TESTING) add_subdirectory(tests) endif() From b6315ff218ba0a3ad1434ec543dfca6b775df6f6 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 17 Sep 2019 09:25:54 -0700 Subject: [PATCH 213/249] Merge pull request #518 from compnerd/enable-testing build: migrate to standard mechanism for testing Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 825111e64..7e394fd6c 100644 --- a/PATCHES +++ b/PATCHES @@ -526,3 +526,4 @@ github commits starting with 29bdc2f from [90a45ce] APPLIED rdar://81276248 [37c8c28] APPLIED rdar://81276248 [c023edd] APPLIED rdar://81276248 +[ab8a151] APPLIED rdar://81276248 From 5bc081a85566b6ebb1d4bc768fb65b452aa77fbc Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Sat, 21 Sep 2019 18:48:10 -0700 Subject: [PATCH 214/249] hw_config: fix Windows CPU detection The Windows CPU detection code is not reporting the intended values. `physical_cpus` should report the number of physical cores instead of the number of packages, and `active_cpus` should report the number of available logical cores instead of the number of available physical cores. This fixes an issue where the `dispatch_apply` test times out on hyperthreaded systems. 
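For reference, the counting scheme the fix adopts — one physical core per RelationProcessorCore record, plus a popcount of that record's ProcessorMask for the logical CPUs — can be exercised in isolation with a sketch along these lines (a hypothetical standalone test program, not part of this patch; it assumes the x64 __popcnt64 intrinsic, as the patched code does):

#include <windows.h>
#include <intrin.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	DWORD size = 0;
	DWORD physical = 0, logical = 0;

	// The first call fails with ERROR_INSUFFICIENT_BUFFER and reports
	// how many bytes of processor information are available.
	GetLogicalProcessorInformation(NULL, &size);
	PSYSTEM_LOGICAL_PROCESSOR_INFORMATION info =
			(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(size);
	if (!info || !GetLogicalProcessorInformation(info, &size)) {
		abort();
	}
	for (DWORD i = 0; i < size / sizeof(info[0]); i++) {
		if (info[i].Relationship == RelationProcessorCore) {
			physical++; // one record per physical core
			// each set bit in ProcessorMask is a logical CPU on that core
			logical += (DWORD)__popcnt64(info[i].ProcessorMask);
		}
	}
	printf("physical cores: %lu, logical CPUs: %lu\n",
			(unsigned long)physical, (unsigned long)logical);
	free(info);
	return 0;
}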
Signed-off-by: Rokhini Prabhu --- src/shims/hw_config.h | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 89b7f8f61..8ab79f3ed 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -124,8 +124,7 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiInfo = NULL; PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiCurrent = NULL; DWORD dwProcessorLogicalCount = 0; - DWORD dwProcessorPackageCount = 0; - DWORD dwProcessorCoreCount = 0; + DWORD dwProcessorPhysicalCount = 0; DWORD dwSize = 0; while (true) { @@ -154,12 +153,10 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) slpiCurrent++, dwSize -= sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION)) { switch (slpiCurrent->Relationship) { case RelationProcessorCore: - ++dwProcessorCoreCount; + ++dwProcessorPhysicalCount; dwProcessorLogicalCount += __popcnt64(slpiCurrent->ProcessorMask); break; case RelationProcessorPackage: - ++dwProcessorPackageCount; - break; case RelationNumaNode: case RelationCache: case RelationGroup: @@ -172,11 +169,10 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) switch (c) { case _dispatch_hw_config_logical_cpus: + case _dispatch_hw_config_active_cpus: return dwProcessorLogicalCount; case _dispatch_hw_config_physical_cpus: - return dwProcessorPackageCount; - case _dispatch_hw_config_active_cpus: - return dwProcessorCoreCount; + return dwProcessorPhysicalCount; } #else const char *name = NULL; From 00f88eb671d2f069e1fae55016c59fda66435428 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 23 Sep 2019 10:11:37 -0700 Subject: [PATCH 215/249] Merge pull request #521 from adierking/learn-to-count hw_config: fix Windows CPU detection Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 7e394fd6c..ed21f3a20 100644 --- a/PATCHES +++ b/PATCHES @@ -527,3 +527,4 @@ github commits starting with 29bdc2f from [37c8c28] APPLIED rdar://81276248 [c023edd] APPLIED rdar://81276248 [ab8a151] APPLIED rdar://81276248 +[c66cb25] APPLIED rdar://81276248 From a44fc5e4efa5e3a814664673dee010d50ed169e7 Mon Sep 17 00:00:00 2001 From: Aaron Dierking Date: Sat, 21 Sep 2019 19:09:57 -0700 Subject: [PATCH 216/249] queue: use the default stack size on Windows The Windows thread pool implementation is calling `_beginthreadex` with a stack size of 64 KB. This seems like a misunderstanding to me. In the pthread code, `_dispatch_mgr_root_queue_init` does call `pthread_attr_setstacksize` with a 64 KB size. However, this only applies to the pthread root queue manager, and pthread root queues are only supported on Apple platforms anyway. We can confirm that using the OS default stack size is the intended semantic by looking at the `dispatch_select` test, which expects to be able to stack-allocate a 500 KB buffer in an event handler. This test is failing on Windows because there is not enough stack space. We've also run into various related issues in Swift which only happen on Windows, and I suspect that this is just because the stack size isn't so small on other platforms. 
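To illustrate the semantic this change relies on: passing a stack size of 0 to _beginthreadex makes the new thread use the default stack reserve recorded in the executable's PE header (typically 1 MB), which is what lets an event handler stack-allocate a large buffer. A minimal sketch of that behaviour, assuming the usual 1 MB default (hypothetical test program, not part of this patch):

#include <windows.h>
#include <process.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned __stdcall
worker(void *arg)
{
	(void)arg;
	char buf[500 * 1024]; // would overflow the old 64 KB worker stacks
	memset(buf, 0, sizeof(buf));
	puts("large stack frame succeeded");
	return 0;
}

int main(void)
{
	// A stack_size of 0 means "use the default reserve from the PE
	// header"; STACK_SIZE_PARAM_IS_A_RESERVATION makes a nonzero value
	// a reservation rather than an immediate commit.
	uintptr_t hThread = _beginthreadex(NULL, /* stack_size */ 0, worker,
			NULL, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL);
	if (hThread == 0) {
		return 1;
	}
	WaitForSingleObject((HANDLE)hThread, INFINITE);
	CloseHandle((HANDLE)hThread);
	return 0;
}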
Signed-off-by: Rokhini Prabhu --- src/queue.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/queue.c b/src/queue.c index 55c4873b1..2536093e2 100644 --- a/src/queue.c +++ b/src/queue.c @@ -6266,13 +6266,8 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) #endif do { _dispatch_retain(dq); // released in _dispatch_worker_thread -#if DISPATCH_DEBUG - unsigned dwStackSize = 0; -#else - unsigned dwStackSize = 64 * 1024; -#endif uintptr_t hThread = 0; - while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { + while (!(hThread = _beginthreadex(NULL, /* stack_size */ 0, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { if (errno != EAGAIN) { (void)dispatch_assume(hThread); } From 24f21db3033a1425c46d940c06e055d8e0794438 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 23 Sep 2019 10:11:50 -0700 Subject: [PATCH 217/249] Merge pull request #522 from adierking/stack-size queue: use the default stack size on Windows Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ed21f3a20..646517be7 100644 --- a/PATCHES +++ b/PATCHES @@ -528,3 +528,4 @@ github commits starting with 29bdc2f from [c023edd] APPLIED rdar://81276248 [ab8a151] APPLIED rdar://81276248 [c66cb25] APPLIED rdar://81276248 +[289e552] APPLIED rdar://81276248 From cd4e47924ace078825c4a02042e0c5d3f68d2bc3 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 26 Aug 2019 15:42:09 -0700 Subject: [PATCH 218/249] semaphore: annotate fallthrough (NFC) Address `-Werror,-Wimplicit-fallthrough` as identified by clang. This fixes a couple of build warnings which are treated as errors. NFC. 
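The underlying pattern is a macro that expands to the compiler's fallthrough attribute when one is available and to nothing otherwise, placed at each deliberate fall-through so -Wimplicit-fallthrough stays quiet without masking real bugs. A condensed standalone sketch of the same idea (the FALLTHROUGH macro and pad_count function here are hypothetical; the patch's DISPATCH_FALLTHROUGH covers more compiler and C++ dialect combinations):

#include <stdio.h>

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(fallthrough)
#define FALLTHROUGH __attribute__((__fallthrough__))
#else
#define FALLTHROUGH
#endif

// Toy '=' padding counter: lower cases deliberately run into higher ones
// so that each remainder accumulates all of the later padding as well.
static int
pad_count(int remainder)
{
	int pads = 0;
	switch (remainder) {
	case 1:
		pads += 2;
		FALLTHROUGH;
	case 2:
		pads += 1;
		FALLTHROUGH;
	case 3:
		pads += 2;
		break;
	default:
		break;
	}
	return pads;
}

int main(void)
{
	printf("%d\n", pad_count(1)); // prints 5
	return 0;
}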
Signed-off-by: Rokhini Prabhu --- dispatch/base.h | 27 +++++++++++++++++++++++++++ src/event/event_epoll.c | 1 + src/io.c | 6 +++--- src/semaphore.c | 8 ++++---- src/shims/lock.c | 4 ++-- src/transform.c | 3 +++ 6 files changed, 40 insertions(+), 9 deletions(-) diff --git a/dispatch/base.h b/dispatch/base.h index e6c71b0e0..0a2370bd8 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -127,6 +127,33 @@ #define DISPATCH_UNAVAILABLE_MSG(msg) #endif +#if defined(__cplusplus) +# if __cplusplus >= 201703L +# define DISPATCH_FALLTHROUGH [[fallthrough]] +# elif __cplusplus >= 201103L +# if defined(__clang__) +# define DISPATCH_FALLTHROUGH [[clang::fallthrough]] +# elif defined(__GNUC__) && __GNUC__ >= 7 +# define DISPATCH_FALLTHROUGH [[gnu::fallthrough]] +# else +# define DISPATCH_FALLTHROUGH +# endif +# else +# define DISPATCH_FALLTHROUGH +# endif +#elif defined(__GNUC__) && __GNUC__ >= 7 +# define DISPATCH_FALLTHROUGH __attribute__((__fallthrough__)) +#elif defined(__clang__) +# if __has_attribute(fallthrough) && __clang_major__ >= 5 +# define DISPATCH_FALLTHROUGH __attribute__((__fallthrough__)) +# else +# define DISPATCH_FALLTHROUGH +# endif +#else +# define DISPATCH_FALLTHROUGH +#endif + + #ifdef __linux__ #define DISPATCH_LINUX_UNAVAILABLE() \ DISPATCH_UNAVAILABLE_MSG( \ diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index 8210b923b..f31d13ee0 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -174,6 +174,7 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) } case EVFILT_WRITE: filter = EVFILT_READ; + DISPATCH_FALLTHROUGH; case EVFILT_READ: if (fstat(fd, &sb) < 0) { return NULL; diff --git a/src/io.c b/src/io.c index 73295df29..99a9ba6cb 100644 --- a/src/io.c +++ b/src/io.c @@ -2085,12 +2085,12 @@ _dispatch_stream_handler(void *ctx) switch (result) { case DISPATCH_OP_DELIVER: flags = DOP_DEFAULT; - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_DELIVER_AND_COMPLETE: flags = (flags != DOP_DEFAULT) ? DOP_DELIVER | DOP_NO_EMPTY : DOP_DEFAULT; _dispatch_operation_deliver_data(op, flags); - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_COMPLETE: if (flags != DOP_DEFAULT) { _dispatch_stream_complete_operation(stream, op); @@ -2102,7 +2102,7 @@ _dispatch_stream_handler(void *ctx) break; case DISPATCH_OP_COMPLETE_RESUME: _dispatch_stream_complete_operation(stream, op); - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_RESUME: if (_dispatch_stream_operation_avail(stream)) { stream->source_running = true; diff --git a/src/semaphore.c b/src/semaphore.c index 1d164f17f..987333740 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -116,8 +116,8 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) { break; } - // Fall through and try to undo what the fast path did to - // dsema->dsema_value + // Try to undo what the fast path did to dsema->dsema_value + DISPATCH_FALLTHROUGH; case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { @@ -126,8 +126,8 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, return _DSEMA4_TIMEOUT(); } } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. + // Another thread called semaphore_signal(). Drain the wakeup. 
+ DISPATCH_FALLTHROUGH; case DISPATCH_TIME_FOREVER: _dispatch_sema4_wait(&dsema->dsema_sema); break; diff --git a/src/shims/lock.c b/src/shims/lock.c index e96408981..2f91d8d1d 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -338,7 +338,7 @@ _dlock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) if (timeout == 0) { continue; } - /* FALLTHROUGH */ + DISPATCH_FALLTHROUGH; case ETIMEDOUT: case EFAULT: return -rc; @@ -427,7 +427,7 @@ _futex_blocking_op(uint32_t *uaddr, int futex_op, uint32_t val, if (timeout == 0) { continue; } - /* FALLTHROUGH */ + DISPATCH_FALLTHROUGH; case ETIMEDOUT: case EFAULT: case EWOULDBLOCK: diff --git a/src/transform.c b/src/transform.c index 39147fa7a..b1b84aafd 100644 --- a/src/transform.c +++ b/src/transform.c @@ -781,11 +781,14 @@ _dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned ch case 1: *ptr++ = '='; // c *ptr++ = '='; // d + DISPATCH_FALLTHROUGH; case 2: *ptr++ = '='; // e + DISPATCH_FALLTHROUGH; case 3: *ptr++ = '='; // f *ptr++ = '='; // g + DISPATCH_FALLTHROUGH; case 4: *ptr++ = '='; // h break; From 37a319f8cfa0e0cbd04980234a7de94d4a9e13fc Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 24 Sep 2019 10:22:33 -0700 Subject: [PATCH 219/249] Merge pull request #514 from compnerd/implicit-fallthrough semaphore: annotate fallthrough (NFC) Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 646517be7..dc4e1eae1 100644 --- a/PATCHES +++ b/PATCHES @@ -529,3 +529,4 @@ github commits starting with 29bdc2f from [ab8a151] APPLIED rdar://81276248 [c66cb25] APPLIED rdar://81276248 [289e552] APPLIED rdar://81276248 +[afd6b6d] APPLIED rdar://81276248 From a0de861a44bb8adc865643f633adb2c8ff32882e Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Thu, 26 Sep 2019 11:57:40 -0700 Subject: [PATCH 220/249] Revert "Revert "build: port to new Swift support"" Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 138 +++---- cmake/modules/DispatchCompilerWarnings.cmake | 138 ++++--- cmake/modules/SwiftSupport.cmake | 217 ---------- src/CMakeLists.txt | 394 ++++++------------- src/swift/CMakeLists.txt | 56 +++ 5 files changed, 317 insertions(+), 626 deletions(-) create mode 100644 src/swift/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 46e3aa5ee..10a0e4623 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,22 +1,38 @@ -cmake_minimum_required(VERSION 3.4.3) +cmake_minimum_required(VERSION 3.15.1) -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") +list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules) + +# NOTE(compnerd) enable CMP0091 - select MSVC runtime based on +# CMAKE_MSVC_RUNTIME_LIBRARY. Requires CMake 3.15 or newer. 
+if(POLICY CMP0091) + cmake_policy(SET CMP0091 NEW) +endif() project(dispatch - VERSION 1.3 - LANGUAGES C CXX) + VERSION 1.3 + LANGUAGES C CXX) if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") include(ClangClCompileRules) endif() +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + include(DispatchWindowsSupport) + dispatch_windows_arch_spelling(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_MSVC_ARCH) + dispatch_windows_include_for_arch(${DISPATCH_MSVC_ARCH} DISPATCH_INCLUDES) + include_directories(BEFORE SYSTEM ${DISPATCH_INCLUDES}) + dispatch_windows_lib_for_arch(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_LIBDIR) + link_directories(${DISPATCH_LIBDIR}) +endif() + set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED YES) set(CMAKE_CXX_STANDARD 11) set(CMAKE_C_VISIBILITY_PRESET hidden) +set(CMAKE_C_VISIBILITY_INLINES_HIDDEN YES) # NOTE(compnerd) this is a horrible workaround for Windows to ensure that the # tests can run as there is no rpath equivalent and `PATH` is used to lookup the @@ -28,74 +44,37 @@ set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED) +include(CheckCCompilerFlag) include(CheckCSourceCompiles) include(CheckFunctionExists) include(CheckIncludeFiles) include(CheckLibraryExists) include(CheckSymbolExists) include(GNUInstallDirs) -include(SwiftSupport) include(CTest) -set(SWIFT_LIBDIR "lib" CACHE PATH "Library folder name, defined by swift main buildscript") -set(INSTALL_LIBDIR "${SWIFT_LIBDIR}" CACHE PATH "Path where the libraries should be installed") - include(DispatchAppleOptions) include(DispatchSanitization) - include(DispatchCompilerWarnings) -dispatch_common_warnings() - -option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) -set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) +include(DTrace) +include(SwiftSupport) # NOTE(abdulras) this is the CMake supported way to control whether we generate # shared or static libraries. This impacts the behaviour of `add_library` in # what type of library it generates. 
option(BUILD_SHARED_LIBS "build shared libraries" ON) -option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) -if(ENABLE_SWIFT) - if(NOT CMAKE_SWIFT_COMPILER) - message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift") - endif() - - string(TOLOWER ${CMAKE_SYSTEM_NAME} swift_os) - get_swift_host_arch(swift_arch) - - if(BUILD_SHARED_LIBS) - set(swift_dir swift) - else() - set(swift_dir swift_static) - endif() - - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/${swift_dir}/${swift_os}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") - set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/os" CACHE PATH "Path where the os/ headers will be installed") -endif() - -if(NOT ENABLE_SWIFT) - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "include/dispatch" CACHE PATH "Path where the headers will be installed") - set(INSTALL_BLOCK_HEADERS_DIR "include" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") -endif() - option(DISPATCH_ENABLE_ASSERTS "enable debug assertions" FALSE) -option(ENABLE_DTRACE "enable dtrace support" "") +option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) +set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) -option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) -set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) +option(ENABLE_DTRACE "enable dtrace support" "") -if(CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL Android OR - CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR - CMAKE_SYSTEM_NAME STREQUAL Windows) - set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) -else() +if(CMAKE_SYSTEM_NAME STREQUAL Darwin OR CMAKE_SYSTEM_NAME STREQUAL FreeBSD) set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF) +else() + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) endif() option(ENABLE_INTERNAL_PTHREAD_WORKQUEUES "use libdispatch's own implementation of pthread workqueues" ${ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT}) if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES) @@ -114,6 +93,15 @@ endif() option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) +option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) +if(ENABLE_SWIFT) + enable_language(Swift) +endif() + +option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) +set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) + + check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) if(_GNU_SOURCE) set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE) @@ -144,8 +132,6 @@ check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) -find_package(Threads REQUIRED) - check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) check_include_files("dlfcn.h" HAVE_DLFCN_H) check_include_files("fcntl.h" HAVE_FCNTL_H) @@ -181,7 +167,7 @@ else() set(USE_MACH_SEM 0) endif() if(CMAKE_SYSTEM_NAME 
STREQUAL Windows)
-  add_definitions(-DUSE_WIN32_SEM)
+  add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:USE_WIN32_SEM>)
 endif()
 check_library_exists(pthread sem_init "" USE_POSIX_SEM)
 # NOTE: android has not always provided a libpthread, but uses the pthreads API
@@ -211,7 +197,7 @@ check_symbol_exists(VQ_FREE_SPACE_CHANGE "sys/mount.h" HAVE_DECL_VQ_FREE_SPACE_C
 check_symbol_exists(strlcpy "string.h" HAVE_STRLCPY)
 check_symbol_exists(program_invocation_name "errno.h" HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME)
 if (HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME)
-  add_definitions(-D_GNU_SOURCE=1)
+  add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:_GNU_SOURCE=1>)
 endif()
 
 check_symbol_exists(__printflike "bsd/sys/cdefs.h" HAVE_PRINTFLIKE)
@@ -220,24 +206,20 @@ if(CMAKE_SYSTEM_NAME STREQUAL Android)
 endif()
 
 if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
-  add_definitions(-D_WITH_DPRINTF)
+  add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:_WITH_DPRINTF>)
 endif()
 
-if(ENABLE_DTRACE STREQUAL "")
+if(ENABLE_DTRACE)
   find_program(dtrace_EXECUTABLE dtrace)
-  if(dtrace_EXECUTABLE)
-    add_definitions(-DDISPATCH_USE_DTRACE=1)
-  else()
-    add_definitions(-DDISPATCH_USE_DTRACE=0)
-  endif()
-elseif(ENABLE_DTRACE)
-  find_program(dtrace_EXECUTABLE dtrace)
-  if(NOT dtrace_EXECUTABLE)
+  if(NOT dtrace_EXECUTABLE AND NOT ENABLE_DTRACE STREQUAL "")
     message(FATAL_ERROR "dtrace not found but explicitly requested")
   endif()
-  add_definitions(-DDISPATCH_USE_DTRACE=1)
+endif()
+
+if(dtrace_EXECUTABLE)
+  add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:DISPATCH_USE_DTRACE=1>)
 else()
-  add_definitions(-DDISPATCH_USE_DTRACE=0)
+  add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:DISPATCH_USE_DTRACE=0>)
 endif()
 
 find_program(leaks_EXECUTABLE leaks)
@@ -245,6 +227,7 @@ if(leaks_EXECUTABLE)
   set(HAVE_LEAKS TRUE)
 endif()
 
+
 if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
   add_custom_command(OUTPUT
     "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap"
@@ -266,19 +249,25 @@ add_custom_target(module-maps ALL DEPENDS
   "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap"
   "${PROJECT_SOURCE_DIR}/private/module.modulemap")
 
+
 configure_file("${PROJECT_SOURCE_DIR}/cmake/config.h.in"
   "${PROJECT_BINARY_DIR}/config/config_ac.h")
-add_definitions(-DHAVE_CONFIG_H)
+add_compile_definitions($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:HAVE_CONFIG_H>)
 
-if(CMAKE_SYSTEM_NAME STREQUAL Windows)
-  include(DispatchWindowsSupport)
-  dispatch_windows_arch_spelling(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_MSVC_ARCH)
-  dispatch_windows_include_for_arch(${DISPATCH_MSVC_ARCH} DISPATCH_INCLUDES)
-  include_directories(BEFORE SYSTEM ${DISPATCH_INCLUDES})
-  dispatch_windows_lib_for_arch(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_LIBDIR)
-  link_directories(${DISPATCH_LIBDIR})
+
+if(ENABLE_SWIFT)
+  set(INSTALL_TARGET_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:_static>/$<LOWER_CASE:${CMAKE_SYSTEM_NAME}>" CACHE PATH "Path where the libraries will be installed")
+  set(INSTALL_DISPATCH_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:_static>/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch")
+  set(INSTALL_BLOCK_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:_static>/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime")
+  set(INSTALL_OS_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$<NOT:$<BOOL:${BUILD_SHARED_LIBS}>>:_static>/os" CACHE PATH "Path where the os/ headers will be installed")
+else()
+  set(INSTALL_TARGET_DIR "${CMAKE_INSTALL_LIBDIR}" CACHE PATH "Path where the libraries will be installed")
+  set(INSTALL_DISPATCH_HEADERS_DIR "include/dispatch" CACHE PATH "Path where the headers will be installed")
+  set(INSTALL_BLOCK_HEADERS_DIR "include" CACHE PATH "Path where the headers will be installed for the blocks runtime")
+  set(INSTALL_OS_HEADERS_DIR
"include/os" CACHE PATH "Path where the headers will be installed") endif() + add_subdirectory(dispatch) add_subdirectory(man) add_subdirectory(os) @@ -287,4 +276,3 @@ add_subdirectory(src) if(BUILD_TESTING) add_subdirectory(tests) endif() - diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake index d568c721a..6ef9d3164 100644 --- a/cmake/modules/DispatchCompilerWarnings.cmake +++ b/cmake/modules/DispatchCompilerWarnings.cmake @@ -1,79 +1,75 @@ if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") # TODO: someone needs to provide the msvc equivalent warning flags - macro(dispatch_common_warnings) - endmacro() else() - macro(dispatch_common_warnings) - add_compile_options(-Werror) - add_compile_options(-Wall) - add_compile_options(-Wextra) + add_compile_options($<$,$>:-Werror>) + add_compile_options($<$,$>:-Wall>) + add_compile_options($<$,$>:-Wextra>) - add_compile_options(-Warray-bounds-pointer-arithmetic) - add_compile_options(-Wassign-enum) - add_compile_options(-Watomic-properties) - add_compile_options(-Wcomma) - add_compile_options(-Wconditional-uninitialized) - add_compile_options(-Wconversion) - add_compile_options(-Wcovered-switch-default) - add_compile_options(-Wdate-time) - add_compile_options(-Wdeprecated) - add_compile_options(-Wdocumentation) - add_compile_options(-Wdouble-promotion) - add_compile_options(-Wduplicate-enum) - add_compile_options(-Wexpansion-to-defined) - add_compile_options(-Wfloat-equal) - add_compile_options(-Widiomatic-parentheses) - add_compile_options(-Winfinite-recursion) - add_compile_options(-Wmissing-prototypes) - add_compile_options(-Wnewline-eof) - add_compile_options(-Wnullable-to-nonnull-conversion) - add_compile_options(-Wobjc-interface-ivars) - add_compile_options(-Wover-aligned) - add_compile_options(-Wpacked) - add_compile_options(-Wpointer-arith) - add_compile_options(-Wselector) - add_compile_options(-Wshadow) - add_compile_options(-Wshorten-64-to-32) - add_compile_options(-Wsign-conversion) - add_compile_options(-Wstatic-in-inline) - add_compile_options(-Wsuper-class-method-mismatch) - add_compile_options(-Wswitch) - add_compile_options(-Wunguarded-availability) - add_compile_options(-Wunreachable-code) - add_compile_options(-Wunused) + add_compile_options($<$,$>:-Warray-bounds-pointer-arithmetic>) + add_compile_options($<$,$>:-Wassign-enum>) + add_compile_options($<$,$>:-Watomic-properties>) + add_compile_options($<$,$>:-Wcomma>) + add_compile_options($<$,$>:-Wconditional-uninitialized>) + add_compile_options($<$,$>:-Wconversion>) + add_compile_options($<$,$>:-Wcovered-switch-default>) + add_compile_options($<$,$>:-Wdate-time>) + add_compile_options($<$,$>:-Wdeprecated>) + add_compile_options($<$,$>:-Wdocumentation>) + add_compile_options($<$,$>:-Wdouble-promotion>) + add_compile_options($<$,$>:-Wduplicate-enum>) + add_compile_options($<$,$>:-Wexpansion-to-defined>) + add_compile_options($<$,$>:-Wfloat-equal>) + add_compile_options($<$,$>:-Widiomatic-parentheses>) + add_compile_options($<$,$>:-Winfinite-recursion>) + add_compile_options($<$,$>:-Wmissing-prototypes>) + add_compile_options($<$,$>:-Wnewline-eof>) + add_compile_options($<$,$>:-Wnullable-to-nonnull-conversion>) + add_compile_options($<$,$>:-Wobjc-interface-ivars>) + add_compile_options($<$,$>:-Wover-aligned>) + add_compile_options($<$,$>:-Wpacked>) + add_compile_options($<$,$>:-Wpointer-arith>) + add_compile_options($<$,$>:-Wselector>) + add_compile_options($<$,$>:-Wshadow>) + add_compile_options($<$,$>:-Wshorten-64-to-32>) + 
add_compile_options($<$,$>:-Wsign-conversion>) + add_compile_options($<$,$>:-Wstatic-in-inline>) + add_compile_options($<$,$>:-Wsuper-class-method-mismatch>) + add_compile_options($<$,$>:-Wswitch>) + add_compile_options($<$,$>:-Wunguarded-availability>) + add_compile_options($<$,$>:-Wunreachable-code>) + add_compile_options($<$,$>:-Wunused>) - add_compile_options(-Wno-unknown-warning-option) - add_compile_options(-Wno-trigraphs) - add_compile_options(-Wno-four-char-constants) - add_compile_options(-Wno-disabled-macro-expansion) - add_compile_options(-Wno-pedantic) - add_compile_options(-Wno-bad-function-cast) - add_compile_options(-Wno-c++-compat) - add_compile_options(-Wno-c++98-compat) - add_compile_options(-Wno-c++98-compat-pedantic) - add_compile_options(-Wno-cast-align) - add_compile_options(-Wno-cast-qual) - add_compile_options(-Wno-documentation-unknown-command) - add_compile_options(-Wno-format-nonliteral) - add_compile_options(-Wno-missing-variable-declarations) - add_compile_options(-Wno-old-style-cast) - add_compile_options(-Wno-padded) - add_compile_options(-Wno-reserved-id-macro) - add_compile_options(-Wno-shift-sign-overflow) - add_compile_options(-Wno-undef) - add_compile_options(-Wno-unreachable-code-aggressive) - add_compile_options(-Wno-unused-macros) - add_compile_options(-Wno-used-but-marked-unused) - add_compile_options(-Wno-vla) + add_compile_options($<$,$>:-Wno-unknown-warning-option>) + add_compile_options($<$,$>:-Wno-trigraphs>) + add_compile_options($<$,$>:-Wno-four-char-constants>) + add_compile_options($<$,$>:-Wno-disabled-macro-expansion>) + add_compile_options($<$,$>:-Wno-pedantic>) + add_compile_options($<$,$>:-Wno-bad-function-cast>) + add_compile_options($<$,$>:-Wno-c++-compat>) + add_compile_options($<$,$>:-Wno-c++98-compat>) + add_compile_options($<$,$>:-Wno-c++98-compat-pedantic>) + add_compile_options($<$,$>:-Wno-cast-align>) + add_compile_options($<$,$>:-Wno-cast-qual>) + add_compile_options($<$,$>:-Wno-documentation-unknown-command>) + add_compile_options($<$,$>:-Wno-format-nonliteral>) + add_compile_options($<$,$>:-Wno-missing-variable-declarations>) + add_compile_options($<$,$>:-Wno-old-style-cast>) + add_compile_options($<$,$>:-Wno-padded>) + add_compile_options($<$,$>:-Wno-reserved-id-macro>) + add_compile_options($<$,$>:-Wno-shift-sign-overflow>) + add_compile_options($<$,$>:-Wno-undef>) + add_compile_options($<$,$>:-Wno-unreachable-code-aggressive>) + add_compile_options($<$,$>:-Wno-unused-macros>) + add_compile_options($<$,$>:-Wno-used-but-marked-unused>) + add_compile_options($<$,$>:-Wno-vla>) - if(CMAKE_SYSTEM_NAME STREQUAL Android) - add_compile_options(-Wno-incompatible-function-pointer-types) - add_compile_options(-Wno-implicit-function-declaration) - add_compile_options(-Wno-conversion) - add_compile_options(-Wno-int-conversion) - add_compile_options(-Wno-shorten-64-to-32) - endif() - add_compile_options(-Wno-error=assign-enum) - endmacro() + if(CMAKE_SYSTEM_NAME STREQUAL Android) + add_compile_options($<$,$>:-Wno-incompatible-function-pointer-types>) + add_compile_options($<$,$>:-Wno-implicit-function-declaration>) + add_compile_options($<$,$>:-Wno-conversion>) + add_compile_options($<$,$>:-Wno-int-conversion>) + add_compile_options($<$,$>:-Wno-shorten-64-to-32>) + endif() + add_compile_options($<$,$>:-Wno-error=assign-enum>) endif() diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index da7a201e3..4310b54c9 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -1,221 +1,4 
@@ -include(CMakeParseArguments) - -function(add_swift_target target) - set(options LIBRARY;SHARED;STATIC) - set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) - set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;RESOURCES;SOURCES;SWIFT_FLAGS) - - cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) - - set(compile_flags ${CMAKE_SWIFT_FLAGS}) - set(link_flags ${CMAKE_SWIFT_LINK_FLAGS}) - - if(AST_TARGET) - list(APPEND compile_flags -target;${AST_TARGET}) - list(APPEND link_flags -target;${AST_TARGET}) - endif() - if(AST_MODULE_NAME) - list(APPEND compile_flags -module-name;${AST_MODULE_NAME}) - else() - list(APPEND compile_flags -module-name;${target}) - endif() - if(AST_MODULE_LINK_NAME) - list(APPEND compile_flags -module-link-name;${AST_MODULE_LINK_NAME}) - endif() - if(AST_MODULE_CACHE_PATH) - list(APPEND compile_flags -module-cache-path;${AST_MODULE_CACHE_PATH}) - endif() - if(CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) - list(APPEND compile_flags -g) - endif() - if(AST_SWIFT_FLAGS) - foreach(flag ${AST_SWIFT_FLAGS}) - list(APPEND compile_flags ${flag}) - endforeach() - endif() - if(AST_CFLAGS) - foreach(flag ${AST_CFLAGS}) - list(APPEND compile_flags -Xcc;${flag}) - endforeach() - endif() - if(AST_LINK_FLAGS) - foreach(flag ${AST_LINK_FLAGS}) - list(APPEND link_flags ${flag}) - endforeach() - endif() - if(AST_LIBRARY) - if(AST_STATIC AND AST_SHARED) - message(SEND_ERROR "add_swift_target asked to create library as STATIC and SHARED") - elseif(AST_STATIC OR NOT BUILD_SHARED_LIBS) - set(library_kind STATIC) - elseif(AST_SHARED OR BUILD_SHARED_LIBS) - set(library_kind SHARED) - endif() - else() - if(AST_STATIC OR AST_SHARED) - message(SEND_ERROR "add_swift_target asked to create executable as STATIC or SHARED") - endif() - endif() - if(NOT AST_OUTPUT) - if(AST_LIBRARY) - if(AST_SHARED OR BUILD_SHARED_LIBS) - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) - else() - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_STATIC_LIBRARY_PREFIX}${target}${CMAKE_STATIC_LIBRARY_SUFFIX}) - endif() - else() - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) - endif() - endif() - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - if(AST_SHARED OR BUILD_SHARED_LIBS) - set(IMPORT_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_IMPORT_LIBRARY_PREFIX}${target}${CMAKE_IMPORT_LIBRARY_SUFFIX}) - endif() - endif() - - set(sources) - foreach(source ${AST_SOURCES}) - get_filename_component(location ${source} PATH) - if(IS_ABSOLUTE ${location}) - list(APPEND sources ${source}) - else() - list(APPEND sources ${CMAKE_CURRENT_SOURCE_DIR}/${source}) - endif() - endforeach() - - set(objs) - set(mods) - set(docs) - set(i 0) - foreach(source ${sources}) - get_filename_component(name ${source} NAME) - - set(obj ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}${CMAKE_C_OUTPUT_EXTENSION}) - set(mod ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftmodule) - set(doc ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftdoc) - - set(all_sources ${sources}) - list(INSERT all_sources ${i} -primary-file) - - add_custom_command(OUTPUT - ${obj} - ${mod} - ${doc} - DEPENDS - ${source} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) - - 
list(APPEND objs ${obj}) - list(APPEND mods ${mod}) - list(APPEND docs ${doc}) - - math(EXPR i "${i}+1") - endforeach() - - if(AST_LIBRARY) - get_filename_component(module_directory ${AST_MODULE_PATH} DIRECTORY) - - set(module ${AST_MODULE_PATH}) - set(documentation ${module_directory}/${AST_MODULE_NAME}.swiftdoc) - - add_custom_command(OUTPUT - ${module} - ${documentation} - DEPENDS - ${mods} - ${docs} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) - endif() - - if(AST_LIBRARY) - if(CMAKE_SYSTEM_NAME STREQUAL Windows OR CMAKE_SYSTEM_NAME STREQUAL Darwin) - set(emit_library -emit-library) - else() - set(emit_library -emit-library -Xlinker -soname -Xlinker ${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) - endif() - endif() - if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED) - add_custom_command(OUTPUT - ${AST_OUTPUT} - DEPENDS - ${objs} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs}) - add_custom_target(${target} - ALL - DEPENDS - ${AST_OUTPUT} - ${module} - ${documentation}) - else() - add_library(${target}-static STATIC ${objs}) - add_dependencies(${target}-static ${AST_DEPENDS}) - get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) - if(NOT CMAKE_STATIC_LIBRARY_PREFIX STREQUAL "") - string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn}) - endif() - if(NOT CMAKE_STATIC_LIBRARY_SUFFIX STREQUAL "") - string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn}) - endif() - get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) - set_target_properties(${target}-static - PROPERTIES - LINKER_LANGUAGE C - ARCHIVE_OUTPUT_DIRECTORY ${ast_output_dn} - OUTPUT_DIRECTORY ${ast_output_dn} - OUTPUT_NAME ${ast_output_bn}) - add_custom_target(${target} - ALL - DEPENDS - ${target}-static - ${module} - ${documentation}) - endif() - - if(AST_RESOURCES) - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target} - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${target} - COMMAND - ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_RESOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources) - else() - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - if(AST_SHARED OR BUILD_SHARED_LIBS) - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E copy ${IMPORT_LIBRARY} ${CMAKE_CURRENT_BINARY_DIR}) - endif() - endif() - endif() -endfunction() - -function(add_swift_library library) - add_swift_target(${library} LIBRARY ${ARGN}) -endfunction() - -function(add_swift_executable executable) - add_swift_target(${executable} ${ARGN}) -endfunction() - # Returns the current achitecture name in a variable # # Usage: diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 58419d430..120798711 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,297 +1,165 @@ -include(CheckCCompilerFlag) -include(SwiftSupport) -include(DTrace) - -add_subdirectory(BlocksRuntime) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) + add_subdirectory(BlocksRuntime) +endif() add_library(dispatch - allocator.c - apply.c - 
benchmark.c - data.c - init.c - introspection.c - io.c - mach.c - object.c - once.c - queue.c - semaphore.c - source.c - time.c - transform.c - voucher.c - shims.c - protocol.defs - provider.d - allocator_internal.h - data_internal.h - inline_internal.h - internal.h - introspection_internal.h - io_internal.h - mach_internal.h - object_internal.h - queue_internal.h - semaphore_internal.h - shims.h - source_internal.h - trace.h - voucher_internal.h - event/event.c - event/event_config.h - event/event_epoll.c - event/event_internal.h - event/event_kevent.c - event/event_windows.c - firehose/firehose_internal.h - shims/android_stubs.h - shims/atomic.h - shims/atomic_sfb.h - shims/getprogname.h - shims/hw_config.h - shims/lock.c - shims/lock.h - shims/perfmon.h - shims/time.h - shims/tsd.h - shims/yield.c - shims/yield.h) - -set_target_properties(dispatch - PROPERTIES - POSITION_INDEPENDENT_CODE YES) + allocator.c + apply.c + benchmark.c + data.c + init.c + introspection.c + io.c + mach.c + object.c + once.c + queue.c + semaphore.c + source.c + time.c + transform.c + voucher.c + shims.c + protocol.defs + provider.d + allocator_internal.h + data_internal.h + inline_internal.h + internal.h + introspection_internal.h + io_internal.h + mach_internal.h + object_internal.h + queue_internal.h + semaphore_internal.h + shims.h + source_internal.h + trace.h + voucher_internal.h + event/event.c + event/event_config.h + event/event_epoll.c + event/event_internal.h + event/event_kevent.c + event/event_windows.c + firehose/firehose_internal.h + shims/android_stubs.h + shims/atomic.h + shims/atomic_sfb.h + shims/getprogname.h + shims/hw_config.h + shims/lock.c + shims/lock.h + shims/perfmon.h + shims/time.h + shims/tsd.h + shims/yield.c + shims/yield.h) -if(WIN32) - target_sources(dispatch - PRIVATE - shims/generic_sys_queue.h - shims/generic_win_stubs.c - shims/generic_win_stubs.h - shims/getprogname.c) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(dispatch PRIVATE + shims/generic_sys_queue.h + shims/generic_win_stubs.c + shims/generic_win_stubs.h + shims/getprogname.c) endif() + if(DISPATCH_USE_INTERNAL_WORKQUEUE) - target_sources(dispatch - PRIVATE - event/workqueue.c - event/workqueue_internal.h) + target_sources(dispatch PRIVATE + event/workqueue.c + event/workqueue_internal.h) endif() -target_sources(dispatch - PRIVATE - block.cpp) + +target_sources(dispatch PRIVATE + block.cpp) + +if(ENABLE_DTRACE) + dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d OUTPUT_SOURCES + dispatch_dtrace_provider_headers) + target_sources(dispatch PRIVATE + ${dispatch_dtrace_provider_headers}) +endif() + if(HAVE_OBJC) # TODO(compnerd) split DispatchStubs.cc into a separate component for the ObjC # registration and a separate component for the swift compiler's emission of a # call to the ObjC autorelease elision entry point. - target_sources(dispatch - PRIVATE - data.m - object.m - swift/DispatchStubs.cc) + target_sources(dispatch PRIVATE + data.m + object.m + swift/DispatchStubs.cc) endif() -if(ENABLE_SWIFT) - set(swift_optimization_flags) - if(NOT CMAKE_BUILD_TYPE MATCHES Debug) - set(swift_optimization_flags -O) - endif() - # NOTE(compnerd) Today regardless of whether or not ObjC interop is enabled, - # swift will use an autoreleased return value convention for certain CF - # functions (including some that are used/related to dispatch). 
This means - # that the swift compiler in callers to such functions will call the function, - # and then pass the result of the function to - # objc_retainAutoreleasedReturnValue. In a context where we have ObjC interop - # disabled, we do not have access to the objc runtime so an implementation of - # objc_retainAutoreleasedReturnValue is not available. To work around this, we - # provide a shim for objc_retainAutoreleasedReturnValue in DispatchStubs.cc - # that just calls retain on the object. Once we fix the swift compiler to - # switch to a different model for handling these arguments with objc-interop - # disabled these shims can be eliminated. - add_library(DispatchStubs - STATIC - swift/DispatchStubs.cc) - target_include_directories(DispatchStubs - PRIVATE - ${PROJECT_SOURCE_DIR}) - set_target_properties(DispatchStubs - PROPERTIES - POSITION_INDEPENDENT_CODE YES) - add_swift_library(swiftDispatch - CFLAGS - -fblocks - -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap - DEPENDS - module-maps - DispatchStubs - LINK_FLAGS - -L $ - -lDispatchStubs - -L $ - -lBlocksRuntime - -L $ - -ldispatch - $<$,$>:-lmsvcrtd> - $<$,$>>:-lmsvcrt> - MODULE_NAME - Dispatch - MODULE_LINK_NAME - swiftDispatch - MODULE_PATH - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - SOURCES - swift/Block.swift - swift/Data.swift - swift/Dispatch.swift - swift/IO.swift - swift/Private.swift - swift/Queue.swift - swift/Source.swift - swift/Time.swift - swift/Wrapper.swift - SWIFT_FLAGS - -I ${PROJECT_SOURCE_DIR} - ${swift_optimization_flags} - $<$:-Xcc> - $<$:-D_MT> - # TODO(compnerd) handle /MT builds - $<$:-Xcc> - $<$:-D_DLL> - TARGET - ${CMAKE_SWIFT_COMPILER_TARGET}) -endif() -if(ENABLE_DTRACE) - dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d - OUTPUT_SOURCES - dispatch_dtrace_provider_headers) - target_sources(dispatch - PRIVATE - ${dispatch_dtrace_provider_headers}) +set_target_properties(dispatch PROPERTIES + POSITION_INDEPENDENT_CODE YES) + +target_include_directories(dispatch PRIVATE + ${PROJECT_BINARY_DIR} + ${PROJECT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${PROJECT_SOURCE_DIR}/private) +target_include_directories(dispatch SYSTEM BEFORE PRIVATE + "${BlocksRuntime_INCLUDE_DIR}") + +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_compile_definitions(dispatch PRIVATE + _CRT_NONSTDC_NO_WARNINGS + _CRT_SECURE_NO_WARNINGS) +elseif(CMAKE_SYSTEM_NAME STREQUAL Android) + target_compile_definitions(dispatch PRIVATE + -U_GNU_SOURCE) endif() -target_include_directories(dispatch - PRIVATE - ${PROJECT_BINARY_DIR} - ${PROJECT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - ${PROJECT_SOURCE_DIR}/private) -target_include_directories(dispatch - SYSTEM BEFORE PRIVATE - "${BlocksRuntime_INCLUDE_DIR}") -if(WIN32) - target_compile_definitions(dispatch - PRIVATE - _CRT_NONSTDC_NO_WARNINGS) +if(DISPATCH_ENABLE_ASSERTS) + target_compile_definitions(dispatch PRIVATE + -DDISPATCH_DEBUG=1) endif() + if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE /EHs-c-) + target_compile_options(dispatch PRIVATE /W3) else() target_compile_options(dispatch PRIVATE -fno-exceptions) + target_compile_options(dispatch PRIVATE -Wall) endif() -if(DISPATCH_ENABLE_ASSERTS) - target_compile_definitions(dispatch - PRIVATE - -DDISPATCH_DEBUG=1) -endif() -if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_compile_definitions(dispatch - PRIVATE - -D_CRT_SECURE_NO_WARNINGS) -elseif(CMAKE_SYSTEM_NAME STREQUAL Android) - 
target_compile_options(dispatch - PRIVATE - -U_GNU_SOURCE) -endif() -if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") - target_compile_options(dispatch - PRIVATE - /W3) -else() - target_compile_options(dispatch - PRIVATE - -Wall) -endif() + # FIXME(compnerd) add check for -fblocks? -if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") - target_compile_options(dispatch - PRIVATE - -Xclang -fblocks) -else() - check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) - target_compile_options(dispatch PRIVATE -fblocks) - if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) - target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) - endif() +target_compile_options(dispatch PRIVATE -fblocks) + +check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) +if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) endif() + if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() -target_link_libraries(dispatch - PRIVATE - Threads::Threads - BlocksRuntime::BlocksRuntime) +target_link_libraries(dispatch PRIVATE + Threads::Threads + BlocksRuntime::BlocksRuntime) if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_link_libraries(dispatch - PRIVATE - ShLwApi - WS2_32 - WinMM - synchronization) + target_link_libraries(dispatch PRIVATE + ShLwApi + WS2_32 + WinMM + synchronization) endif() + if(CMAKE_SYSTEM_NAME STREQUAL Darwin) - set_property(TARGET dispatch - APPEND_STRING - PROPERTY LINK_FLAGS - "-Xlinker -compatibility_version -Xlinker 1" - "-Xlinker -current_version -Xlinker ${VERSION}" - "-Xlinker -dead_strip" - "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") + set_property(TARGET dispatch APPEND_STRING PROPERTY LINK_FLAGS + "-Xlinker -compatibility_version -Xlinker 1" + "-Xlinker -current_version -Xlinker ${VERSION}" + "-Xlinker -dead_strip" + "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") endif() -install(TARGETS - dispatch - ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} - LIBRARY DESTINATION ${INSTALL_TARGET_DIR} - RUNTIME DESTINATION bin) - if(ENABLE_SWIFT) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc - DESTINATION - ${INSTALL_TARGET_DIR}/${swift_arch}) - - if(BUILD_SHARED_LIBS) - set(library_kind SHARED) - else() - set(library_kind STATIC) - endif() - - set(swiftDispatch_OUTPUT_FILE - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_${library_kind}_LIBRARY_PREFIX}swiftDispatch${CMAKE_${library_kind}_LIBRARY_SUFFIX}) - - if(CMAKE_SYSTEM_NAME STREQUAL Windows AND BUILD_SHARED_LIBS) - install(FILES - ${swiftDispatch_OUTPUT_FILE} - DESTINATION - bin) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} - DESTINATION - ${INSTALL_TARGET_DIR}) - else() - install(FILES - ${swiftDispatch_OUTPUT_FILE} - DESTINATION - ${INSTALL_TARGET_DIR}) - endif() - - if(NOT BUILD_SHARED_LIBS) - install(FILES - $ - DESTINATION - ${INSTALL_TARGET_DIR}) - endif() + add_subdirectory(swift) endif() +install(TARGETS dispatch + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) diff --git a/src/swift/CMakeLists.txt b/src/swift/CMakeLists.txt new file mode 100644 index 000000000..a10d969a3 --- /dev/null +++ b/src/swift/CMakeLists.txt @@ -0,0 +1,56 @@ + +# NOTE(compnerd) Today regardless of whether or not ObjC interop is 
enabled, +# swift will use an autoreleased return value convention for certain CF +# functions (including some that are used/related to dispatch). This means that +# the swift compiler in callers to such functions will call the function, and +# then pass the result of the function to objc_retainAutoreleasedReturnValue. In +# a context where we have ObjC interop disabled, we do not have access to the +# objc runtime so an implementation of objc_retainAutoreleasedReturnValue is not +# available. To work around this, we provide a shim for +# objc_retainAutoreleasedReturnValue in DispatchStubs.cc that just calls retain +# on the object. Once we fix the swift compiler to switch to a different model +# for handling these arguments with objc-interop disabled these shims can be +# eliminated. +add_library(DispatchStubs STATIC + DispatchStubs.cc) +target_include_directories(DispatchStubs PRIVATE + ${PROJECT_SOURCE_DIR}) +set_target_properties(DispatchStubs PROPERTIES + POSITION_INDEPENDENT_CODE YES) + +add_library(swiftDispatch + Block.swift + Data.swift + Dispatch.swift + IO.swift + Private.swift + Queue.swift + Source.swift + Time.swift + Wrapper.swift) +target_compile_options(swiftDispatch PRIVATE + "SHELL:-Xcc -fblocks" + "SHELL:-Xcc -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "SHELL:-Xcc -I${PROJECT_SOURCE_DIR}") +set_target_properties(swiftDispatch PROPERTIES + Swift_MODULE_NAME Dispatch + Swift_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/swift) +target_link_libraries(swiftDispatch PRIVATE + DispatchStubs + BlocksRuntime::BlocksRuntime + dispatch) +add_dependencies(swiftDispatch module-maps) + +get_swift_host_arch(swift_arch) +install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc + DESTINATION ${INSTALL_TARGET_DIR}/${swift_arch}) +install(TARGETS swiftDispatch + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) +if(NOT BUILD_SHARED_LIBS) + install(TARGETS DispatchStubs + DESTINATION ${INSTALL_TARGET_DIR}) +endif() From 44dd24b5a56223438c029d452fbce4d69542fab9 Mon Sep 17 00:00:00 2001 From: Mishal Shah Date: Thu, 26 Sep 2019 15:42:18 -0700 Subject: [PATCH 221/249] Merge pull request #527 from apple/revert-526-revert-519-swift-support Revert "Revert "build: port to new Swift support"" Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index dc4e1eae1..ff1d6cded 100644 --- a/PATCHES +++ b/PATCHES @@ -530,3 +530,4 @@ github commits starting with 29bdc2f from [c66cb25] APPLIED rdar://81276248 [289e552] APPLIED rdar://81276248 [afd6b6d] APPLIED rdar://81276248 +[4c91d20] APPLIED rdar://81276248 From 9395c68fc84916f78695945df1fc33131d8e0981 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 27 Sep 2019 09:31:38 -0700 Subject: [PATCH 222/249] build: repair macro definition handling Signed-off-by: Rokhini Prabhu --- src/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 120798711..a81e7dcfe 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -109,12 +109,12 @@ if(CMAKE_SYSTEM_NAME STREQUAL Windows) _CRT_NONSTDC_NO_WARNINGS _CRT_SECURE_NO_WARNINGS) elseif(CMAKE_SYSTEM_NAME STREQUAL Android) - target_compile_definitions(dispatch PRIVATE + target_compile_options(dispatch PRIVATE -U_GNU_SOURCE) endif() if(DISPATCH_ENABLE_ASSERTS) target_compile_definitions(dispatch PRIVATE - -DDISPATCH_DEBUG=1) 
+ DISPATCH_DEBUG=1) endif() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") From ba84fd801727138f9b369dbb005cdb21fcfd8c63 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Fri, 27 Sep 2019 11:06:52 -0700 Subject: [PATCH 223/249] Merge pull request #529 from compnerd/definition-handling build: repair macro definition handling Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ff1d6cded..f18d38095 100644 --- a/PATCHES +++ b/PATCHES @@ -531,3 +531,4 @@ github commits starting with 29bdc2f from [289e552] APPLIED rdar://81276248 [afd6b6d] APPLIED rdar://81276248 [4c91d20] APPLIED rdar://81276248 +[2accb0b] APPLIED rdar://81276248 From f4505244f2a0bf88a48c7bd87401b11d9db73691 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 26 Sep 2019 08:34:10 -0700 Subject: [PATCH 224/249] build: add exports targets Now that we can build the Swift content as a library, we can do export targets. This enables us to have CMake determine more information about the target, track dependencies across projects, and manage the flags across the trees without the special variables to specify the source and build trees. Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 2 ++ cmake/modules/CMakeLists.txt | 7 +++++++ cmake/modules/dispatchConfig.cmake.in | 7 +++++++ src/BlocksRuntime/CMakeLists.txt | 2 ++ src/CMakeLists.txt | 2 ++ src/swift/CMakeLists.txt | 4 ++++ 6 files changed, 24 insertions(+) create mode 100644 cmake/modules/CMakeLists.txt create mode 100644 cmake/modules/dispatchConfig.cmake.in diff --git a/CMakeLists.txt b/CMakeLists.txt index 10a0e4623..75c419c13 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -276,3 +276,5 @@ add_subdirectory(src) if(BUILD_TESTING) add_subdirectory(tests) endif() + +add_subdirectory(cmake/modules) diff --git a/cmake/modules/CMakeLists.txt b/cmake/modules/CMakeLists.txt new file mode 100644 index 000000000..10cc0e100 --- /dev/null +++ b/cmake/modules/CMakeLists.txt @@ -0,0 +1,7 @@ + +set(DISPATCH_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/dispatchExports.cmake) +configure_file(dispatchConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/dispatchConfig.cmake) + +get_property(DISPATCH_EXPORTS GLOBAL PROPERTY DISPATCH_EXPORTS) +export(TARGETS ${DISPATCH_EXPORTS} FILE ${DISPATCH_EXPORTS_FILE}) diff --git a/cmake/modules/dispatchConfig.cmake.in b/cmake/modules/dispatchConfig.cmake.in new file mode 100644 index 000000000..81228f271 --- /dev/null +++ b/cmake/modules/dispatchConfig.cmake.in @@ -0,0 +1,7 @@ + +set(DISPATCH_HAS_SWIFT_SDK_OVERLAY @ENABLE_SWIFT@) + +if(NOT TARGET dispatch) + include(@DISPATCH_EXPORTS_FILE@) +endif() + diff --git a/src/BlocksRuntime/CMakeLists.txt b/src/BlocksRuntime/CMakeLists.txt index a5388d6eb..4c8fa9929 100644 --- a/src/BlocksRuntime/CMakeLists.txt +++ b/src/BlocksRuntime/CMakeLists.txt @@ -28,7 +28,9 @@ if(INSTALL_PRIVATE_HEADERS) install(FILES Block_private.h DESTINATION ${INSTALL_BLOCK_HEADERS_DIR}) endif() +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS BlocksRuntime) install(TARGETS BlocksRuntime + EXPORT dispatchExports ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} LIBRARY DESTINATION ${INSTALL_TARGET_DIR} RUNTIME DESTINATION bin) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a81e7dcfe..c401758c2 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -159,7 +159,9 @@ if(ENABLE_SWIFT) add_subdirectory(swift) endif() +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS dispatch) install(TARGETS dispatch + EXPORT dispatchExports ARCHIVE DESTINATION 
${INSTALL_TARGET_DIR} LIBRARY DESTINATION ${INSTALL_TARGET_DIR} RUNTIME DESTINATION bin) diff --git a/src/swift/CMakeLists.txt b/src/swift/CMakeLists.txt index a10d969a3..18a297f90 100644 --- a/src/swift/CMakeLists.txt +++ b/src/swift/CMakeLists.txt @@ -46,11 +46,15 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc DESTINATION ${INSTALL_TARGET_DIR}/${swift_arch}) +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS swiftDispatch) install(TARGETS swiftDispatch + EXPORT dispatchExports ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} LIBRARY DESTINATION ${INSTALL_TARGET_DIR} RUNTIME DESTINATION bin) if(NOT BUILD_SHARED_LIBS) + set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS DispatchStubs) install(TARGETS DispatchStubs + EXPORT dispatchExports DESTINATION ${INSTALL_TARGET_DIR}) endif() From 39db556ecd44ff47341131083f717974abdee2aa Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 27 Sep 2019 15:40:39 -0700 Subject: [PATCH 225/249] build: adjust linking visibility for exported targets Make BlocksRuntime public rather than private since the public headers reference `Blocks.h` making the users dependent on BlocksRuntime, requiring the explicit link. This also enables the use of the export targets to automatically setup the include paths for users. Signed-off-by: Rokhini Prabhu --- src/BlocksRuntime/CMakeLists.txt | 8 +++++--- src/CMakeLists.txt | 10 +++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/BlocksRuntime/CMakeLists.txt b/src/BlocksRuntime/CMakeLists.txt index 4c8fa9929..1bed20210 100644 --- a/src/BlocksRuntime/CMakeLists.txt +++ b/src/BlocksRuntime/CMakeLists.txt @@ -12,14 +12,16 @@ if(CMAKE_SYSTEM_NAME STREQUAL Windows) endif() endif() -set_target_properties(BlocksRuntime PROPERTIES - POSITION_INDEPENDENT_CODE TRUE - INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_SOURCE_DIR}) +target_include_directories(BlocksRuntime PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) if(HAVE_OBJC AND CMAKE_DL_LIBS) target_link_libraries(BlocksRuntime PUBLIC ${CMAKE_DL_LIBS}) endif() +set_target_properties(BlocksRuntime PROPERTIES + POSITION_INDEPENDENT_CODE TRUE) + add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) install(FILES Block.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index c401758c2..c88b430de 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -95,14 +95,13 @@ endif() set_target_properties(dispatch PROPERTIES POSITION_INDEPENDENT_CODE YES) -target_include_directories(dispatch PRIVATE +target_include_directories(dispatch PUBLIC ${PROJECT_BINARY_DIR} ${PROJECT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_CURRENT_BINARY_DIR}) +target_include_directories(dispatch PRIVATE ${PROJECT_SOURCE_DIR}/private) -target_include_directories(dispatch SYSTEM BEFORE PRIVATE - "${BlocksRuntime_INCLUDE_DIR}") if(CMAKE_SYSTEM_NAME STREQUAL Windows) target_compile_definitions(dispatch PRIVATE @@ -137,7 +136,8 @@ if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() target_link_libraries(dispatch PRIVATE - Threads::Threads + Threads::Threads) +target_link_libraries(dispatch PUBLIC BlocksRuntime::BlocksRuntime) if(CMAKE_SYSTEM_NAME STREQUAL Windows) target_link_libraries(dispatch PRIVATE From d6e8ffce884382d652d54f62493c7bb502937e26 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Mon, 30 Sep 2019 13:26:18 -0700 Subject: [PATCH 226/249] Merge pull request #525 from compnerd/bring-out-your-targets build: add exports targets Signed-off-by: 
Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index f18d38095..14c02f1ac 100644 --- a/PATCHES +++ b/PATCHES @@ -532,3 +532,4 @@ github commits starting with 29bdc2f from [afd6b6d] APPLIED rdar://81276248 [4c91d20] APPLIED rdar://81276248 [2accb0b] APPLIED rdar://81276248 +[b0b314c] APPLIED rdar://81276248 From c60e834650e7d932c20d388099caf91541a77d73 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 30 Sep 2019 15:43:44 -0700 Subject: [PATCH 227/249] build: make some linkage flags public Some of the compiler and linker flags need to be used by dependent libraries which requires making the flags public so that users of the libraries and the export targets are setup properly. Signed-off-by: Rokhini Prabhu --- src/swift/CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/swift/CMakeLists.txt b/src/swift/CMakeLists.txt index 18a297f90..ba987e747 100644 --- a/src/swift/CMakeLists.txt +++ b/src/swift/CMakeLists.txt @@ -34,10 +34,12 @@ target_compile_options(swiftDispatch PRIVATE "SHELL:-Xcc -I${PROJECT_SOURCE_DIR}") set_target_properties(swiftDispatch PROPERTIES Swift_MODULE_NAME Dispatch - Swift_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/swift) + Swift_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/swift + INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_BINARY_DIR}/swift) target_link_libraries(swiftDispatch PRIVATE DispatchStubs - BlocksRuntime::BlocksRuntime + BlocksRuntime::BlocksRuntime) +target_link_libraries(swiftDispatch PUBLIC dispatch) add_dependencies(swiftDispatch module-maps) From 819591ddf875095467ff95480951b9c090308ef9 Mon Sep 17 00:00:00 2001 From: ktopley-apple Date: Tue, 1 Oct 2019 08:57:33 -0700 Subject: [PATCH 228/249] Merge pull request #530 from compnerd/public-flags build: make some linkage flags public Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 14c02f1ac..844363d0a 100644 --- a/PATCHES +++ b/PATCHES @@ -533,3 +533,4 @@ github commits starting with 29bdc2f from [4c91d20] APPLIED rdar://81276248 [2accb0b] APPLIED rdar://81276248 [b0b314c] APPLIED rdar://81276248 +[c992dac] APPLIED rdar://81276248 From fb301e0c141ecdd859aa98c99676467009cd6491 Mon Sep 17 00:00:00 2001 From: Alex Langford Date: Tue, 18 Feb 2020 13:04:21 -0800 Subject: [PATCH 229/249] [build] Silence `-Wvoid-pointer-to-int-cast` libdispatch will fail to build with a newly introduced clang diagnostic `pointer-to-int-cast`. libdispatch converts a void pointer to a dispatch_invoke_flags_t (aka unsigned int) in a few places. Ideally we would not be doing this, but this solution at least gets libdispatch building again with a newer version of clang. 
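For illustration, the pattern the new diagnostic fires on looks roughly like
this (a sketch, not the actual libdispatch call sites):

~~~
#include <stdint.h>

typedef unsigned int dispatch_invoke_flags_t;

dispatch_invoke_flags_t flags_from_context(void *ctxt)
{
    /* -Wvoid-pointer-to-int-cast: cast to smaller integer type
     * 'dispatch_invoke_flags_t' (aka 'unsigned int') from 'void *' */
    return (dispatch_invoke_flags_t)ctxt;
}

dispatch_invoke_flags_t flags_from_context_quiet(void *ctxt)
{
    /* the conventional two-step cast narrows explicitly and is quiet */
    return (dispatch_invoke_flags_t)(uintptr_t)ctxt;
}
~~~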
Signed-off-by: Rokhini Prabhu
---
 cmake/modules/DispatchCompilerWarnings.cmake | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake
index 6ef9d3164..35b80f3ec 100644
--- a/cmake/modules/DispatchCompilerWarnings.cmake
+++ b/cmake/modules/DispatchCompilerWarnings.cmake
@@ -62,6 +62,7 @@ else()
   add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:-Wno-unreachable-code-aggressive>)
   add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:-Wno-unused-macros>)
   add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:-Wno-used-but-marked-unused>)
+  add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:-Wno-void-pointer-to-int-cast>)
   add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:-Wno-vla>)
 
   if(CMAKE_SYSTEM_NAME STREQUAL Android)

From 578a672f946acc5a4ba6d6af4565c84ed1648cc2 Mon Sep 17 00:00:00 2001
From: Ben Langmuir
Date: Mon, 16 Mar 2020 14:10:12 -0700
Subject: [PATCH 230/249] Merge pull request #538 from xiaobai/fix-build-new-flag

[build] Silence `-Wvoid-pointer-to-int-cast`

Signed-off-by: Rokhini Prabhu
---
 PATCHES | 1 +
 1 file changed, 1 insertion(+)

diff --git a/PATCHES b/PATCHES
index 844363d0a..361bb4228 100644
--- a/PATCHES
+++ b/PATCHES
@@ -534,3 +534,4 @@ github commits starting with 29bdc2f from
 [2accb0b] APPLIED rdar://81276248
 [b0b314c] APPLIED rdar://81276248
 [c992dac] APPLIED rdar://81276248
+[80b1772] APPLIED rdar://81276248

From 250f7a2117d61049ab0d4252cc865dcea1aa1215 Mon Sep 17 00:00:00 2001
From: Butta
Date: Wed, 6 May 2020 11:32:07 +0530
Subject: [PATCH 231/249] [CMake] fix runpath for ELF platforms

Remove the absolute path to the host stdlib for libswiftDispatch.so and
add $ORIGIN to it and libdispatch.so.

Signed-off-by: Rokhini Prabhu
---
 src/CMakeLists.txt       | 4 ++++
 src/swift/CMakeLists.txt | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c88b430de..adc989d42 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -155,6 +155,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin)
     "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases")
 endif()
 
+if(NOT CMAKE_SYSTEM_NAME MATCHES "Darwin|Windows")
+  set_target_properties(dispatch PROPERTIES INSTALL_RPATH "$ORIGIN")
+endif()
+
 if(ENABLE_SWIFT)
   add_subdirectory(swift)
 endif()
diff --git a/src/swift/CMakeLists.txt b/src/swift/CMakeLists.txt
index ba987e747..53924723e 100644
--- a/src/swift/CMakeLists.txt
+++ b/src/swift/CMakeLists.txt
@@ -59,4 +59,7 @@ if(NOT BUILD_SHARED_LIBS)
   install(TARGETS DispatchStubs
     EXPORT dispatchExports
     DESTINATION ${INSTALL_TARGET_DIR})
+elseif(NOT CMAKE_SYSTEM_NAME MATCHES "Darwin|Windows")
+  target_link_options(swiftDispatch PRIVATE "SHELL:-no-toolchain-stdlib-rpath")
+  set_target_properties(swiftDispatch PROPERTIES INSTALL_RPATH "$ORIGIN")
 endif()

From e94c8ea09f71a1f633bce07952e8ae825b4ad726 Mon Sep 17 00:00:00 2001
From: Pierre Habouzit
Date: Mon, 11 May 2020 14:55:59 -0700
Subject: [PATCH 232/249] Merge pull request #541 from buttaface/rpath

Signed-off-by: Rokhini Prabhu
---
 PATCHES | 1 +
 1 file changed, 1 insertion(+)

diff --git a/PATCHES b/PATCHES
index 361bb4228..2d0635a14 100644
--- a/PATCHES
+++ b/PATCHES
@@ -535,3 +535,4 @@ github commits starting with 29bdc2f from
 [b0b314c] APPLIED rdar://81276248
 [c992dac] APPLIED rdar://81276248
 [80b1772] APPLIED rdar://81276248
+[1986f39] APPLIED rdar://81276248

From c59fb3e5b45c37dc901ba8a3a82cfef78efbdcbb Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 26 May 2020 16:41:36 +0000
Subject: [PATCH 233/249] dispatch: use auto-reset event for dispatch queue

Use an auto-reset event for the dispatch queue on
Windows. On Linux, `eventfd` is used, which is auto-reset unless `EFD_SEMAPHORE` is specified. This mirrors that behaviour. The test suite continues to pass after this change. Signed-off-by: Rokhini Prabhu --- src/queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/queue.c b/src/queue.c index 2536093e2..67a57406f 100644 --- a/src/queue.c +++ b/src/queue.c @@ -7057,7 +7057,7 @@ _dispatch_runloop_queue_handle_init(void *ctxt) handle = fd; #elif defined(_WIN32) HANDLE hEvent; - hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, + hEvent = CreateEventW(NULL, /*bManualReset=*/FALSE, /*bInitialState=*/FALSE, NULL); if (hEvent == NULL) { DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); From 252434b451da9ea3f9dbd361e0d509424c17835b Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 27 May 2020 15:05:35 -0400 Subject: [PATCH 234/249] Merge pull request #542 from compnerd/auto-reset dispatch: use auto-reset event for dispatch queue Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 2d0635a14..d5920dad3 100644 --- a/PATCHES +++ b/PATCHES @@ -536,3 +536,4 @@ github commits starting with 29bdc2f from [c992dac] APPLIED rdar://81276248 [80b1772] APPLIED rdar://81276248 [1986f39] APPLIED rdar://81276248 +[598ce42] APPLIED rdar://81276248 From 758dfcb272b73ec477ae85cbdb35a70753cf966a Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 25 Mar 2020 08:20:00 -0700 Subject: [PATCH 235/249] IO: match the signature for `DispatchIO` with `dispatch_io_create` The `DispatchIO` constructor maps to `dispatch_io_create`. The value on Windows for the `fd` (which is actually a `HANDLE`) is being truncated since `dispatch_fd_t` is actually `Int` not `Int32` on Windows. Use the `dispatch_fd_t` to ensure that the right size is always passed along. 
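As a sketch of why the width matters (the real typedef lives in the dispatch
headers; take its exact shape here as an assumption):

~~~
#include <stdint.h>

#if defined(_WIN32)
typedef intptr_t dispatch_fd_t;  /* pointer-sized: wide enough for a HANDLE */
#else
typedef int dispatch_fd_t;       /* a POSIX file descriptor */
#endif

/* A wrapper declared with int32_t instead of dispatch_fd_t would chop a
 * 64-bit HANDLE in half before it ever reached dispatch_io_create(). */
void open_channel(dispatch_fd_t fd);
~~~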
Signed-off-by: Rokhini Prabhu --- src/swift/IO.swift | 2 +- src/swift/Private.swift | 2 +- src/swift/Wrapper.swift | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/swift/IO.swift b/src/swift/IO.swift index ad985c944..3c0e22484 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -67,7 +67,7 @@ extension DispatchIO { public convenience init( type: StreamType, - fileDescriptor: Int32, + fileDescriptor: dispatch_fd_t, queue: DispatchQueue, cleanupHandler: @escaping (_ error: Int32) -> Void) { diff --git a/src/swift/Private.swift b/src/swift/Private.swift index 89b1bb2f4..1683e2e51 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -27,7 +27,7 @@ public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ a } @available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") -public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO +public func dispatch_io_create(_ type: UInt, _ fd: dispatch_fd_t, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index 678631b03..1bf26b184 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -91,7 +91,7 @@ public class DispatchIO : DispatchObject { return unsafeBitCast(__wrapped, to: dispatch_object_t.self) } - internal init(__type: UInt, fd: Int32, queue: DispatchQueue, + internal init(__type: UInt, fd: dispatch_fd_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { __wrapped = dispatch_io_create(dispatch_io_type_t(__type), dispatch_fd_t(fd), queue.__wrapped, handler) } From 968304903b117241ef92ef5c1e0ea94928fd17e1 Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Mon, 8 Jun 2020 10:57:36 -0700 Subject: [PATCH 236/249] Merge pull request #539 from compnerd/IO IO: match the signature for `DispatchIO` with `dispatch_io_create` Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index d5920dad3..ccac8b5bb 100644 --- a/PATCHES +++ b/PATCHES @@ -537,3 +537,4 @@ github commits starting with 29bdc2f from [80b1772] APPLIED rdar://81276248 [1986f39] APPLIED rdar://81276248 [598ce42] APPLIED rdar://81276248 +[feb4421] APPLIED rdar://81276248 From 88e7baaab7f54e19d8a808a122bd49b330364b2f Mon Sep 17 00:00:00 2001 From: Dario Rexin Date: Fri, 31 Jul 2020 10:10:48 -0700 Subject: [PATCH 237/249] Add modulemap for static compilation When compiling statically, we have to link against DispatchStubs in addition to the other dependencies, so we are defining a separate modulemap for that. 
Signed-off-by: Rokhini Prabhu --- dispatch/CMakeLists.txt | 12 +++++++++++- dispatch/generic_static/module.modulemap | 19 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 dispatch/generic_static/module.modulemap diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt index 8b8be8cfb..7f68ed381 100644 --- a/dispatch/CMakeLists.txt +++ b/dispatch/CMakeLists.txt @@ -16,7 +16,17 @@ install(FILES DESTINATION "${INSTALL_DISPATCH_HEADERS_DIR}") if(ENABLE_SWIFT) - get_filename_component(MODULE_MAP module.modulemap REALPATH) + set(base_dir "${CMAKE_CURRENT_SOURCE_DIR}") + if(NOT BUILD_SHARED_LIBS) + set(base_dir "${CMAKE_CURRENT_SOURCE_DIR}/generic_static") + endif() + + get_filename_component( + MODULE_MAP + module.modulemap + REALPATH + BASE_DIR "${base_dir}") + install(FILES ${MODULE_MAP} DESTINATION diff --git a/dispatch/generic_static/module.modulemap b/dispatch/generic_static/module.modulemap new file mode 100644 index 000000000..d5d64d2d1 --- /dev/null +++ b/dispatch/generic_static/module.modulemap @@ -0,0 +1,19 @@ +module Dispatch { + requires blocks + export * + link "dispatch" + link "BlocksRuntime" + link "DispatchStubs" +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} + +module CDispatch [system] [extern_c] { + umbrella header "dispatch.h" + export * + requires blocks + link "dispatch" +} From 753ca52ddb25f5ff29c05e097fc230220d8dfeaf Mon Sep 17 00:00:00 2001 From: Dario Rexin Date: Wed, 12 Aug 2020 13:35:44 -0700 Subject: [PATCH 238/249] Merge pull request #544 from drexin/wip-static-modulemap Add modulemap for static compilation Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ccac8b5bb..17118e659 100644 --- a/PATCHES +++ b/PATCHES @@ -538,3 +538,4 @@ github commits starting with 29bdc2f from [1986f39] APPLIED rdar://81276248 [598ce42] APPLIED rdar://81276248 [feb4421] APPLIED rdar://81276248 +[f152471] APPLIED rdar://81276248 From ed27ebe8e7bdf3b45cd9bc5a2046b74f0c4efde9 Mon Sep 17 00:00:00 2001 From: 3405691582 Date: Fri, 31 Jan 2020 20:49:40 -0500 Subject: [PATCH 239/249] Add missing DISPATCH_COCOA_COMPAT preprocessor symbol. _dispatch_runloop_queue_xref_dispose is declared in src/queue_internal.h but the declaration is hidden behind DISPATCH_COCOA_COMPAT. This means the call to _dispatch_runloop_queue_xref_dispose must also be put behind this preprocessor symbol. Signed-off-by: Rokhini Prabhu --- src/object.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/object.c b/src/object.c index 4eb49fda8..028800206 100644 --- a/src/object.c +++ b/src/object.c @@ -226,10 +226,11 @@ _dispatch_xref_dispose(dispatch_object_t dou) _dispatch_mach_xref_dispose(dou._dm); break; #endif +#if DISPATCH_COCOA_COMPAT case DISPATCH_QUEUE_RUNLOOP_TYPE: _dispatch_runloop_queue_xref_dispose(dou._dl); break; - } +#endif } return _dispatch_release_tailcall(dou._os_obj); } From 77590c1fb1e22e200c6c60d8da4876285f1c8e05 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 14 Aug 2020 14:49:24 -0700 Subject: [PATCH 240/249] Merge pull request #537 from 3405691582/MissingDispatchCocoaCompat Add missing DISPATCH_COCOA_COMPAT preprocessor symbol. 
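The rule at work, as a minimal self-contained sketch with hypothetical names
(not libdispatch source): when a declaration is only visible under a feature
guard, every call site must sit behind the same guard.

~~~
#include <stdio.h>

#define FEATURE_COMPAT 0  /* flip to 1 to enable the guarded path */

#if FEATURE_COMPAT
static void feature_dispose(void) { puts("feature path"); }
#endif

static void dispose(int kind)
{
    switch (kind) {
#if FEATURE_COMPAT
    case 1:
        feature_dispose();  /* guarded use matches the guarded declaration */
        break;
#endif
    default:
        break;
    }
}

int main(void) { dispose(1); return 0; }
~~~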
Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 17118e659..75fe01828 100644 --- a/PATCHES +++ b/PATCHES @@ -539,3 +539,4 @@ github commits starting with 29bdc2f from [598ce42] APPLIED rdar://81276248 [feb4421] APPLIED rdar://81276248 [f152471] APPLIED rdar://81276248 +[457b110] APPLIED rdar://81276248 From 4e1147fae6adf2183383232d66b53bae207e92f8 Mon Sep 17 00:00:00 2001 From: 3405691582 Date: Fri, 31 Jan 2020 19:37:19 -0500 Subject: [PATCH 241/249] Make some preliminary porting changes. These changes include: * hw_config.h: prevent referring to sysctlbyname on OpenBSD, as this is not available on all platforms. * transform.c: these stanzas referring to FreeBSD or Linux also apply to OpenBSD. * tests/dispatch_apply.c: stanza applying to Linux applies to OpenBSD and also tweak style for consistency. * tests/dispatch_io_net.c, tests/dispatch_test.h: stanzas applying to FreeBSD also apply to OpenBSD. Signed-off-by: Rokhini Prabhu --- src/shims/hw_config.h | 6 +++++- src/transform.c | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 8ab79f3ed..5ed1739eb 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -187,12 +187,16 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) name = "hw.activecpu"; break; } #elif defined(__FreeBSD__) - (void)c; name = "kern.smp.cpus"; + (void)c; name = "kern.smp.cpus"; +#elif defined(__OpenBSD__) + (void)c; #endif if (name) { size_t valsz = sizeof(val); +#if !defined(__OpenBSD__) r = sysctlbyname(name, &val, &valsz, NULL, 0); (void)dispatch_assume_zero(r); +#endif dispatch_assert(valsz == sizeof(uint32_t)); } else { #if HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) diff --git a/src/transform.c b/src/transform.c index b1b84aafd..6e65567ad 100644 --- a/src/transform.c +++ b/src/transform.c @@ -26,7 +26,7 @@ #include #define OSLittleEndian __LITTLE_ENDIAN #define OSBigEndian __BIG_ENDIAN -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) #include #define OSLittleEndian _LITTLE_ENDIAN #define OSBigEndian _BIG_ENDIAN @@ -35,7 +35,7 @@ #define OSBigEndian 4321 #endif -#if defined(__linux__) || defined(__FreeBSD__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #define OSSwapLittleToHostInt16 le16toh #define OSSwapBigToHostInt16 be16toh #define OSSwapHostToLittleInt16 htole16 From 3df74e6e9f8bf83ea5e1bb7e9c40a49bd7422fcb Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Fri, 14 Aug 2020 14:49:59 -0700 Subject: [PATCH 242/249] Merge pull request #536 from 3405691582/NoSysctlbyname_AddOSSymbol Make some preliminary porting changes. Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 75fe01828..4d735556c 100644 --- a/PATCHES +++ b/PATCHES @@ -540,3 +540,4 @@ github commits starting with 29bdc2f from [feb4421] APPLIED rdar://81276248 [f152471] APPLIED rdar://81276248 [457b110] APPLIED rdar://81276248 +[f13ea5d] APPLIED rdar://81276248 From 7a97c24cfb4504cbbb8121870410d32825c942fc Mon Sep 17 00:00:00 2001 From: Tim Gates Date: Fri, 27 Nov 2020 20:43:14 +1100 Subject: [PATCH 243/249] docs: fix simple typo, transiton -> transition There is a small typo in src/shims/lock.h. Should read `transition` rather than `transiton`. 
Signed-off-by: Rokhini Prabhu --- src/shims/lock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shims/lock.h b/src/shims/lock.h index a05dd1152..9c602724c 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -564,7 +564,7 @@ typedef struct dispatch_once_gate_s { * effect of the dispatch once initialization. * * Per Lemma 2, when the DONE transition happens in a thread zone { N+3, N+4 }, - * then threads can observe this transiton in their { N+2, N+3 } zone at the + * then threads can observe this transition in their { N+2, N+3 } zone at the * earliest. * * Hence for an initializer bracket of { N, N+1 }, the first safe bracket for
From 0e37faf77ef3d2005e3e28fb76592d30e1c774b2 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 19 Jan 2021 08:45:31 -0800 Subject: [PATCH 244/249] Merge pull request #553 from timgates42/bugfix_typo_transition docs: fix simple typo, transiton -> transition Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 4d735556c..12049d978 100644 --- a/PATCHES +++ b/PATCHES @@ -541,3 +541,4 @@ github commits starting with 29bdc2f from [f152471] APPLIED rdar://81276248 [457b110] APPLIED rdar://81276248 [f13ea5d] APPLIED rdar://81276248 +[1c303fa] APPLIED rdar://81276248
From 8ffad06d42a078d61a7ea16fa666b889c02a3729 Mon Sep 17 00:00:00 2001 From: Butta Date: Wed, 2 Dec 2020 18:33:48 +0530 Subject: [PATCH 245/249] [android] Put in fixes for librt and armv7-a Android doesn't have a separate librt; it's just part of libc. Also, the static build wasn't working for armv7-a: the test executables failed to link, hitting the multiple-definition errors listed in android/ndk#176, so use the workaround given there. Signed-off-by: Rokhini Prabhu --- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 75c419c13..36da01122 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -113,7 +113,9 @@ if(__BUILTIN_TRAP) set(HAVE_NORETURN_BUILTIN_TRAP 1) endif() -find_package(LibRT) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Android) + find_package(LibRT) +endif() check_function_exists(_pthread_workqueue_init HAVE__PTHREAD_WORKQUEUE_INIT) check_function_exists(getprogname HAVE_GETPROGNAME)
From 98bfc287073d193b7bd08e00726f779df59b6ce0 Mon Sep 17 00:00:00 2001 From: Dario Rexin Date: Wed, 24 Feb 2021 09:21:37 -0800 Subject: [PATCH 246/249] Merge pull request #554 from buttaface/droid-arm [android] Put in fixes for librt and armv7-a Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 12049d978..a43172521 100644 --- a/PATCHES +++ b/PATCHES @@ -542,3 +542,4 @@ github commits starting with 29bdc2f from [457b110] APPLIED rdar://81276248 [f13ea5d] APPLIED rdar://81276248 [1c303fa] APPLIED rdar://81276248 +[34f383d] APPLIED rdar://81276248
From e42a145fc56404f6f6e54d1d84ec27b4b6f7bdef Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Thu, 22 Jul 2021 10:18:24 -0700 Subject: [PATCH 247/249] shims: adjust the Windows path for Windows 11 The Windows 10 SDK Version 2104 (10.0.20348.0), which adds support for Windows 11, introduces new enumerated values for the logical processor configuration. Only one of the two is documented at MSDN. https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-logical_processor_relationship: ~~~ `RelationNumaNodeEx` Introduced in TBD - Release Iron. Requests that the full affinity be returned. Unlike the other relation types, `RelationNumaNodeEx` is not used on input. It is simply a request for `RelationNumaNode` with full group information. ~~~ Treat this enumerated value as `RelationNumaNode`. It is unclear what the details of `RelationProcessorDie` are currently. For now, we leave that value ignored, though we will likely have to address it in the future once the value is explained.
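Purely as an illustration of the guard style used here (this sketch is not part of the patch; the helper name, the fixed-size buffer, and the x64-only __popcnt64 intrinsic are assumptions), any caller of GetLogicalProcessorInformation can tolerate enumerators added by newer SDKs in the same way:

~~~c
#include <windows.h>
#include <intrin.h> /* __popcnt64; assumes an x64 build */

/* Count logical CPUs, ignoring relationship kinds that do not matter here. */
static DWORD count_logical_cpus(void)
{
	SYSTEM_LOGICAL_PROCESSOR_INFORMATION info[128];
	DWORD bytes = sizeof(info), n = 0, i;

	if (!GetLogicalProcessorInformation(info, &bytes)) return 0;
	for (i = 0; i < bytes / sizeof(info[0]); i++) {
		switch (info[i].Relationship) {
		case RelationProcessorCore:
			n += (DWORD)__popcnt64(info[i].ProcessorMask);
			break;
#if defined(RelationNumaNodeEx) /* same guard as the patch below */
		case RelationNumaNodeEx: /* treated exactly like RelationNumaNode */
#endif
		case RelationNumaNode:
		default: /* still handles the value when the guard is false */
			break;
		}
	}
	return n;
}
~~~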
Signed-off-by: Rokhini Prabhu --- src/shims/hw_config.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 5ed1739eb..788064964 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -156,8 +156,14 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) ++dwProcessorPhysicalCount; dwProcessorLogicalCount += __popcnt64(slpiCurrent->ProcessorMask); break; +#if defined(RelationProcessorDie) + case RelationProcessorDie: +#endif case RelationProcessorPackage: case RelationNumaNode: +#if defined(RelationNumaNodeEx) + case RelationNumaNodeEx: +#endif case RelationCache: case RelationGroup: case RelationAll:
From 1b3d2a5372874f5244c6197d1d57f9a8055fe193 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Mon, 26 Jul 2021 13:36:11 -0700 Subject: [PATCH 248/249] Merge pull request #562 from compnerd/windows-11 shims: adjust the Windows path for Windows 11 Signed-off-by: Rokhini Prabhu --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index a43172521..42b01784b 100644 --- a/PATCHES +++ b/PATCHES @@ -543,3 +543,4 @@ github commits starting with 29bdc2f from [f13ea5d] APPLIED rdar://81276248 [1c303fa] APPLIED rdar://81276248 [34f383d] APPLIED rdar://81276248 +[7870521] APPLIED rdar://81276248
From 3136e153d7bad7721eeec698369775aef58fecb6 Mon Sep 17 00:00:00 2001 From: Rokhini Prabhu Date: Mon, 4 Oct 2021 10:33:22 -0700 Subject: [PATCH 249/249] Merge libdispatch-1324.41.2 Signed-off-by: Rokhini Prabhu --- config/config.h | 16 + dispatch/dispatch.h | 1 + dispatch/queue.h | 4 +- dispatch/workloop.h | 27 + libdispatch.xcodeproj/project.pbxproj | 353 ++++- man/dispatch.3 | 10 +- man/dispatch_after.3 | 19 +- man/dispatch_api.3 | 22 +- man/dispatch_apply.3 | 27 +- man/dispatch_async.3 | 87 +- man/dispatch_data_create.3 | 48 +- man/dispatch_group_create.3 | 40 +- man/dispatch_io_create.3 | 142 +- man/dispatch_io_read.3 | 52 +- man/dispatch_object.3 | 54 +- man/dispatch_once.3 | 1 - man/dispatch_queue_create.3 | 157 ++- man/dispatch_read.3 | 59 +- man/dispatch_semaphore_create.3 | 30 +- man/dispatch_source_create.3 | 135 +- man/dispatch_time.3 | 15 +- os/clock.h | 18 + os/eventlink_private.h | 296 +++++ os/firehose_buffer_private.h | 8 +- os/object.h | 44 +- os/object_private.h | 16 +- os/voucher_private.h | 75 +- os/workgroup.h | 37 + os/workgroup_base.h | 78 ++ os/workgroup_interval.h | 164 +++ os/workgroup_interval_private.h | 188 +++ os/workgroup_object.h | 371 ++++++ os/workgroup_object_private.h | 285 ++++ os/workgroup_parallel.h | 78 ++ os/workgroup_private.h | 17 + private/apply_private.h | 338 +++++ private/mach_private.h | 91 +- private/private.h | 1 + private/queue_private.h | 97 +- private/source_private.h | 6 +- private/time_private.h | 35 + src/apply.c | 467 ++++++- src/benchmark.c | 8 +- src/data_internal.h | 4 +- src/event/event_config.h | 2 +- src/event/event_epoll.c | 6 + src/event/event_internal.h | 33 +- src/event/event_kevent.c | 229 ++-- src/event/workqueue.c | 2 +- src/eventlink.c | 555 ++++++++ src/eventlink_internal.h | 67 + src/firehose/firehose_buffer.c | 15 +-
src/firehose/firehose_inline_internal.h | 2 + src/firehose/firehose_server.c | 9 +- src/firehose/firehose_server_internal.h | 5 +- src/init.c | 185 ++- src/inline_internal.h | 161 ++- src/internal.h | 68 +- src/introspection.c | 30 +- src/io.c | 29 +- src/mach.c | 244 ++-- src/object.c | 7 +- src/object.m | 81 +- src/object_internal.h | 86 +- src/queue.c | 684 +++++++--- src/queue_internal.h | 96 +- src/shims.h | 105 +- src/shims/lock.c | 4 +- src/shims/priority.h | 15 +- src/shims/target.h | 12 +- src/shims/time.h | 23 +- src/shims/tsd.h | 40 +- src/shims/yield.c | 11 +- src/shims/yield.h | 26 +- src/source.c | 69 +- src/source_internal.h | 3 +- src/time.c | 56 +- src/trace.h | 11 + src/voucher.c | 95 +- src/voucher_internal.h | 9 +- src/workgroup.c | 1580 +++++++++++++++++++++++ src/workgroup_internal.h | 223 ++++ xcodeconfig/libdispatch.clean | 10 +- xcodeconfig/libdispatch.dirty | 16 + xcodeconfig/libdispatch.order | 11 + xcodeconfig/libdispatch.xcconfig | 160 --- xcodescripts/install-headers.sh | 21 +- xcodescripts/postprocess-headers.sh | 5 + 88 files changed, 7804 insertions(+), 1318 deletions(-) create mode 100644 os/clock.h create mode 100644 os/eventlink_private.h create mode 100644 os/workgroup.h create mode 100644 os/workgroup_base.h create mode 100644 os/workgroup_interval.h create mode 100644 os/workgroup_interval_private.h create mode 100644 os/workgroup_object.h create mode 100644 os/workgroup_object_private.h create mode 100644 os/workgroup_parallel.h create mode 100644 os/workgroup_private.h create mode 100644 private/apply_private.h create mode 100644 src/eventlink.c create mode 100644 src/eventlink_internal.h create mode 100644 src/workgroup.c create mode 100644 src/workgroup_internal.h diff --git a/config/config.h b/config/config.h index 2fcd922b5..c1ef8aaeb 100644 --- a/config/config.h +++ b/config/config.h @@ -61,6 +61,10 @@ you don't. */ #define HAVE_DECL_VQ_VERYLOWDISK 1 +/* Define to 1 if you have the declaration of `VQ_SERVEREVENT', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_SERVEREVENT 1 + /* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if you don't. */ #define HAVE_DECL_VQ_QUOTA 1 @@ -121,15 +125,27 @@ /* Define if you have the Objective-C runtime */ #define HAVE_OBJC 1 +/* Define to 1 if you have the `posix_fadvise' function. */ +#define HAVE_POSIX_FADVISE 0 + +/* Define to 1 if you have the `posix_spawnp' function. */ +#define HAVE_POSIX_SPAWNP 1 + /* Define to 1 if you have the `pthread_key_init_np' function. */ #define HAVE_PTHREAD_KEY_INIT_NP 1 +/* Define to 1 if you have the `pthread_attr_setcpupercent_np' function. */ +#define HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP 1 + /* Define to 1 if you have the header file. */ #define HAVE_PTHREAD_MACHDEP_H 1 /* Define to 1 if you have the `pthread_main_np' function. */ #define HAVE_PTHREAD_MAIN_NP 1 +/* Define to 1 if you have the `pthread_yield_np' function. */ +#define HAVE_PTHREAD_YIELD_NP 1 + /* Define to 1 if you have the header file. 
*/ /* #undef HAVE_PTHREAD_NP_H */
diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 0ed604fce..9b517f36c 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -62,6 +62,7 @@ #endif #include +#include #include #include #include
diff --git a/dispatch/queue.h b/dispatch/queue.h index dc5aae79a..c4820b6c4 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -496,7 +496,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, - DISPATCH_NOESCAPE void (^block)(size_t)); + DISPATCH_NOESCAPE void (^block)(size_t iteration)); #endif /*! @@ -531,7 +531,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, - void *_Nullable context, void (*work)(void *_Nullable, size_t)); + void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration)); /*! * @function dispatch_get_current_queue
diff --git a/dispatch/workloop.h b/dispatch/workloop.h index 2c6cf18c5..98c4f8a41 100644 --- a/dispatch/workloop.h +++ b/dispatch/workloop.h @@ -133,6 +133,33 @@ void dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, dispatch_autorelease_frequency_t frequency); +/*! + * @function dispatch_workloop_set_os_workgroup + * + * @abstract + * Associates an os_workgroup_t with the specified dispatch workloop. + * + * The worker thread will be a member of the specified os_workgroup_t while executing + * work items submitted to the workloop. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive; passing an activated object is undefined + * and will cause the process to be terminated. + * + * @param workgroup + * The workgroup to associate with this workloop. + * + * The workgroup specified is retained and the previously associated workgroup + * (if any) is released.
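+ *
+ * A minimal usage sketch (illustrative, not part of the original header;
+ * the label is made up and the workgroup is assumed to come from elsewhere):
+ *
+ *	dispatch_workloop_t wl =
+ *			dispatch_workloop_create_inactive("com.example.wl");
+ *	os_workgroup_t wg = get_workgroup();	// hypothetical helper
+ *	dispatch_workloop_set_os_workgroup(wl, wg);
+ *	dispatch_activate(wl);	// configure only while inactive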
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_os_workgroup(dispatch_workloop_t workloop, + os_workgroup_t workgroup); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 68d920082..a4f706e60 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -222,6 +222,51 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9B3713F623D24594001C5C88 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + 9B404D6C255A191A0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DAA255A1E6F0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DC0255A1E7D0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 
/* eventlink.c */; }; + 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BFD342D23C94F3500B08420 /* 
eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B609581F221DFA4B00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -252,6 +297,30 @@ C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6167143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C9C5F80E143C1771006DC718 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A5E26EAF16600362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A5F26EAF16700362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6226EAF1B000362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6326EAF25500362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6426EAF25600362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6526EAF27A00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6626EAF27B00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6726EAF2A000362F72 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E4053A6826EAF2A700362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + 
E4053A6926EAF2A800362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + E4053A6D26EAF55000362F72 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + E4053A6E26EAF55000362F72 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + E4053A7026EAF55000362F72 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E4053A7126EAF55000362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A7226EAF67D00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -395,8 +464,6 @@ E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; E44F9DBE1654405B001DCD38 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; E44F9DBF165440EF001DCD38 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; - E44F9DC016544115001DCD38 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; - E44F9DC116544115001DCD38 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; }; E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; @@ -715,8 +782,8 @@ 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; 5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; tabWidth = 8; }; - 5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; tabWidth = 8; }; + 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; }; + 5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; }; 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_kernel.a; sourceTree = BUILT_PRODUCTS_DIR; }; 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose_kernel.xcconfig; sourceTree = ""; }; 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = ""; }; @@ -733,7 +800,6 @@ 6E326ABB1C229895002A6505 /* dispatch_read2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read2.c; sourceTree = ""; }; 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io_net.c; sourceTree = ""; }; 6E326ABE1C22A577002A6505 /* dispatch_io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io.c; sourceTree = ""; }; - 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_sync_gc.m; sourceTree = ""; }; 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sync_on_main.c; sourceTree = ""; }; 6E326ADC1C234396002A6505 /* dispatch_readsync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_readsync.c; sourceTree = ""; }; 6E326ADE1C23451A002A6505 /* dispatch_concur.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_concur.c; sourceTree = ""; }; @@ -788,7 +854,6 @@ 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = ""; }; 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = ""; }; 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = ""; }; - 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = ""; }; 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; 
path = perf_mach_async.c; sourceTree = ""; }; 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_channel.c; sourceTree = ""; }; @@ -834,7 +899,22 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 9B2A588023A412B400A7BB27 /* eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventlink.c; sourceTree = ""; }; + 9B3713F123D24594001C5C88 /* clock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = ""; }; + 9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_base.h; sourceTree = ""; }; + 9B404D6B255A191A0014912B /* apply_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = apply_private.h; sourceTree = ""; }; 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; + 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = workgroup_object_private.h; sourceTree = ""; }; + 9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup.h; sourceTree = ""; }; + 9B815576234AFC9800DB5CA3 /* workgroup.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = workgroup.c; path = src/workgroup.c; sourceTree = SOURCE_ROOT; }; + 9B8ED5782350C79100507521 /* workgroup_object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_object.h; sourceTree = ""; }; + 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval.h; sourceTree = ""; }; + 9BA656DF236BB55000D13FAE /* workgroup_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_private.h; sourceTree = ""; }; + 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval_private.h; sourceTree = ""; }; + 9BA7221023E293CB0058472E /* workgroup_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_parallel.h; sourceTree = ""; }; + 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_private.h; sourceTree = ""; }; + 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_internal.h; sourceTree = ""; }; + 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_internal.h; sourceTree = ""; }; B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; @@ -851,7 +931,7 @@ C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = ""; }; C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; - C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; tabWidth = 8; }; + C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = ""; }; C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; @@ -879,10 +959,10 @@ E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; - E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; + E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; - E48AF55916E70FD9004105FF /* io_private.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; + E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_private.h; sourceTree = ""; }; E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = ""; }; E49BB6F21E70748100868613 /* libdispatch_armv81.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_armv81.a; sourceTree = BUILT_PRODUCTS_DIR; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -899,6 +979,20 @@ E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = ""; }; E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; + E4C97EFF263868F800628947 /* dispatch_once.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_once.c; sourceTree = ""; }; + E4C97F04263868F800628947 /* dispatch_async_and_wait.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_async_and_wait.c; sourceTree = ""; }; + E4C97F05263868F800628947 /* os_workgroup_multilang.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_multilang.c; sourceTree = ""; }; + E4C97F06263868F800628947 /* os_eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink.c; sourceTree = ""; }; + E4C97F07263868F800628947 /* os_workgroup_basic.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_basic.c; sourceTree = ""; }; + E4C97F08263868F800628947 /* dispatch_qos_cf.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_qos_cf.c; sourceTree = ""; }; + E4C97F09263868F800628947 /* os_workgroup_empty2.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty2.c; sourceTree = ""; }; + E4C97F0A263868F800628947 /* os_workgroup_entitled.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_entitled.c; sourceTree = ""; }; + E4C97F0B263868F800628947 /* dispatch_plusplus.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = dispatch_plusplus.cpp; sourceTree = ""; }; + E4C97F0C263868F800628947 /* os_eventlink_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink_empty.c; sourceTree = ""; }; + E4C97F0D263868F800628947 /* os_workgroup_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty.c; sourceTree = ""; }; + E4C97F0E263868F800628947 /* dispatch_mach.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_mach.c; sourceTree = ""; }; + E4C97F0F263868F800628947 /* dispatch_workloop.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_workloop.c; sourceTree = ""; }; + 
E4C97F10263868F800628947 /* dispatch_cooperative.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_cooperative.c; sourceTree = ""; }; E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; @@ -953,6 +1047,7 @@ E44DB71E11D2FF080074F2AD /* Build Support */, 6E9B6AE21BB39793009E324D /* OS Public Headers */, E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */, + 9BCAF77023A8544100E4F685 /* OS Project Headers */, FC7BEDAA0E83625200161930 /* Dispatch Public Headers */, FC7BEDAF0E83626100161930 /* Dispatch Private Headers */, FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */, @@ -979,6 +1074,7 @@ E43A724F1AF85BBC00BAA921 /* block.cpp */, 5AAB45BF10D30B79004407EA /* data.c */, E420866F16027AE500EEE210 /* data.m */, + 9B2A588023A412B400A7BB27 /* eventlink.c */, E44EBE3B1251659900645D88 /* init.c */, E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, @@ -994,6 +1090,7 @@ C9C5F80D143C1771006DC718 /* transform.c */, 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, + 9B815576234AFC9800DB5CA3 /* workgroup.c */, 6E9C6CA220F9848000EA81C0 /* yield.c */, 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */, @@ -1057,7 +1154,13 @@ 6E9B6AE21BB39793009E324D /* OS Public Headers */ = { isa = PBXGroup; children = ( + 9B38A012234C6D0400E6B90F /* workgroup_base.h */, + 9BA7221023E293CB0058472E /* workgroup_parallel.h */, + 9B81556E234AF0D200DB5CA3 /* workgroup.h */, + 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */, + 9B8ED5782350C79100507521 /* workgroup_object.h */, E4EB4A2614C35ECE00AA0FA9 /* object.h */, + 9B3713F123D24594001C5C88 /* clock.h */, ); name = "OS Public Headers"; path = os; @@ -1094,6 +1197,7 @@ 6E8E4EC31C1A57760004F5CC /* dispatch_after.c */, 92F3FE8F1BEC686300025962 /* dispatch_api.c */, 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */, + E4C97F04263868F800628947 /* dispatch_async_and_wait.c */, 6E9926711D01295F000CB89A /* dispatch_block.c */, 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */, 6E326AB11C224830002A6505 /* dispatch_cascade.c */, @@ -1101,6 +1205,7 @@ 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */, 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, + E4C97F10263868F800628947 /* dispatch_cooperative.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */, 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, @@ -1109,15 +1214,18 @@ 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */, 6E326ABE1C22A577002A6505 /* dispatch_io.c */, 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, - 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */, + E4C97F0E263868F800628947 /* dispatch_mach.c */, B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */, C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */, + E4C97EFF263868F800628947 /* dispatch_once.c */, 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */, 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, + E4C97F0B263868F800628947 /* 
dispatch_plusplus.cpp */, 6E326B441C239B61002A6505 /* dispatch_priority.c */, 6E326AB51C225477002A6505 /* dispatch_proc.c */, B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */, 6E326AB31C224870002A6505 /* dispatch_qos.c */, + E4C97F08263868F800628947 /* dispatch_qos_cf.c */, B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */, 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */, @@ -1131,7 +1239,6 @@ 6E326AE01C234780002A6505 /* dispatch_starfish.c */, 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */, 6E326B121C239431002A6505 /* dispatch_suspend_timer.c */, - 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */, 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */, 6E326B131C239431002A6505 /* dispatch_timer_bit.c */, 6E326B151C239431002A6505 /* dispatch_timer_set_time.c */, @@ -1143,8 +1250,16 @@ 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */, 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, B68330BC1EBCF6080003E71C /* dispatch_wl.c */, + E4C97F0F263868F800628947 /* dispatch_workloop.c */, 6E67D9171C17BA7200FC98AC /* nsoperation.m */, + E4C97F0C263868F800628947 /* os_eventlink_empty.c */, + E4C97F06263868F800628947 /* os_eventlink.c */, 6E4FC9D11C84123600520351 /* os_venture_basic.c */, + E4C97F07263868F800628947 /* os_workgroup_basic.c */, + E4C97F0D263868F800628947 /* os_workgroup_empty.c */, + E4C97F09263868F800628947 /* os_workgroup_empty2.c */, + E4C97F0A263868F800628947 /* os_workgroup_entitled.c */, + E4C97F05263868F800628947 /* os_workgroup_multilang.c */, B6AE9A561D7F53C100AC007F /* perf_async_bench.m */, B6AE9A581D7F53CB00AC007F /* perf_bench.m */, 6EC670C71E37E201004F10D6 /* perf_mach_async.c */, @@ -1166,6 +1281,16 @@ name = Products; sourceTree = ""; }; + 9BCAF77023A8544100E4F685 /* OS Project Headers */ = { + isa = PBXGroup; + children = ( + 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */, + 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */, + ); + name = "OS Project Headers"; + path = src; + sourceTree = ""; + }; C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( @@ -1266,9 +1391,13 @@ isa = PBXGroup; children = ( E454569214746F1B00106147 /* object_private.h */, + 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */, 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */, 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */, 6E9955571C3AF7710071D40C /* venture_private.h */, + 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */, + 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */, + 9BA656DF236BB55000D13FAE /* workgroup_private.h */, E44A8E711805C473009FFDB6 /* voucher_private.h */, E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, ); @@ -1320,19 +1449,20 @@ FC7BEDAF0E83626100161930 /* Dispatch Private Headers */ = { isa = PBXGroup; children = ( - FC7BED930E8361E600161930 /* private.h */, + 9B404D6B255A191A0014912B /* apply_private.h */, + 961B99350F3E83980006BC96 /* benchmark.h */, + 6EC8DC261E3E84610044B652 /* channel_private.h */, C913AC0E143BD34800B78976 /* data_private.h */, E48AF55916E70FD9004105FF /* io_private.h */, + 2BE17C6318EA305E002CA4E8 /* layout_private.h */, + E4ECBAA415253C25002C313C /* mach_private.h */, + C90144641C73A845002638FC /* module.modulemap */, + FC7BED930E8361E600161930 /* private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, - 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, - 6EC8DC261E3E84610044B652 /* channel_private.h */, FCEF047F0F5661960067401F /* source_private.h */, - 
E4ECBAA415253C25002C313C /* mach_private.h */, B683588A1FA77F4900AA0D58 /* time_private.h */, - C90144641C73A845002638FC /* module.modulemap */, - 961B99350F3E83980006BC96 /* benchmark.h */, + 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, - 2BE17C6318EA305E002CA4E8 /* layout_private.h */, ); name = "Dispatch Private Headers"; path = private; @@ -1342,20 +1472,20 @@ isa = PBXGroup; children = ( 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, - FC7BED8F0E8361E600161930 /* internal.h */, - E44757D917F4572600B82CA1 /* inline_internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, + E44757D917F4572600B82CA1 /* inline_internal.h */, + FC7BED8F0E8361E600161930 /* internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h */, 6E4BACC91D48A89500B562AE /* mach_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, + 96929D830F3EA1020041FF5D /* shims.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, + E422A0D412A557B5005E5BDB /* trace.h */, 6E9956061C3B21AA0071D40C /* venture_internal.h */, E44A8E7418066276009FFDB6 /* voucher_internal.h */, - E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, - 96929D830F3EA1020041FF5D /* shims.h */, 6E5ACCAE1D3BF27F007DA2B4 /* event */, 6EF0B2661BA8C43D007FA4F6 /* firehose */, FC1832A0109923B3003403D5 /* shims */, @@ -1392,43 +1522,57 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */, + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */, FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, - E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */, 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, - 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, - E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, C90144651C73A8A3002638FC /* module.modulemap in Headers */, - E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, - 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */, + E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, E4D76A9318E325D200B1F98B /* block.h in Headers */, + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, + 9B404D6C255A191A0014912B /* apply_private.h in Headers */, C90144661C73A9F6002638FC /* module.modulemap in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, - 
F7DC045B2060BBBE00C90737 /* target.h in Headers */, + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, - FC7BED9E0E8361E600161930 /* internal.h in Headers */, + E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */, B609581E221DFA2A00F39D1F /* workloop.h in Headers */, 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, + 9B3713F623D24594001C5C88 /* clock.h in Headers */, + E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, + E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, + E454569314746F1B00106147 /* object_private.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, + FC7BED9E0E8361E600161930 /* internal.h in Headers */, + E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, + E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, + 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */, + 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */, + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, + F7DC045B2060BBBE00C90737 /* target.h in Headers */, 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */, - E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, FC1832A8109923C7003403D5 /* tsd.h in Headers */, 6EA793891D458A5800929B1B /* event_config.h in Headers */, @@ -1448,10 +1592,9 @@ 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */, 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, - E454569314746F1B00106147 /* object_private.h in Headers */, + 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */, B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */, 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */, - E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, E48AF55A16E70FD9004105FF /* io_private.h in Headers */, E4ECBAA515253C25002C313C /* mach_private.h in Headers */, 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */, @@ -1463,6 +1606,7 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */, E43B88322241F19000215272 /* dispatch.h in Headers */, E43B88332241F19000215272 /* base.h in Headers */, E43B88342241F19000215272 /* object.h in Headers */, @@ -1473,12 +1617,18 @@ E43B88392241F19000215272 /* venture_private.h in Headers */, E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */, E43B883B2241F19000215272 /* semaphore.h in Headers */, + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */, + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */, + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */, + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */, E43B883C2241F19000215272 /* group.h in Headers */, E43B883D2241F19000215272 /* priority.h in Headers */, E43B883E2241F19000215272 /* once.h in Headers */, E43B883F2241F19000215272 /* io.h in Headers */, + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */, E43B88402241F19000215272 /* voucher_internal.h in Headers */, E43B88412241F19000215272 /* module.modulemap in Headers */, + E4053A6226EAF1B000362F72 /* 
workgroup_internal.h in Headers */, E43B88422241F19000215272 /* atomic_sfb.h in Headers */, E43B88432241F19000215272 /* data.h in Headers */, E43B88442241F19000215272 /* firehose_internal.h in Headers */, @@ -1486,9 +1636,13 @@ E43B88462241F19000215272 /* private.h in Headers */, E43B88472241F19000215272 /* block.h in Headers */, E43B88482241F19000215272 /* data_private.h in Headers */, + 9B404DC0255A1E7D0014912B /* apply_private.h in Headers */, E43B88492241F19000215272 /* queue_private.h in Headers */, E43B884A2241F19000215272 /* module.modulemap in Headers */, + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */, + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */, E43B884B2241F19000215272 /* source_private.h in Headers */, + 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */, E43B884C2241F19000215272 /* target.h in Headers */, E43B884D2241F19000215272 /* benchmark.h in Headers */, E43B884E2241F19000215272 /* internal.h in Headers */, @@ -1499,6 +1653,7 @@ E43B88522241F19000215272 /* source_internal.h in Headers */, E43B88532241F19000215272 /* semaphore_internal.h in Headers */, E43B88542241F19000215272 /* data_internal.h in Headers */, + E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */, E43B88552241F19000215272 /* voucher_private.h in Headers */, E43B88562241F19000215272 /* io_internal.h in Headers */, E43B88572241F19000215272 /* tsd.h in Headers */, @@ -1525,6 +1680,7 @@ E43B886C2241F19000215272 /* object.h in Headers */, E43B886D2241F19000215272 /* io_private.h in Headers */, E43B886E2241F19000215272 /* mach_private.h in Headers */, + E4053A5F26EAF16700362F72 /* clock.h in Headers */, E43B886F2241F19000215272 /* allocator_internal.h in Headers */, E43B88702241F19000215272 /* introspection_internal.h in Headers */, ); @@ -1534,11 +1690,20 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */, 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */, E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, + E4053A6326EAF25500362F72 /* mach_internal.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */, + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */, + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */, + E4053A6526EAF27A00362F72 /* target.h in Headers */, + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */, + E4053A5E26EAF16600362F72 /* clock.h in Headers */, + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */, + E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1547,17 +1712,23 @@ E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */, E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, E49F24B1125D57FA0057C971 /* group.h in Headers */, + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */, + E4053A7226EAF67D00362F72 /* module.modulemap in Headers */, E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */, E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in 
Headers */, E49F24B5125D57FA0057C971 /* time.h in Headers */, + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */, + 9B404DAA255A1E6F0014912B /* apply_private.h in Headers */, E4D76A9418E325D200B1F98B /* block.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, + 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, E49F24BA125D57FA0057C971 /* internal.h in Headers */, E49F24BC125D57FA0057C971 /* object_internal.h in Headers */, @@ -1582,6 +1753,7 @@ E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */, + E4053A6926EAF2A800362F72 /* priority.h in Headers */, 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, @@ -1590,6 +1762,8 @@ E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, E4ECBAA615253D17002C313C /* mach_private.h in Headers */, E48AF55B16E72D44004105FF /* io_private.h in Headers */, + E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */, + E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */, 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */, E43A710615783F7E0012D38D /* data_private.h in Headers */, E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers */, @@ -1602,6 +1776,13 @@ files = ( E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, E44F9DAF16544026001DCD38 /* internal.h in Headers */, + E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */, + E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */, + E4053A6D26EAF55000362F72 /* workgroup.h in Headers */, + E4053A6E26EAF55000362F72 /* workgroup_interval.h in Headers */, + E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */, + E4053A7026EAF55000362F72 /* object.h in Headers */, + E4053A7126EAF55000362F72 /* clock.h in Headers */, E421E5F91716ADA10090DC9B /* introspection.h in Headers */, 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */, E44F9DB216544032001DCD38 /* object_internal.h in Headers */, @@ -1609,9 +1790,11 @@ 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */, E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, + E4053A6726EAF2A000362F72 /* time.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */, 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, + E4053A6826EAF2A700362F72 /* priority.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, @@ -1625,12 +1808,12 @@ E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, E44F9DBF165440EF001DCD38 /* config.h in Headers */, E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */, + E4053A6426EAF25600362F72 /* mach_internal.h in Headers */, E44F9DB616544043001DCD38 /* trace.h in Headers */, E44F9DB916544056001DCD38 /* getprogname.h in Headers */, E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */, E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */, - 
E44F9DC116544115001DCD38 /* object_private.h in Headers */, - E44F9DC016544115001DCD38 /* object.h in Headers */, + E4053A6626EAF27B00362F72 /* target.h in Headers */, E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */, E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */, ); @@ -1737,6 +1920,7 @@ E43B88312241F19000215272 /* Headers */, E43B88712241F19000215272 /* Sources */, E43B88922241F19000215272 /* Install Headers */, + 9BE52551238748C60041C2A0 /* ShellScript */, ); buildRules = ( ); @@ -1826,7 +2010,6 @@ 08FB7793FE84155DC02AAC07 /* Project object */ = { isa = PBXProject; attributes = { - BuildIndependentTargetsInParallel = YES; DefaultBuildSystemTypeForWorkspace = Latest; LastUpgradeCheck = 1100; TargetAttributes = { @@ -2009,6 +2192,27 @@ shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\"\n"; showEnvVarsInLog = 0; }; + 9BE52551238748C60041C2A0 /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + ); + outputFileListPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -2016,13 +2220,16 @@ ); inputPaths = ( "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + "$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h", ); name = "Postprocess Headers"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { @@ -2051,18 +2258,23 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/eventlink_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; @@ -2076,6 +2288,10 @@ ); inputPaths = ( "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + "$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_parallel.h", ); name = "Postprocess Headers"; outputPaths = ( @@ -2123,7 +2339,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E49F24D7125D57FA0057C971 /* Install Manpages */ = { @@ -2150,22 +2366,27 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", + "$(SRCROOT)/os/eventlink_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4EB4A3014C3A14000AA0FA9 /* Install Headers */ = { @@ -2176,22 +2397,28 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/eventlink_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4EC121712514715000DDBD1 /* Mig Headers */ = { @@ -2216,7 +2443,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_3}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_3}\"\n"; showEnvVarsInLog = 0; }; E4EC122512514715000DDBD1 /* Symlink normal variant */ = { @@ -2232,7 +2459,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4FB8E8F218CD68A004B7A25 /* Install Plists */ = { @@ -2282,6 +2509,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */, + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */, 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */, C01866A61C5973210040FC07 /* protocol.defs in Sources */, C01866AB1C5973210040FC07 /* firehose.defs in Sources */, @@ -2328,9 +2557,11 @@ 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */, 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */, 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, 6EA962971D48622600759D53 /* event.c in Sources */, @@ -2353,6 +2584,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */, + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */, E43B88722241F19000215272 /* provider.d in Sources */, E43B88732241F19000215272 /* protocol.defs in Sources */, E43B88742241F19000215272 /* firehose.defs in Sources */, @@ -2390,6 +2623,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77323A8551300E4F685 
/* eventlink.c in Sources */, + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */, E49BB6D11E70748100868613 /* provider.d in Sources */, E49BB6D21E70748100868613 /* protocol.defs in Sources */, E49BB6D41E70748100868613 /* firehose.defs in Sources */, @@ -2427,6 +2662,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */, + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */, E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, @@ -2464,6 +2701,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */, + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */, E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, @@ -2502,6 +2741,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */, + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */, E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, diff --git a/man/dispatch.3 b/man/dispatch.3 index 6e5cfed48..b1c4309d4 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -13,14 +13,16 @@ concurrent execution via the core functions described in .Xr dispatch_async 3 and .Xr dispatch_apply 3 . .Pp -Dispatch queues are the basic units of organization of blocks. Several queues -are created by default, and applications may create additional queues for their -own use. See +Dispatch queues are the basic units of organization of blocks. +Several queues are created by default, and applications may create additional +queues for their own use. +See .Xr dispatch_queue_create 3 for more information. .Pp Dispatch groups allow applications to track the progress of blocks submitted to -queues and take action when the blocks complete. See +queues and take action when the blocks complete. +See .Xr dispatch_group_create 3 for more information. .Pp diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 index db34af0e3..7463d1c5f 100644 --- a/man/dispatch_after.3 +++ b/man/dispatch_after.3 @@ -40,6 +40,15 @@ and the time at which the function is called, with the leeway capped to at least .Pp For a more detailed description about submitting blocks to queues, see .Xr dispatch_async 3 . +.Sh FUNDAMENTALS +The +.Fn dispatch_after +function is a wrapper around +.Fn dispatch_after_f . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_time 3 .Sh CAVEATS .Fn dispatch_after retains the passed queue. @@ -57,13 +66,3 @@ The result of passing as the .Fa when parameter is undefined. -.Pp -.Sh FUNDAMENTALS -The -.Fn dispatch_after -function is a wrapper around -.Fn dispatch_after_f . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_api.3 b/man/dispatch_api.3 index 912338672..c82425051 100644 --- a/man/dispatch_api.3 +++ b/man/dispatch_api.3 @@ -11,15 +11,17 @@ consider when designing and implementing API in terms of dispatch queues and blocks. .Pp A general recommendation is to allow both a callback block and target dispatch -queue to be specified. This gives the application the greatest flexibility in -handling asynchronous events. 
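To make the dispatch_after semantics documented in the dispatch_after.3 hunk above concrete, here is a minimal sketch in plain C with blocks (the queue label is illustrative, not taken from the patch):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.after", NULL);

        // dispatch_after retains the queue until the block has been
        // submitted; the deadline is a lower bound and the system may
        // apply some leeway.
        dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 3 * NSEC_PER_SEC),
            q, ^{
                printf("fired after ~3 seconds\n");
            });

        dispatch_main(); // processes blocks forever; never returns
    }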
+queue to be specified. +This gives the application the greatest flexibility in handling asynchronous +events. .Pp It's also recommended that interfaces take only a single block as the last -parameter. This is both for consistency across projects, as well as the visual -aesthetics of multiline blocks that are declared inline. The dispatch queue to -which the block will be submitted should immediately precede the block argument -(second-to-last argument). For example: -.Pp +parameter. +This is both for consistency across projects, as well as the visual aesthetics +of multiline blocks that are declared inline. +The dispatch queue to which the block will be submitted should immediately +precede the block argument (second-to-last argument). +For example: .Bd -literal -offset indent read_async(file, callback_queue, ^{ printf("received callback.\\n"); @@ -34,10 +36,8 @@ pointer, and a new last parameter is added, which is the function to call. The function based callback should pass the context pointer as the first argument, and the subsequent arguments should be identical to the block based variant (albeit offset by one in order). -.Pp -It is also important to use consistent naming. The dispatch API, for example, -uses the suffix "_f" for function based variants. -.Pp +It is also important to use consistent naming. +The dispatch API, for example, uses the suffix "_f" for function based variants. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 57c99a8a7..7f3651dfd 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -30,14 +30,14 @@ dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) { }); .Ed .Pp -Although any queue can be used, it is strongly recommended to use +Although any queue can be used, it is strongly recommended to use .Vt DISPATCH_APPLY_AUTO -as the -.Vt queue +as the +.Vt queue argument to both .Fn dispatch_apply and -.Fn dispatch_apply_f , +.Fn dispatch_apply_f , as shown in the example above, since this allows the system to automatically use worker threads that match the configuration of the current thread as closely as possible. No assumptions should be made about which global concurrent queue will be used. @@ -75,7 +75,8 @@ for (i = count - (count % STRIDE); i < count; i++) { .Ed .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. In other words, the synchronous function borrows the +on the target queue. +In other words, the synchronous function borrows the reference of the calling function (this is valid because the calling function is blocked waiting for the result of the synchronous function, and therefore cannot modify the reference count of the target queue until after the @@ -95,7 +96,7 @@ or .Fn dispatch_async_f will incur more overhead and does not express the desired parallel execution semantics to the system, so may not create an optimal number of worker threads for a parallel workload. -For this reason, prefer to use +For this reason, prefer to use .Fn dispatch_apply or .Fn dispatch_apply_f @@ -105,6 +106,10 @@ The .Fn dispatch_apply function is a wrapper around .Fn dispatch_apply_f . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_queue_create 3 .Sh CAVEATS Unlike .Fn dispatch_async , @@ -112,11 +117,7 @@ a block submitted to .Fn dispatch_apply is expected to be either independent or dependent .Em only -on work already performed in lower-indexed invocations of the block. 
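Since dispatch_apply.3 notes that dispatch_apply is a wrapper around dispatch_apply_f, a short sketch of the function-based variant may help (plain C, illustrative names; per the "_f" naming convention discussed in dispatch_api.3, the context pointer is passed first):

    #include <dispatch/dispatch.h>
    #include <stddef.h>

    // Invoked once per iteration; `ctx` is the context pointer and the
    // iteration index is appended as the final argument.
    static void square_entry(void *ctx, size_t i)
    {
        double *out = ctx;
        out[i] = (double)i * (double)i;
    }

    int main(void)
    {
        double out[1024];
        // DISPATCH_APPLY_AUTO lets the system choose worker threads that
        // match the caller's configuration, as recommended above.
        dispatch_apply_f(1024, DISPATCH_APPLY_AUTO, out, square_entry);
        return 0;
    }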
If -the block's index dependency is non-linear, it is recommended to -use a for-loop around invocations of +on work already performed in lower-indexed invocations of the block. +If the block's index dependency is non-linear, it is recommended to use a +for-loop around invocations of .Fn dispatch_async . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_queue_create 3 diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 99c532d40..bac733139 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -31,20 +31,23 @@ and .Fn dispatch_sync functions schedule blocks for concurrent execution within the .Xr dispatch 3 -framework. Blocks are submitted to a queue which dictates the policy for their -execution. See +framework. +Blocks are submitted to a queue which dictates the policy for their execution. +See .Xr dispatch_queue_create 3 for more information about creating dispatch queues. .Pp These functions support efficient temporal synchronization, background -concurrency and data-level concurrency. These same functions can also be used -for efficient notification of the completion of asynchronous blocks (a.k.a. -callbacks). +concurrency and data-level concurrency. +These same functions can also be used for efficient notification of the +completion of asynchronous blocks (a.k.a. callbacks). .Sh TEMPORAL SYNCHRONIZATION Synchronization is often required when multiple threads of execution access -shared data concurrently. The simplest form of synchronization is -mutual-exclusion (a lock), whereby different subsystems execute concurrently -until a shared critical section is entered. In the +shared data concurrently. +The simplest form of synchronization is mutual-exclusion (a lock), whereby +different subsystems execute concurrently until a shared critical section is +entered. +In the .Xr pthread 3 family of procedures, temporal synchronization is accomplished like so: .Bd -literal -offset indent @@ -60,7 +63,8 @@ assert(r == 0); The .Fn dispatch_sync function may be used with a serial queue to accomplish the same style of -synchronization. For example: +synchronization. +For example: .Bd -literal -offset indent dispatch_sync(my_queue, ^{ // critical section @@ -74,19 +78,21 @@ left without restoring the queue to a reentrant state. The .Fn dispatch_async function may be used to implement deferred critical sections when the result -of the block is not needed locally. Deferred critical sections have the same -synchronization properties as the above code, but are non-blocking and -therefore more efficient to perform. For example: +of the block is not needed locally. +Deferred critical sections have the same synchronization properties as the above +code, but are non-blocking and therefore more efficient to perform. +For example: .Bd -literal dispatch_async(my_queue, ^{ // critical section }); .Ed .Sh BACKGROUND CONCURRENCY -.The +The .Fn dispatch_async function may be used to execute trivial background tasks on a global concurrent -queue. For example: +queue. +For example: .Bd -literal dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ // background operation @@ -98,8 +104,9 @@ This approach is an efficient replacement for .Sh COMPLETION CALLBACKS Completion callbacks can be accomplished via nested calls to the .Fn dispatch_async -function. It is important to remember to retain the destination queue before the -first call to +function. 
+It is important to remember to retain the destination queue before the first +call to .Fn dispatch_async , and to release that queue at the end of the completion callback to ensure the destination queue is not deallocated while the completion callback is pending. @@ -130,21 +137,24 @@ async_read(object_t obj, .Sh RECURSIVE LOCKS While .Fn dispatch_sync -can replace a lock, it cannot replace a recursive lock. Unlike locks, queues -support both asynchronous and synchronous operations, and those operations are -ordered by definition. A recursive call to +can replace a lock, it cannot replace a recursive lock. +Unlike locks, queues support both asynchronous and synchronous operations, and +those operations are ordered by definition. +A recursive call to .Fn dispatch_sync causes a simple deadlock as the currently executing block waits for the next block to complete, but the next block will not start until the currently running block completes. .Pp -As the dispatch framework was designed, we studied recursive locks. We found -that the vast majority of recursive locks are deployed retroactively when -ill-defined lock hierarchies are discovered. As a consequence, the adoption of -recursive locks often mutates obvious bugs into obscure ones. This study also -revealed an insight: if reentrancy is unavoidable, then reader/writer locks are -preferable to recursive locks. Disciplined use of reader/writer locks enable -reentrancy only when reentrancy is safe (the "read" side of the lock). +As the dispatch framework was designed, we studied recursive locks. +We found that the vast majority of recursive locks are deployed retroactively +when ill-defined lock hierarchies are discovered. +As a consequence, the adoption of recursive locks often mutates obvious bugs +into obscure ones. +This study also revealed an insight: if reentrancy is unavoidable, then +reader/writer locks are preferable to recursive locks. +Disciplined use of reader/writer locks enable reentrancy only when reentrancy is +safe (the "read" side of the lock). .Pp Nevertheless, if it is absolutely necessary, what follows is an imperfect way of implementing recursive locks using the dispatch framework: @@ -168,17 +178,17 @@ calls .Fn dispatch_sync against queue B which runs on thread Y which recursively calls .Fn dispatch_sync -against queue A, which deadlocks both examples. This is bug-for-bug compatible -with nontrivial pthread usage. In fact, nontrivial reentrancy is impossible to -support in recursive locks once the ultimate level of reentrancy is deployed -(IPC or RPC). +against queue A, which deadlocks both examples. +This is bug-for-bug compatible with nontrivial pthread usage. +In fact, nontrivial reentrancy is impossible to support in recursive locks once +the ultimate level of reentrancy is deployed (IPC or RPC). .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. In other words, the synchronous function borrows the -reference of the calling function (this is valid because the calling function -is blocked waiting for the result of the synchronous function, and therefore -cannot modify the reference count of the target queue until after the -synchronous function has returned). +on the target queue. 
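The deadlock described for recursive use of dispatch_sync can be shown in a few lines; this is the anti-pattern to avoid, not working code (queue label illustrative):

    #include <dispatch/dispatch.h>

    static void deadlock_demo(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.serial", NULL);
        dispatch_sync(q, ^{
            // The outer block occupies the serial queue while waiting
            // for the inner block, which can never start on that queue.
            dispatch_sync(q, ^{ /* never runs: deadlock */ });
        });
    }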
+In other words, the synchronous function borrows the reference of the calling +function (this is valid because the calling function is blocked waiting for the +result of the synchronous function, and therefore cannot modify the reference +count of the target queue until after the synchronous function has returned). For example: .Bd -literal queue = dispatch_queue_create("com.example.queue", NULL); @@ -199,9 +209,11 @@ Conceptually, is a convenient wrapper around .Fn dispatch_async with the addition of a semaphore to wait for completion of the block, and a -wrapper around the block to signal its completion. See +wrapper around the block to signal its completion. +See .Xr dispatch_semaphore_create 3 -for more information about dispatch semaphores. The actual implementation of the +for more information about dispatch semaphores. +The actual implementation of the .Fn dispatch_sync function may be optimized and differ from the above description. .Pp @@ -226,7 +238,6 @@ parameter is passed to the .Fa function when it is invoked on the target .Fa queue . -.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_apply 3 , diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index b3a216e4f..b1a396e47 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -55,10 +55,12 @@ .Vt dispatch_data_t dispatch_data_empty ; .Sh DESCRIPTION Dispatch data objects are opaque containers of bytes that represent one or more -regions of memory. They are created either from memory buffers managed by the -application or the system or from other dispatch data objects. Dispatch data -objects are immutable and the memory regions they represent are required to -remain unchanged for the lifetime of all data objects that reference them. +regions of memory. +They are created either from memory buffers managed by the application or the +system or from other dispatch data objects. +Dispatch data objects are immutable and the memory regions they represent are +required to remain unchanged for the lifetime of all data objects that reference +them. Dispatch data objects avoid copying the represented memory as much as possible. Multiple data objects can represent the same memory regions or subsections thereof. @@ -76,8 +78,8 @@ block will be submitted to the specified when the object reaches the end of its lifecycle, indicating that the system no longer references the .Fa buffer . -This allows the application to deallocate -the associated storage. The +This allows the application to deallocate the associated storage. +The .Fa queue argument is ignored if one of the following predefined destructors is passed: .Bl -tag -width DISPATCH_DATA_DESTRUCTOR_DEFAULT -compact -offset indent @@ -111,26 +113,29 @@ function creates a new data object by mapping the memory represented by the provided .Fa data object as a single contiguous memory region (moving or copying memory as -necessary). If the +necessary). +If the .Fa buffer_ptr and .Fa size_ptr references are not .Dv NULL , they are filled with the location and extent of the contiguous region, allowing -direct read access to the mapped memory. These values are valid only as long as -the newly created object has not been released. +direct read access to the mapped memory. +These values are valid only as long as the newly created object has not been +released. .Sh ACCESS The .Fn dispatch_data_apply function provides read access to represented memory without requiring it to be -mapped as a single contiguous region. 
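As a hedged illustration of the buffer-ownership and mapping rules described in the dispatch_data_create.3 hunk above, a minimal non-ARC C sketch (buffer contents arbitrary):

    #include <dispatch/dispatch.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t size = 64;
        char *buf = malloc(size);
        memset(buf, 'x', size);

        // With the predefined FREE destructor the queue argument is
        // ignored; the system calls free(buf) at end of lifecycle.
        dispatch_data_t data = dispatch_data_create(buf, size, NULL,
            DISPATCH_DATA_DESTRUCTOR_FREE);

        const void *ptr; size_t len;
        dispatch_data_t map = dispatch_data_create_map(data, &ptr, &len);
        // ptr/len remain valid only while `map` has not been released.

        dispatch_release(map);
        dispatch_release(data);
        return 0;
    }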
It traverses the memory regions -represented by the +mapped as a single contiguous region. +It traverses the memory regions represented by the .Fa data argument in logical order, invokes the specified .Fa applier block for each region and returns a boolean indicating whether traversal -completed successfully. The +completed successfully. +The .Fa applier block is passed the following arguments for each memory region and returns a boolean indicating whether traversal should continue: @@ -170,7 +175,8 @@ specified by the argument among the regions represented by the provided .Fa data object and returns a newly created copy of the data object representing that -region. The variable specified by the +region. +The variable specified by the .Fa offset_ptr argument is filled with the logical position where the returned object starts in the @@ -198,17 +204,19 @@ Data objects passed as arguments to a dispatch data .Sy create or .Sy copy -function can be released when the function returns. The newly created object -holds implicit references to their constituent memory regions as necessary. +function can be released when the function returns. +The newly created object holds implicit references to their constituent memory +regions as necessary. .Pp The functions .Fn dispatch_data_create_map and .Fn dispatch_data_apply return an interior pointer to represented memory that is only valid as long as -an associated object has not been released. When Objective-C Automated -Reference Counting is enabled, care needs to be taken if that object is held in -a variable with automatic storage. It may need to be annotated with the +an associated object has not been released. +When Objective-C Automated Reference Counting is enabled, care needs to be taken +if that object is held in a variable with automatic storage. +It may need to be annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong @@ -216,5 +224,5 @@ instance variable instead, to ensure that the object is not released prematurely before memory accesses via the interor pointer have been completed. .Sh SEE ALSO .Xr dispatch 3 , -.Xr dispatch_object 3 , -.Xr dispatch_io_read 3 +.Xr dispatch_io_read 3 , +.Xr dispatch_object 3 diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index d82391e82..954df2117 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -68,7 +68,8 @@ has elapsed. If the .Fa group becomes empty within the specified amount of time, the function will return zero -indicating success. Otherwise, a non-zero return code will be returned. +indicating success. +Otherwise, a non-zero return code will be returned. When .Va DISPATCH_TIME_FOREVER is passed as the @@ -93,7 +94,8 @@ notification is pending, therefore it is valid to release the .Fa group after setting a notification block. The group will be empty at the time the notification block is submitted to the -target queue. The group may either be released with +target queue. +The group may either be released with .Fn dispatch_release or reused for additional operations. .Pp @@ -141,12 +143,19 @@ functions are wrappers around and .Fn dispatch_group_notify_f respectively. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_object 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_time 3 .Sh CAVEATS In order to ensure deterministic behavior, it is recommended to call .Fn dispatch_group_wait -only once all blocks have been submitted to the group. 
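A minimal sketch of the group lifecycle covered in the dispatch_group_create.3 hunk (plain C; the work blocks are placeholders):

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_group_t g = dispatch_group_create();
        dispatch_queue_t q =
            dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

        for (int i = 0; i < 4; i++) {
            // Wraps dispatch_group_enter/leave around an async block.
            dispatch_group_async(g, q, ^{ /* independent unit of work */ });
        }

        // Per the caveats, every block is submitted before waiting.
        dispatch_group_wait(g, DISPATCH_TIME_FOREVER);
        printf("group drained\n");
        dispatch_release(g);
        return 0;
    }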
If it is later -determined that new blocks should be run, it is recommended not to reuse an -already-running group, but to create a new group. +only once all blocks have been submitted to the group. +If it is later determined that new blocks should be run, it is recommended not +to reuse an already-running group, but to create a new group. .Pp .Fn dispatch_group_wait returns as soon as there are exactly zero @@ -155,26 +164,21 @@ blocks associated with a group (more precisely, as soon as every .Fn dispatch_group_enter call has been balanced by a .Fn dispatch_group_leave -call). If one thread waits for a group while another thread submits -new blocks to the group, then the count of associated blocks might -momentarily reach zero before all blocks have been submitted. If this happens, +call). +If one thread waits for a group while another thread submits new blocks to the +group, then the count of associated blocks might momentarily reach zero before +all blocks have been submitted. +If this happens, .Fn dispatch_group_wait will return too early: some blocks associated with the group have finished, but some have not yet been submitted or run. .Pp However, as a special case, a block associated with a group may submit new -blocks associated with its own group. In this case, the behavior is -deterministic: a waiting thread will +blocks associated with its own group. +In this case, the behavior is deterministic: a waiting thread will .Em not wake up until the newly submitted blocks have also finished. .Pp All of the foregoing also applies to -.Fn dispath_group_notify +.Fn dispatch_group_notify as well, with "block to be submitted" substituted for "waiting thread". -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_object 3 , -.Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 83e551401..7e2f99879 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -57,18 +57,22 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. It is an application of the ideas and idioms present in the +operations. +It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to create and configure dispatch I/O -channels. Reading from and writing to these channels is covered in the +channels. +Reading from and writing to these channels is covered in the .Xr dispatch_io_read 3 -page. The dispatch I/O framework also provides the convenience functions +page. 
+The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 @@ -82,16 +86,17 @@ Dispatch I/O channels can have one of the following types: .Bl -tag -width DISPATCH_IO_STREAM -compact -offset indent .It DISPATCH_IO_STREAM channels that represent a stream of bytes and do not support reads and writes -at arbitrary offsets, such as pipes or sockets. Channels of this type perform -read and write operations sequentially at the current file pointer position and -ignore any offset specified. Depending on the underlying file descriptor, read -operations may be performed simultaneously with write operations. +at arbitrary offsets, such as pipes or sockets. +Channels of this type perform read and write operations sequentially at the +current file pointer position and ignore any offset specified. +Depending on the underlying file descriptor, read operations may be performed +simultaneously with write operations. .It DISPATCH_IO_RANDOM -channels that represent random access files on disk. Only supported for -seekable file descriptors and paths. Channels of this type may perform -submitted read and write operations concurrently at the specified offset -(interpreted relative to the position of the file pointer when the channel was -created). +channels that represent random access files on disk. +Only supported for seekable file descriptors and paths. +Channels of this type may perform submitted read and write operations +concurrently at the specified offset (interpreted relative to the position of +the file pointer when the channel was created). .El .Sh CHANNEL OPENING AND CLOSING The @@ -102,13 +107,13 @@ functions create a dispatch I/O channel of provided .Fa type from a file descriptor .Fa fd -or an absolute pathname, respectively. They can be thought of as analogous to -the +or an absolute pathname, respectively. +They can be thought of as analogous to the .Xr fdopen 3 POSIX function and the .Xr fopen 3 -function in the standard C library. For a channel created from a pathname, the -provided +function in the standard C library. +For a channel created from a pathname, the provided .Fa path , .Fa oflag and @@ -122,20 +127,22 @@ The provided block will be submitted to the specified .Fa queue when all I/O operations on the channel have completed and it is closed or -reaches the end of its lifecycle. If an error occurs during channel creation, -the +reaches the end of its lifecycle. +If an error occurs during channel creation, the .Fa cleanup_handler block will be submitted immediately and passed an .Fa error -parameter with the POSIX error encountered. If an invalid +parameter with the POSIX error encountered. +If an invalid .Fa type or a non-absolute .Fa path argument is specified, these functions will return NULL and the .Fa cleanup_handler -will not be invoked. After successfully creating a dispatch I/O channel from a -file descriptor, the application must take care not to modify that file -descriptor until the associated +will not be invoked. +After successfully creating a dispatch I/O channel from a file descriptor, the +application must take care not to modify that file descriptor until the +associated .Fa cleanup_handler is invoked, see .Sx "FILEDESCRIPTOR OWNERSHIP" @@ -143,14 +150,15 @@ for details. .Pp The .Fn dispatch_io_close -function closes a dispatch I/O channel to new submissions of I/O operations. If +function closes a dispatch I/O channel to new submissions of I/O operations. 
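A sketch of channel creation and closing under the rules above (the path and error handling are illustrative only):

    #include <dispatch/dispatch.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        dispatch_queue_t q = dispatch_get_main_queue();

        // An absolute path is required; the cleanup handler runs when
        // the channel reaches the end of its lifecycle, or immediately
        // with an error if creation fails.
        dispatch_io_t ch = dispatch_io_create_with_path(DISPATCH_IO_RANDOM,
            "/tmp/example.dat", O_RDONLY, 0, q, ^(int error) {
                if (error) fprintf(stderr, "cleanup: error %d\n", error);
            });
        if (!ch) return 1; // invalid type or non-absolute path

        dispatch_io_close(ch, 0); // without DISPATCH_IO_STOP, pending I/O completes
        dispatch_release(ch);
        dispatch_main();
    }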
+If .Dv DISPATCH_IO_STOP is passed in the .Fa flags parameter, the system will in addition not perform the I/O operations already submitted to the channel that are still pending and will make a best effort to -interrupt any ongoing operations. Handlers for operations so affected will be -passed the +interrupt any ongoing operations. +Handlers for operations so affected will be passed the .Er ECANCELED error code, along with any partial results. .Sh CHANNEL CONFIGURATION @@ -164,8 +172,7 @@ and .Fn dispatch_io_set_low_water functions configure the water mark settings of a .Fa channel . -The system will read -or write at least the number of bytes specified by +The system will read or write at least the number of bytes specified by .Fa low_water before submitting an I/O handler with partial results, and will make a best effort to submit an I/O handler as soon as the number of bytes read or written @@ -176,17 +183,18 @@ The .Fn dispatch_io_set_interval function configures the time .Fa interval -at which I/O handlers are submitted (measured in nanoseconds). If +at which I/O handlers are submitted (measured in nanoseconds). +If .Dv DISPATCH_IO_STRICT_INTERVAL is passed in the .Fa flags parameter, the interval will be strictly observed even if there is an insufficient amount of data to deliver; otherwise delivery will be skipped for intervals where the amount of available data is inferior to the channel's -low-water mark. Note that the system may defer enqueueing interval I/O handlers +low-water mark. +Note that the system may defer enqueueing interval I/O handlers by a small unspecified amount of leeway in order to align with other system activity for improved system performance or power consumption. -.Pp .Sh DATA DELIVERY The size of data objects passed to I/O handlers for a channel will never be larger than the high-water mark set on the channel; it will also never be @@ -202,53 +210,57 @@ the channel has an interval with the flag set .El Bear in mind that dispatch I/O channels will typically deliver amounts of data -significantly higher than the low-water mark. The default value for the -low-water mark is unspecified, but must be assumed to allow intermediate -handler invocations. The default value for the high-water mark is -unlimited (i.e.\& +significantly higher than the low-water mark. +The default value for the low-water mark is unspecified, but must be assumed to +allow intermediate handler invocations. +The default value for the high-water mark is unlimited (i.e.\& .Dv SIZE_MAX ) . Channels that require intermediate results of fixed size should have both the -low-water and the high-water mark set to that size. Channels that do not wish -to receive any intermediate results should have the low-water mark set to +low-water and the high-water mark set to that size. +Channels that do not wish to receive any intermediate results should have the +low-water mark set to .Dv SIZE_MAX . -.Pp .Sh FILEDESCRIPTOR OWNERSHIP When an application creates a dispatch I/O channel from a file descriptor with the .Fn dispatch_io_create function, the system takes control of that file descriptor until the channel is closed, an error occurs on the file descriptor or all references to the channel -are released. At that time the channel's cleanup handler will be enqueued and -control over the file descriptor relinquished, making it safe for the -application to +are released. 
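The water-mark and interval settings compose as follows; a hedged snippet assuming an existing `channel` from the surrounding context (fixed 4 KiB deliveries plus a periodic flush):

    // Setting both marks to the same value yields fixed-size
    // intermediate deliveries, except for a possibly short final one.
    dispatch_io_set_low_water(channel, 4096);
    dispatch_io_set_high_water(channel, 4096);

    // Strict 100 ms delivery interval, even when less than the
    // low-water mark of data has accumulated.
    dispatch_io_set_interval(channel, 100 * NSEC_PER_MSEC,
        DISPATCH_IO_STRICT_INTERVAL);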
+At that time the channel's cleanup handler will be enqueued and control over the +file descriptor relinquished, making it safe for the application to .Xr close 2 -the file descriptor. While a file descriptor is under the control of a dispatch -I/O channel, file descriptor flags such as +the file descriptor. +While a file descriptor is under the control of a dispatch I/O channel, file +descriptor flags such as .Dv O_NONBLOCK -will be modified by the system on behalf of the application. It is an error for -the application to modify a file descriptor directly while it is under the -control of a dispatch I/O channel, but it may create further I/O channels -from that file descriptor or use the +will be modified by the system on behalf of the application. +It is an error for the application to modify a file descriptor directly while it +is under the control of a dispatch I/O channel, but it may create further I/O +channels from that file descriptor or use the .Xr dispatch_read 3 and .Xr dispatch_write 3 -convenience functions with that file descriptor. If multiple I/O channels have +convenience functions with that file descriptor. +If multiple I/O channels have been created from the same file descriptor, all the associated cleanup handlers will be submitted together once the last channel has been closed resp.\& all -references to those channels have been released. If convenience functions have -also been used on that file descriptor, submission of their handlers will be -tied to the submission of the channel cleanup handlers as well. -.Pp +references to those channels have been released. +If convenience functions have also been used on that file descriptor, submission +of their handlers will be tied to the submission of the channel cleanup handlers +as well. .Sh BARRIER OPERATIONS The .Fn dispatch_io_barrier -function schedules a barrier operation on an I/O channel. The specified barrier -block will be run once, after all current I/O operations (such as -.Xr read 2 or +function schedules a barrier operation on an I/O channel. +The specified barrier block will be run once, after all current I/O operations +(such as +.Xr read 2 +or .Xr write 2 ) on the underlying -file descriptor have finished. No new I/O operations will start until the -barrier block finishes. +file descriptor have finished. +No new I/O operations will start until the barrier block finishes. .Pp The barrier block may operate on the underlying file descriptor with functions like @@ -266,17 +278,17 @@ There is no synchronization between a barrier block and any .Xr dispatch_io_read 3 or .Xr dispatch_io_write 3 -handler blocks; they may be running at the same time. The barrier block itself -is responsible for any required synchronization. +handler blocks; they may be running at the same time. +The barrier block itself is responsible for any required synchronization. .Sh MEMORY MODEL Dispatch I/O channel objects are retained and released via calls to .Fn dispatch_retain and .Fn dispatch_release . .Sh SEE ALSO +.Xr open 2 , .Xr dispatch 3 , .Xr dispatch_io_read 3 , .Xr dispatch_object 3 , .Xr dispatch_read 3 , -.Xr fopen 3 , -.Xr open 2 +.Xr fopen 3 diff --git a/man/dispatch_io_read.3 b/man/dispatch_io_read.3 index 26a11e894..3cff4faf8 100644 --- a/man/dispatch_io_read.3 +++ b/man/dispatch_io_read.3 @@ -26,30 +26,34 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. It is an application of the ideas and idioms present in the +operations. 
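A short, hedged sketch of a barrier under the ownership rules above (`channel` and `fd` are assumed from context; whether fsync is the right operation depends on the application):

    #include <unistd.h>

    dispatch_io_barrier(channel, ^{
        // Runs once all in-flight operations on the underlying file
        // descriptor have finished; no new I/O starts until it returns.
        fsync(fd);
    });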
+It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to read from and write to dispatch I/O -channels. Creation and configuration of these channels is covered in the +channels. +Creation and configuration of these channels is covered in the .Xr dispatch_io_create 3 -page. The dispatch I/O framework also provides the convenience functions +page. +The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 for uses that do not require the full functionality provided by I/O channels. -.Pp .Sh FUNDAMENTALS The .Fn dispatch_io_read and .Fn dispatch_io_write functions are used to perform asynchronous read and write operations on -dispatch I/O channels. They can be thought of as asynchronous versions of the +dispatch I/O channels. +They can be thought of as asynchronous versions of the .Xr fread 3 and .Xr fwrite 3 @@ -68,7 +72,8 @@ been read since the handler's previous invocation. .Pp The .Va offset -parameter indicates where the read operation should begin. For a channel of +parameter indicates where the read operation should begin. +For a channel of .Dv DISPATCH_IO_RANDOM type it is interpreted relative to the position of the file pointer when the channel was created, for a channel of @@ -79,7 +84,8 @@ pointer position. The .Va length parameter indicates the number of bytes that should be read from the I/O -channel. Pass +channel. +Pass .Dv SIZE_MAX to keep reading until EOF is encountered (for a channel created from a disk-based file this happens when reading past the end of the physical file). @@ -97,14 +103,14 @@ remains to be written as part of this I/O operation. .Pp The .Va offset -parameter indicates where the write operation should begin. It is interpreted -as for read operations above. +parameter indicates where the write operation should begin. +It is interpreted as for read operations above. .Pp The .Va data parameter specifies the location and amount of data to be written, encapsulated -as a dispatch data object. The object is retained by the system until the write -operation is complete. +as a dispatch data object. +The object is retained by the system until the write operation is complete. .Sh I/O HANDLER BLOCKS Dispatch I/O handler blocks submitted to a channel via the .Fn dispatch_io_read @@ -113,9 +119,9 @@ or functions will be executed one or more times depending on system load and the channel's configuration settings (see .Xr dispatch_io_create 3 -for details). The handler block need not be reentrant safe, -no new I/O handler instance is submitted until the previously enqueued handler -block has returned. +for details). 
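Reading to EOF with the parameters described above might look like this hedged sketch (`channel` and `queue` assumed from context):

    dispatch_io_read(channel, 0 /* offset */, SIZE_MAX /* until EOF */,
        queue, ^(bool done, dispatch_data_t data, int error) {
            if (data) {
                // Each invocation delivers only the bytes read since the
                // previous one; traverse without forcing a flat mapping.
                dispatch_data_apply(data, ^bool(dispatch_data_t region,
                    size_t offset, const void *buffer, size_t size) {
                    fwrite(buffer, 1, size, stdout);
                    return true; // continue traversal
                });
            }
            if (done && error)
                fprintf(stderr, "read error: %d\n", error);
        });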
+The handler block need not be reentrant safe, no new I/O handler instance is +submitted until the previously enqueued handler block has returned. .Pp The dispatch .Va data @@ -129,12 +135,14 @@ for details). Once an I/O handler block is invoked with the .Va done flag set, the associated I/O operation is complete and that handler block will -not be run again. If an unrecoverable error occurs while performing the I/O -operation, the handler block will be submitted with the +not be run again. +If an unrecoverable error occurs while performing the I/O operation, the handler +block will be submitted with the .Va done flag set and the appropriate POSIX error code in the .Va error -parameter. An invocation of a handler block with the +parameter. +An invocation of a handler block with the .Va done flag set, zero .Va error diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index cddcf32aa..03c29b030 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -53,13 +53,13 @@ and respectively. .Pp The dispatch framework does not guarantee that any given client has the last or -only reference to a given object. Objects may be retained internally by the -system. +only reference to a given object. +Objects may be retained internally by the system. .Ss INTEGRATION WITH OBJECTIVE-C .Bd -filled -offset indent When building with an Objective-C or Objective-C++ compiler, dispatch objects -are declared as Objective-C types. This results in the following differences -compared to building as plain C/C++: +are declared as Objective-C types. +This results in the following differences compared to building as plain C/C++: .Bl -dash .It if Objective-C Automated Reference Counting is enabled, dispatch objects are @@ -72,13 +72,15 @@ functions will produce build errors. .Em Note : when ARC is enabled, care needs to be taken with dispatch API returning an interior pointer that is only valid as long as an associated object has not -been released. If that object is held in a variable with automatic storage, it -may need to be annotated with the +been released. +If that object is held in a variable with automatic storage, it may need to be +annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong instance variable instead, to ensure that the object is not prematurely -released. The functions returning interior pointers are +released. +The functions returning interior pointers are .Xr dispatch_data_create_map 3 and .Xr dispatch_data_apply 3 . @@ -116,10 +118,9 @@ preprocessor macro to When building with a plain C/C++ compiler or when integration with Objective-C is disabled, dispatch objects are .Em not -automatically retained and released when captured by a block. Therefore, when a -dispatch object is captured by a block that will be executed asynchronously, -the object must be manually retained and released: -.Pp +automatically retained and released when captured by a block. +Therefore, when a dispatch object is captured by a block that will be executed +asynchronously, the object must be manually retained and released: .Bd -literal -offset indent dispatch_retain(object); dispatch_async(queue, ^{ @@ -129,13 +130,15 @@ dispatch_async(queue, ^{ .Ed .Sh ACTIVATION Dispatch objects such as queues and sources may be created in an inactive -state. Objects in this state must be activated before any blocks -associated with them will be invoked. Calling +state. +Objects in this state must be activated before any blocks associated with them +will be invoked. 
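The activation rules in dispatch_object.3 can be sketched with an initially inactive queue (this attribute is only available on newer systems; the label is illustrative):

    dispatch_queue_t q = dispatch_queue_create("com.example.cfg",
        dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL));

    // Configuration such as the target queue must happen before
    // activation; afterwards it is no longer permitted.
    dispatch_set_target_queue(q,
        dispatch_get_global_queue(QOS_CLASS_UTILITY, 0));

    dispatch_activate(q); // further calls on the active object are no-ops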
+Calling .Fn dispatch_activate on an active object has no effect. .Pp -Changing attributes such as the target queue or a source handler is no longer permitted -once the object has been activated (see +Changing attributes such as the target queue or a source handler is no longer +permitted once the object has been activated (see .Xr dispatch_set_target_queue 3 , .Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION @@ -144,7 +147,8 @@ or resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Other dispatch objects do not support suspension. +respectively. +Other dispatch objects do not support suspension. .Pp The dispatch framework always checks the suspension status before executing a block, but such changes never affect a block during execution (non-preemptive). @@ -155,18 +159,20 @@ a dispatch source is undefined. .Pp .Em Important : suspension applies to all aspects of the dispatch object life cycle, including -the finalizer function and cancellation handler. Suspending an object causes it -to be retained and resuming an object causes it to be released. Therefore it is -important to balance calls to +the finalizer function and cancellation handler. +Suspending an object causes it to be retained and resuming an object causes it +to be released. +Therefore it is important to balance calls to .Fn dispatch_suspend and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is -released. The result of releasing all references to a dispatch object while in +released. +The result of releasing all references to a dispatch object while in an inactive or suspended state is undefined. .Sh CONTEXT POINTERS -Dispatch objects support supplemental context pointers. The value of the -context pointer may be retrieved and updated with +Dispatch objects support supplemental context pointers. +The value of the context pointer may be retrieved and updated with .Fn dispatch_get_context and .Fn dispatch_set_context @@ -176,8 +182,8 @@ The specifies an optional per-object finalizer function that is invoked asynchronously if the context pointer is not NULL when the last reference to the object is released. -This gives the -application an opportunity to free the context data associated with the object. +This gives the application an opportunity to free the context data associated +with the object. The finalizer will be run on the object's target queue. .Sh SEE ALSO .Xr dispatch 3 , diff --git a/man/dispatch_once.3 b/man/dispatch_once.3 index 2118a23bb..0875bc54c 100644 --- a/man/dispatch_once.3 +++ b/man/dispatch_once.3 @@ -36,7 +36,6 @@ FILE *getlogfile(void) return logfile; } .Ed -.Pp .Sh FUNDAMENTALS The .Fn dispatch_once diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 833e564a0..3eeb4d366 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -49,11 +49,13 @@ All blocks submitted to dispatch queues are dequeued in FIFO order. Queues created with the .Dv DISPATCH_QUEUE_SERIAL attribute wait for the previously dequeued block to complete before dequeuing -the next block. A queue with this FIFO completion behavior is usually simply -described as a "serial queue." All memory writes performed by a block dispatched -to a serial queue are guaranteed to be visible to subsequent blocks dispatched -to the same queue. Queues are not bound to any specific thread of execution and -blocks submitted to independent queues may execute concurrently. +the next block. 
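A sketch of the context-pointer and finalizer mechanism described above (plain C, illustrative names):

    #include <dispatch/dispatch.h>
    #include <stdlib.h>

    static void ctx_finalizer(void *ctx)
    {
        // Invoked on the queue's target queue once the last reference
        // is released, and only if the context pointer is non-NULL.
        free(ctx);
    }

    int main(void)
    {
        dispatch_queue_t q = dispatch_queue_create("com.example.ctx", NULL);
        dispatch_set_context(q, calloc(1, 64));
        dispatch_set_finalizer_f(q, ctx_finalizer);

        dispatch_async(q, ^{
            void *state = dispatch_get_context(q);
            (void)state; // per-object supplemental state
        });

        dispatch_release(q); // finalizer runs after the pending block
        dispatch_main();     // never returns
    }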
+A queue with this FIFO completion behavior is usually simply described as a +"serial queue". +All memory writes performed by a block dispatched to a serial queue are +guaranteed to be visible to subsequent blocks dispatched to the same queue. +Queues are not bound to any specific thread of execution and blocks submitted to +independent queues may execute concurrently. .Pp Queues created with the .Dv DISPATCH_QUEUE_CONCURRENT @@ -62,15 +64,17 @@ submitted with the dispatch barrier API. .Sh CREATION Queues are created with the .Fn dispatch_queue_create -function. Queues, like all dispatch objects, are reference counted and newly -created queues have a reference count of one. +function. +Queues, like all dispatch objects, are reference counted and newly created +queues have a reference count of one. .Pp The optional .Fa label argument is used to describe the purpose of the queue and is useful during -debugging and performance analysis. If a label is provided, it is copied. -By convention, clients should pass a reverse DNS style label. For example: -.Pp +debugging and performance analysis. +If a label is provided, it is copied. +By convention, clients should pass a reverse DNS style label. +For example: .Bd -literal -offset indent my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", DISPATCH_QUEUE_SERIAL); @@ -98,12 +102,14 @@ Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Suspension is checked prior to block execution and is +respectively. +Suspension is checked prior to block execution and is .Em not preemptive. .Sh MAIN QUEUE The dispatch framework provides a default serial queue for the application to -use. This queue is accessed via the +use. +This queue is accessed via the .Fn dispatch_get_main_queue function. .Pp @@ -111,17 +117,20 @@ Programs must call .Fn dispatch_main at the end of .Fn main -in order to process blocks submitted to the main queue. (See the +in order to process blocks submitted to the main queue. +(See the .Sx COMPATIBILITY -section for exceptions.) The +section for exceptions.) +The .Fn dispatch_main function never returns. .Sh GLOBAL CONCURRENT QUEUES Unlike the main queue or queues allocated with .Fn dispatch_queue_create , the global concurrent queues schedule blocks as soon as threads become -available (non-FIFO completion order). Four global concurrent queues are -provided, representing the following priority bands: +available (non-FIFO completion order). +Four global concurrent queues are provided, representing the following priority +bands: .Bl -bullet -compact -offset indent .It DISPATCH_QUEUE_PRIORITY_HIGH @@ -136,33 +145,34 @@ DISPATCH_QUEUE_PRIORITY_BACKGROUND The priority of a global concurrent queue controls the scheduling priority of the threads created by the system to invoke the blocks submitted to that queue. Global queues with lower priority will be scheduled for execution after all -global queues with higher priority have been scheduled. Additionally, items on -the background priority global queue will execute on threads with background -state as described in +global queues with higher priority have been scheduled. +Additionally, items on the background priority global queue will execute on +threads with background state as described in .Xr setpriority 2 (i.e.\& disk I/O is throttled and the thread's scheduling priority is set to lowest value). .Pp Use the .Fn dispatch_get_global_queue -function to obtain the global queue of given priority. 
The +function to obtain the global queue of given priority. +The .Fa flags -argument is reserved for future use and must be zero. Passing any value other -than zero may result in a NULL return value. +argument is reserved for future use and must be zero. +Passing any value other than zero may result in a NULL return value. .Sh TARGET QUEUE The .Fn dispatch_set_target_queue -function updates the target queue of the given dispatch object. The target -queue of an object is responsible for processing the object. +function updates the target queue of the given dispatch object. +The target queue of an object is responsible for processing the object. .Pp The new target queue is retained by the given object before the previous target -queue is released. The new target queue setting will take effect between block -executions on the object, but not in the middle of any existing block executions -(non-preemptive). +queue is released. +The new target queue setting will take effect between block executions on the +object, but not in the middle of any existing block executions (non-preemptive). .Pp The default target queue of all dispatch objects created by the application is -the default priority global concurrent queue. To reset an object's target queue -to the default, pass the +the default priority global concurrent queue. +To reset an object's target queue to the default, pass the .Dv DISPATCH_TARGET_QUEUE_DEFAULT constant to .Fn dispatch_set_target_queue . @@ -179,12 +189,14 @@ will not be invoked concurrently with blocks submitted to the target queue or to any other queue with that same target queue. .Pp The target queue of a dispatch source specifies where its event handler and -cancellation handler blocks will be submitted. See +cancellation handler blocks will be submitted. +See .Xr dispatch_source_create 3 for more information about dispatch sources. .Pp The target queue of a dispatch I/O channel specifies the priority of the global -queue where its I/O operations are executed. See +queue where its I/O operations are executed. +See .Xr dispatch_io_create 3 for more information about dispatch I/O channels. .Pp @@ -207,24 +219,27 @@ The following functions are deprecated and will be removed in a future release: .El .Pp .Fn dispatch_get_current_queue -always returns a valid queue. When called from within a block -submitted to a dispatch queue, that queue will be returned. If this function is -called from the main thread before +always returns a valid queue. +When called from within a block submitted to a dispatch queue, that queue will +be returned. +If this function is called from the main thread before .Fn dispatch_main is called, then the result of .Fn dispatch_get_main_queue -is returned. In all other cases, the default target queue will be returned. +is returned. +In all other cases, the default target queue will be returned. .Pp The use of .Fn dispatch_get_current_queue -is strongly discouraged except for debugging and logging purposes. Code must not -make any assumptions about the queue returned, unless it is one of the global -queues or a queue the code has itself created. The returned queue may have -arbitrary policies that may surprise code that tries to schedule work with the -queue. The list of policies includes, but is not limited to, queue width (i.e. -serial vs. concurrent), scheduling priority, security credential or filesystem -configuration. This function is deprecated and will be removed in a future -release. +is strongly discouraged except for debugging and logging purposes. 
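The TARGET QUEUE behavior above permits funneling several queues through one serial bottleneck; a short sketch (labels are illustrative):

#include <dispatch/dispatch.h>

// Blocks submitted to `a` and `b` never run concurrently with each other,
// because both queues share a single serial target queue.
static void
make_mutually_exclusive(dispatch_queue_t *a_out, dispatch_queue_t *b_out)
{
	dispatch_queue_t target = dispatch_queue_create("com.example.target",
	    DISPATCH_QUEUE_SERIAL);
	dispatch_queue_t a = dispatch_queue_create("com.example.a", NULL);
	dispatch_queue_t b = dispatch_queue_create("com.example.b", NULL);
	dispatch_set_target_queue(a, target);
	dispatch_set_target_queue(b, target);
	dispatch_release(target); // a and b retain their target
	*a_out = a;
	*b_out = b;
}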
+Code must not make any assumptions about the queue returned, unless it is one of +the global queues or a queue the code has itself created. +The returned queue may have arbitrary policies that may surprise code that tries +to schedule work with the queue. +The list of policies includes, but is not limited to, queue width (i.e. serial +vs. concurrent), scheduling priority, security credential or filesystem +configuration. +This function is deprecated and will be removed in a future release. .Pp It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by @@ -234,17 +249,21 @@ The result of .Fn dispatch_get_main_queue may or may not equal the result of .Fn dispatch_get_current_queue -when called on the main thread. Comparing the two is not a valid way to test -whether code is executing on the main thread. Foundation/AppKit programs should -use [NSThread isMainThread]. POSIX programs may use +when called on the main thread. +Comparing the two is not a valid way to test whether code is executing on the +main thread. +Foundation/AppKit programs should use [NSThread isMainThread]. +POSIX programs may use .Xr pthread_main_np 3 . .Pp .Fn dispatch_get_current_queue may return a queue owned by a different subsystem which has already had all -external references to it released. While such a queue will continue to exist +external references to it released. +While such a queue will continue to exist until all blocks submitted to it have completed, attempting to retain it is -forbidden and will trigger an assertion. If Objective-C Automatic Reference -Counting is enabled, any use of the object returned by +forbidden and will trigger an assertion. +If Objective-C Automatic Reference Counting is enabled, any use of the object +returned by .Fn dispatch_get_current_queue will cause retain calls to be automatically generated, so the use of .Fn dispatch_get_current_queue @@ -258,17 +277,20 @@ However, blocks submitted to the main queue in applications using .Fn dispatch_main are not guaranteed to execute on the main thread. .Pp -The dispatch framework is a pure C level API. As a result, it does not catch -exceptions generated by higher level languages such as Objective-C or C++. +The dispatch framework is a pure C level API. +As a result, it does not catch exceptions generated by higher level languages +such as Objective-C or C++. Applications .Em MUST catch all exceptions before returning from a block submitted to a dispatch queue; otherwise the process will be terminated with an uncaught exception. .Pp The dispatch framework manages the relationship between dispatch queues and -threads of execution. As a result, applications +threads of execution. +As a result, applications .Em MUST NOT -delete or mutate objects that they did not create. The following interfaces +delete or mutate objects that they did not create. +The following interfaces .Em MUST NOT be called by blocks submitted to a dispatch queue: .Bl -bullet -offset indent @@ -323,17 +345,19 @@ invocations of blocks submitted to a dispatch queue: While the result of .Fn pthread_self may change between invocations of blocks, the value will not change during the -execution of any single block. Because the underlying thread may change beteween -block invocations on a single queue, using per-thread data as an out-of-band -return value is error prone. In other words, the result of calling +execution of any single block. 
+Because the underlying thread may change between block invocations on a single
+queue, using per-thread data as an out-of-band return value is error prone.
+In other words, the result of calling
 .Fn pthread_setspecific
 and
 .Fn pthread_getspecific
-is well defined within a signle block, but not across multiple blocks. Also,
-one cannot make any assumptions about when the destructor passed to
+is well defined within a single block, but not across multiple blocks.
+Also, one cannot make any assumptions about when the destructor passed to
 .Fn pthread_key_create
-is called. The destructor may be called between the invocation of blocks on
-the same queue, or during the idle state of a process.
+is called.
+The destructor may be called between the invocation of blocks on the same queue,
+or during the idle state of a process.
 .Pp
 The following example code correctly handles per-thread return values:
 .Bd -literal -offset indent
@@ -350,20 +374,19 @@ printf("kill(1,0) returned %d and errno %d\n", r, e);
 Note that in the above example
 .Va errno
 is a per-thread variable and must be copied out explicitly as the block may be
-invoked on different thread of execution than the caller. Another example of
-per-thread data that would need to be copied is the use of
+invoked on a different thread of execution than the caller.
+Another example of per-thread data that would need to be copied is the use of
 .Fn getpwnam
 instead of
 .Fn getpwnam_r .
 .Pp
 As an optimization,
 .Fn dispatch_sync
-invokes the block on the current thread when possible. In this case, the thread
-specific data such as
+invokes the block on the current thread when possible.
+In this case, the thread specific data such as
 .Va errno
-may persist from the block until back to the caller. Great care should be taken
-not to accidentally rely on this side-effect.
-.Pp
+may persist from the block until back to the caller.
+Great care should be taken not to accidentally rely on this side-effect.
 .Sh SEE ALSO
 .Xr dispatch 3 ,
 .Xr dispatch_async 3 ,
diff --git a/man/dispatch_read.3 b/man/dispatch_read.3
index 38e88dea8..42e915f54 100644
--- a/man/dispatch_read.3
+++ b/man/dispatch_read.3
@@ -27,19 +27,19 @@ The
 .Fn dispatch_read
 and
 .Fn dispatch_write
-functions asynchronously read from and write to POSIX file descriptors. They
-can be thought of as asynchronous, callback-based versions of the
+functions asynchronously read from and write to POSIX file descriptors.
+They can be thought of as asynchronous, callback-based versions of the
 .Fn fread
 and
 .Fn fwrite
-functions provided by the standard C library. They are convenience functions
-based on the
+functions provided by the standard C library.
+They are convenience functions based on the
 .Xr dispatch_io_read 3
 and
 .Xr dispatch_io_write 3
-functions, intended for simple one-shot read or write requests. Multiple
-request on the same file desciptor are better handled with the full underlying
-dispatch I/O channel functions.
+functions, intended for simple one-shot read or write requests.
+Multiple requests on the same file descriptor are better handled with the full
+underlying dispatch I/O channel functions.
 .Sh BEHAVIOR
 The
 .Fn dispatch_read
@@ -48,20 +48,21 @@ function schedules an asynchronous read operation on the file descriptor
 Once the file descriptor is readable, the system will read as much data as is
 currently available, up to the specified
 .Va length ,
-starting at the current file pointer position. The given
+starting at the current file pointer position.
+The given
 .Va handler
 block will be submitted to
 .Va queue
-when the operation completes or an error occurs. The block will be passed a
-dispatch
+when the operation completes or an error occurs.
+The block will be passed a dispatch
 .Va data
-object with the result of the read operation. If an error occurred while
-reading from the file descriptor, the
+object with the result of the read operation.
+If an error occurred while reading from the file descriptor, the
 .Va error
 parameter to the block will be set to the appropriate POSIX error code and
 .Va data
-will contain any data that could be read successfully. If the file pointer
-position is at end-of-file, emtpy
+will contain any data that could be read successfully.
+If the file pointer position is at end-of-file, empty
 .Va data
 and zero
 .Va error
@@ -75,23 +76,31 @@ The system will attempt to write the entire contents of the provided
 .Va data
 object to
 .Va fd
-at the current file pointer position. The given
+at the current file pointer position.
+The given
 .Va handler
 block will be submitted to
 .Va queue
-when the operation completes or an error occurs. If the write operation
-completed successfully, the
+when the operation completes or an error occurs.
+If the write operation completed successfully, the
 .Va error
 parameter to the block will be set to zero, otherwise it will be set to the
 appropriate POSIX error code and the
 .Va data
 parameter will contain any data that could not be written.
+.Sh SEE ALSO
+.Xr dispatch 3 ,
+.Xr dispatch_data_create 3 ,
+.Xr dispatch_io_create 3 ,
+.Xr dispatch_io_read 3 ,
+.Xr fread 3
 .Sh CAVEATS
 The
 .Va data
 object passed to a
 .Va handler
-block is released by the system when the block returns. If
+block is released by the system when the block returns.
+If
 .Va data
 is needed outside of the handler block, it must concatenate, copy, or retain
 it.
@@ -101,7 +110,8 @@ descriptor
 .Va fd ,
 the system takes control of that file descriptor until the
 .Va handler
-block is executed. During this time the application must not manipulate
+block is executed.
+During this time the application must not manipulate
 .Va fd
 directly, in particular it is only safe to close
 .Va fd
 from the handler block (or after it has returned).
@@ -110,14 +120,9 @@ If multiple asynchronous read or write operations are submitted to the same
 file descriptor, they will be performed in order, but their handlers will only
 be submitted once all operations have completed and control over the file
-descriptor has been relinquished. For details on this and on the interaction
-with dispatch I/O channels created from the same file descriptor, see
+descriptor has been relinquished.
+For details on this and on the interaction with dispatch I/O channels created
+from the same file descriptor, see
 .Sx FILEDESCRIPTOR OWNERSHIP
 in
 .Xr dispatch_io_create 3 .
-.Sh SEE ALSO
-.Xr dispatch 3 ,
-.Xr dispatch_data_create 3 ,
-.Xr dispatch_io_create 3 ,
-.Xr dispatch_io_read 3 ,
-.Xr fread 3
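Before moving on, a usage sketch for the one-shot dispatch_read interface documented above (error checks abridged; the helper name is illustrative):

#include <dispatch/dispatch.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void
read_file_async(const char *path, dispatch_queue_t q)
{
	int fd = open(path, O_RDONLY | O_NONBLOCK);
	if (fd < 0) return;
	dispatch_read(fd, SIZE_MAX, q, ^(dispatch_data_t data, int error) {
		// The system owns fd until this handler runs; only close it
		// here (or later), never while the operation is in flight.
		printf("read %zu bytes, error %d\n",
		    dispatch_data_get_size(data), error);
		close(fd);
	});
}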
diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3
index c6023cdae..7f0a5430a 100644
--- a/man/dispatch_semaphore_create.3
+++ b/man/dispatch_semaphore_create.3
@@ -26,8 +26,9 @@ Dispatch semaphores are used to synchronize threads.
 .Pp
 The
 .Fn dispatch_semaphore_wait
-function decrements the semaphore. If the resulting value is less than zero,
-it waits for a signal from a thread that increments the semaphore by calling
+function decrements the semaphore.
+If the resulting value is less than zero, it waits for a signal from a thread
+that increments the semaphore by calling
 .Fn dispatch_semaphore_signal
 before returning.
 The
 .Fa timeout
 parameter is creatable with the
 .Xr dispatch_time 3
 or
 .Xr dispatch_walltime 3
-functions. If the timeout is reached without a signal being received, the semaphore
-is re-incremented before the function returns.
+functions.
+If the timeout is reached without a signal being received, the semaphore is
+re-incremented before the function returns.
 .Pp
 The
 .Fn dispatch_semaphore_signal
-function increments the counting semaphore. If the previous value was less than zero,
-it wakes one of the threads that are waiting in
+function increments the counting semaphore.
+If the previous value was less than zero, it wakes one of the threads that are
+waiting in
 .Fn dispatch_semaphore_wait
 before returning.
 .Sh COMPLETION SYNCHRONIZATION
@@ -98,8 +101,8 @@ Otherwise, zero is returned.
 .Pp
 The
 .Fn dispatch_semaphore_wait
-function returns zero upon success and non-zero after the timeout expires. If
-the timeout is DISPATCH_TIME_FOREVER, then
+function returns zero upon success and non-zero after the timeout expires.
+If the timeout is DISPATCH_TIME_FOREVER, then
 .Fn dispatch_semaphore_wait
 waits forever and always returns zero.
 .Sh MEMORY MODEL
@@ -107,17 +110,18 @@ Dispatch semaphores are retained and released via calls to
 .Fn dispatch_retain
 and
 .Fn dispatch_release .
+.Sh SEE ALSO
+.Xr dispatch 3 ,
+.Xr dispatch_object 3
 .Sh CAVEATS
 Unbalanced dispatch semaphores cannot be released.
 For a given semaphore, the count at the time
 .Fn dispatch_release
 is called must be equal to or larger than the
-count the semaphore was created with. In other words, at the time of releasing
-the semaphore, there must have been at least as many
+count the semaphore was created with.
+In other words, at the time of releasing the semaphore, there must have been at
+least as many
 .Fn dispatch_semaphore_signal
 calls as there were successful
 .Fn dispatch_semaphore_wait
 calls that did not timeout.
-.Sh SEE ALSO
-.Xr dispatch 3 ,
-.Xr dispatch_object 3
diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3
index 313b6e723..b54d3da8a 100644
--- a/man/dispatch_source_create.3
+++ b/man/dispatch_source_create.3
@@ -91,18 +91,20 @@ with calls to
 .Fn dispatch_retain
 and
 .Fn dispatch_release
-respectively. The
+respectively.
+The
 .Fa queue
 parameter specifies the target queue of the new source object, it will
-be retained by the source object. Pass the
+be retained by the source object.
+Pass the
 .Dv DISPATCH_TARGET_QUEUE_DEFAULT
 constant to use the default target queue (the default priority global
 concurrent queue).
 .Pp
-Newly created sources are created in a suspended state. After the source has
-been configured by setting an event handler, cancellation handler, registration
-handler, context,
-etc., the source must be activated by a call to
+Newly created sources are created in a suspended state.
+After the source has been configured by setting an event handler, cancellation
+handler, registration handler, context, etc., the source must be activated by a
+call to
 .Fn dispatch_resume
 before any events will be delivered.
 .Pp
@@ -151,8 +153,8 @@ The
 .Fn dispatch_source_get_handle
 function returns the underlying handle to the dispatch source (i.e. file
 descriptor,
-mach port, process identifer, etc.). The result of this function may be cast
-directly to the underlying type.
+mach port, process identifier, etc.).
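Stepping back to the semaphore protocol documented above, the classic completion-synchronization sketch, using a semaphore created with a count of zero:

#include <dispatch/dispatch.h>

static void
run_and_wait(dispatch_queue_t q)
{
	dispatch_semaphore_t sema = dispatch_semaphore_create(0);
	dispatch_async(q, ^{
		// ... do the work ...
		dispatch_semaphore_signal(sema);
	});
	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
	dispatch_release(sema); // balanced: one signal, one successful wait
}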
+The result of this function may be cast directly to the underlying type. .Pp The .Fn dispatch_source_get_mask @@ -174,8 +176,10 @@ function is intended for use with the .Vt DISPATCH_SOURCE_TYPE_DATA_OR and .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE -source types. The result of using this function with any other source type is -undefined. Data merging is performed according to the source type: +source types. +The result of using this function with any other source type is +undefined. +Data merging is performed according to the source type: .Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent .It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD .Vt data @@ -189,7 +193,8 @@ atomically replaces the source's data. .El .Pp If the source data value resulting from the merge operation is 0, the source -handler will not be invoked. This can happen if: +handler will not be invoked. +This can happen if: .Bl -bullet -compact -offset indent .It the atomic addition wraps for sources of type @@ -198,14 +203,14 @@ the atomic addition wraps for sources of type 0 is merged for sources of type .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE . .El -.Pp .Sh SOURCE EVENT HANDLERS In order to receive events from the dispatch source, an event handler should be specified via .Fn dispatch_source_set_event_handler . The event handler block is submitted to the source's target queue when the state -of the underlying system handle changes, or when an event occurs. If a source -is resumed with no event handler block set, events will be quietly ignored. +of the underlying system handle changes, or when an event occurs. +If a source is resumed with no event handler block set, events will be quietly +ignored. If the event handler block is changed while the source is suspended, or from a block running on a serial queue that is the source's target queue, then the next event handler invocation will use the new block. @@ -215,8 +220,9 @@ queues using .Fn dispatch_suspend and .Fn dispatch_resume -on the dispatch source directly. The data describing events which occur while a -source is suspended are coalesced and delivered once the source is resumed. +on the dispatch source directly. +The data describing events which occur while a source is suspended are coalesced +and delivered once the source is resumed. .Pp The .Fa handler @@ -235,11 +241,11 @@ To unset the event handler, call and pass NULL as .Fa function . This unsets the event handler regardless of whether the handler -was a function pointer or a block. Registration and cancellation handlers -(see below) may be unset in the same way, but as noted below, a cancellation -handler may be required. +was a function pointer or a block. +Registration and cancellation handlers (see below) may be unset in the same way, +but as noted below, a cancellation handler may be required. .Sh REGISTRATION -When +When .Fn dispatch_resume is called on a suspended or newly created source, there may be a brief delay before the source is ready to receive events from the underlying system handle. @@ -248,29 +254,33 @@ missed. .Pp Once the dispatch source is registered with the underlying system and is ready to process all events its optional registration handler will be submitted to -its target queue. This registration handler may be specified via +its target queue. +This registration handler may be specified via .Fn dispatch_source_set_registration_handler . .Pp The event handler will not be called until the registration handler finishes. 
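A minimal sketch of observing the registration point just described (assumes the source was created and configured elsewhere):

#include <dispatch/dispatch.h>
#include <stdio.h>

static void
arm_source(dispatch_source_t src)
{
	dispatch_source_set_registration_handler(src, ^{
		// The source is installed with the underlying system; event
		// handler invocations can only begin after this block runs.
		printf("source registered\n");
	});
	dispatch_resume(src);
}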
If the source is canceled (see below) before it is registered, its registration handler will not be called. -.Pp .Sh CANCELLATION The .Fn dispatch_source_cancel function asynchronously cancels the dispatch source, preventing any further -invocation of its event handler block. Cancellation does not interrupt a -currently executing handler block (non-preemptive). If a source is canceled -before the first time it is resumed, its event handler will never be called. +invocation of its event handler block. +Cancellation does not interrupt a currently executing handler block +(non-preemptive). +If a source is canceled before the first time it is resumed, its event handler +will never be called. (In this case, note that the source must be resumed before it can be released.) .Pp The .Fn dispatch_source_testcancel function may be used to determine whether the specified source has been -canceled. A non-zero value will be returned if the source is canceled. +canceled. +A non-zero value will be returned if the source is canceled. .Pp When a dispatch source is canceled its optional cancellation handler will be -submitted to its target queue. The cancellation handler may be specified via +submitted to its target queue. +The cancellation handler may be specified via .Fn dispatch_source_set_cancel_handler . This cancellation handler is invoked only once, and only as a direct consequence of calling @@ -278,12 +288,11 @@ of calling .Pp .Em Important: a cancellation handler is required for file descriptor and mach port based -sources in order to safely close the descriptor or destroy the port. Closing the -descriptor or port before the cancellation handler has run may result in a race -condition: if a new descriptor is allocated with the same value as the recently -closed descriptor while the source's event handler is still running, the event -handler may read/write data to the wrong descriptor. -.Pp +sources in order to safely close the descriptor or destroy the port. +Closing the descriptor or port before the cancellation handler has run may +result in a race condition: if a new descriptor is allocated with the same value +as the recently closed descriptor while the source's event handler is still +running, the event handler may read/write data to the wrong descriptor. .Sh DISPATCH SOURCE TYPES The following section contains a summary of supported dispatch event types and the interpretation of their parameters and returned data. @@ -297,9 +306,11 @@ handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or atomic bitwise OR, or direct replacement (based on the source's type), and the -event handler block will be submitted to the source's target queue. The +event handler block will be submitted to the source's target queue. +The .Fa data -is application defined. These sources have no +is application defined. +These sources have no .Fa handle or .Fa mask @@ -322,7 +333,8 @@ The data returned by .Fn dispatch_source_get_data is a bitmask that indicates which of the events in the .Fa mask -were observed. Note that because this source type will request notifications on +were observed. +Note that because this source type will request notifications on the provided port, it should not be mixed with the use of .Fn mach_port_request_notification on the same port. @@ -341,9 +353,11 @@ on the mach port is waiting to be received. 
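Before the remaining source types, a sketch combining the cancellation rule above with a descriptor-based source (a READ source; names are illustrative):

#include <dispatch/dispatch.h>
#include <stdint.h>
#include <unistd.h>

static dispatch_source_t
watch_fd(int fd, dispatch_queue_t q)
{
	dispatch_source_t src = dispatch_source_create(
	    DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, q);
	dispatch_source_set_event_handler(src, ^{
		char buf[4096];
		(void)read(fd, buf, sizeof(buf)); // fd should be O_NONBLOCK
	});
	dispatch_source_set_cancel_handler(src, ^{
		// Per the CANCELLATION section, this is the only safe place
		// to close the descriptor.
		close(fd);
	});
	dispatch_resume(src);
	return src;
}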
.Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE .Pp Sources of this type monitor the system memory pressure condition for state -changes. The +changes. +The .Fa handle -is unused and should be zero. The +is unused and should be zero. +The .Fa mask may be one or more of the following: .Bl -tag -width "XXDISPATCH_MEMORYPRESSURE_CRITICAL" -compact -offset indent @@ -412,14 +426,15 @@ is unused and should be zero. .Pp The data returned by .Fn dispatch_source_get_data -is an estimated number of bytes available to be read from the descriptor. This -estimate should be treated as a suggested +is an estimated number of bytes available to be read from the descriptor. +This estimate should be treated as a suggested .Em minimum -read buffer size. There are no guarantees that a complete read of this size -will be performed. +read buffer size. +There are no guarantees that a complete read of this size will be performed. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK @@ -427,7 +442,8 @@ flag on a file descriptor. .Pp .Vt DISPATCH_SOURCE_TYPE_SIGNAL .Pp -Sources of this type monitor signals delivered to the current process. The +Sources of this type monitor signals delivered to the current process. +The .Fa handle is the signal number to monitor (int) and the .Fa mask @@ -445,11 +461,13 @@ of execution; therefore the handler block is not limited to the use of signal safe interfaces defined in .Xr sigaction 2 . Furthermore, multiple observers of a given signal are supported; thus allowing -applications and libraries to cooperate safely. However, a dispatch source +applications and libraries to cooperate safely. +However, a dispatch source .Em does not install a signal handler or otherwise alter the behavior of signal delivery. Therefore, applications must ignore or at least catch any signal that terminates -a process by default. For example, near the top of +a process by default. +For example, near the top of .Fn main : .Bd -literal -offset ident signal(SIGTERM, SIG_IGN); @@ -458,7 +476,8 @@ signal(SIGTERM, SIG_IGN); .Vt DISPATCH_SOURCE_TYPE_TIMER .Pp Sources of this type periodically submit the event handler block to the target -queue. The +queue. +The .Fa handle argument is unused and should be zero. .Pp @@ -469,7 +488,8 @@ event handler block. .Pp The timer parameters are configured with the .Fn dispatch_source_set_timer -function. Once this function returns, any pending source data accumulated for +function. +Once this function returns, any pending source data accumulated for the previous timer parameters has been cleared; the next fire of the timer will occur at .Fa start , @@ -478,8 +498,8 @@ and every nanoseconds thereafter until the timer source is canceled. .Pp Any fire of the timer may be delayed by the system in order to improve power -consumption and system performance. The upper limit to the allowable delay may -be configured with the +consumption and system performance. +The upper limit to the allowable delay may be configured with the .Fa leeway argument, the lower limit is under the control of the system. .Pp @@ -487,7 +507,8 @@ For the initial timer fire at .Fa start , the upper limit to the allowable delay is set to .Fa leeway -nanoseconds. For the subsequent timer fires at +nanoseconds. 
+For the subsequent timer fires at .Fa start .Li "+ N *" .Fa interval , @@ -498,14 +519,16 @@ the upper limit is .Li "/ 2 )" . .Pp The lower limit to the allowable delay may vary with process state such as -visibility of application UI. If the specified timer source was created with a +visibility of application UI. +If the specified timer source was created with a .Fa mask of .Vt DISPATCH_TIMER_STRICT , the system will make a best effort to strictly observe the provided .Fa leeway -value even if it is smaller than the current lower limit. Note that a minimal -amount of delay is to be expected even if this flag is specified. +value even if it is smaller than the current lower limit. +Note that a minimal amount of delay is to be expected even if this flag is +specified. .Pp The .Fa start @@ -575,12 +598,12 @@ is the file descriptor (int) to monitor and the is unused and should be zero. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK flag on a file descriptor. -.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_object 3 , diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 2536e0e9f..635f7d909 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -34,8 +34,9 @@ type is a semi-opaque integer, with only the special values .Vt DISPATCH_WALLTIME_NOW and .Vt DISPATCH_TIME_FOREVER -being externally defined. All other values are represented using an internal -format that is not safe for integer arithmetic or comparison. +being externally defined. +All other values are represented using an internal format that is not safe for +integer arithmetic or comparison. The internal format is subject to change. .Pp The @@ -52,8 +53,8 @@ Otherwise, if .Fa base is .Vt DISPATCH_TIME_NOW , -then the current time of the default host clock is used. On Apple platforms, -the value of the default host clock is obtained from +then the current time of the default host clock is used. +On Apple platforms, the value of the default host clock is obtained from .Vt mach_absolute_time() . .Pp The @@ -61,7 +62,8 @@ The function is useful for creating a milestone relative to a fixed point in time using the wall clock, as specified by the optional .Fa base -parameter. If +parameter. +If .Fa base is NULL, then the current time of the wall clock is used. .Vt dispatch_walltime(NULL, offset) @@ -78,7 +80,8 @@ parameter. .Pp Overflow causes .Vt DISPATCH_TIME_FOREVER -to be returned. When +to be returned. +When .Fa base is .Vt DISPATCH_TIME_FOREVER , diff --git a/os/clock.h b/os/clock.h new file mode 100644 index 000000000..665e1d871 --- /dev/null +++ b/os/clock.h @@ -0,0 +1,18 @@ +#ifndef __OS_CLOCK__ +#define __OS_CLOCK__ + +#include +#include + +/* + * @typedef os_clockid_t + * + * @abstract + * Describes the kind of clock that the workgroup timestamp parameters are + * specified in + */ +OS_ENUM(os_clockid, uint32_t, + OS_CLOCK_MACH_ABSOLUTE_TIME = 32, +); + +#endif /* __OS_CLOCK__ */ diff --git a/os/eventlink_private.h b/os/eventlink_private.h new file mode 100644 index 000000000..eb55a745b --- /dev/null +++ b/os/eventlink_private.h @@ -0,0 +1,296 @@ +#ifndef __OS_EVENTLINK__ +#define __OS_EVENTLINK__ + +#include +#include +#include + +__BEGIN_DECLS + +OS_OBJECT_ASSUME_NONNULL_BEGIN + +/*! 
+ * @typedef os_eventlink_t
+ *
+ * @abstract
+ * A reference counted os_object representing a directed paired link of "wake" events
+ * between two designated threads, the link `source` and the link `target`.
+ * The target thread may optionally inherit properties of the source thread upon
+ * return from wait (such as membership in a workgroup).
+ *
+ * @discussion
+ * Threads explicitly associate themselves with an eventlink; only one source
+ * and one target may exist per eventlink.
+ */
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct os_eventlink_s *os_eventlink_t;
+#else
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_OBJECT_DECL_CLASS(os_eventlink);
+#endif
+
+/*!
+ * @function os_eventlink_create
+ *
+ * @abstract
+ * Creates an inactive refcounted os_object representing an os_eventlink_t.
+ *
+ * This function creates only one endpoint of an eventlink object. The other
+ * endpoint of the eventlink needs to be created from this eventlink object
+ * using one of the other creator functions -
+ * os_eventlink_create_remote_with_eventlink() or
+ * os_eventlink_create_with_port()
+ */
+OS_EXPORT OS_OBJECT_RETURNS_RETAINED
+os_eventlink_t _Nullable
+os_eventlink_create(const char *name);
+
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+
+/* TODO: API for the future when we make a variant of eventlink that does
+ * copyin */
+
+/*!
+ * @typedef os_eventlink_shared_data_t
+ *
+ * @abstract
+ * Pointer to an opaque structure identifying the data that is used to
+ * synchronize between the two endpoints of an eventlink.
+ *
+ * It is the client's responsibility to allocate this structure such that both
+ * threads on the two endpoints of the eventlink can synchronize with it, i.e. if
+ * the eventlink is between 2 threads in 2 processes, os_eventlink_shared_data_t
+ * should be allocated in shared memory between the two processes.
+ */
+typedef struct os_eventlink_shared_data_s {
+	uint64_t local_count;
+	uint64_t remote_count;
+} os_eventlink_shared_data_s, *os_eventlink_shared_data_t;
+#define OS_EVENTLINK_SHARED_DATA_INITIALIZER { 0 }
+
+/*!
+ * @function os_eventlink_set_shared_data
+ *
+ * @abstract
+ * Associates a shared data structure with the os_eventlink.
+ *
+ * As a performance enhancement, clients may choose to provide an opaque shared
+ * data structure in memory visible to both ends of the eventlink based on the
+ * usage pattern of the os eventlink.
+ *
+ * Passing in NULL for shared data is recommended if the eventlink is to be used
+ * for the typical RPC ping-pong case whereby one side of the eventlink is
+ * always blocked waiting on a signal from the other side. In this case, each
+ * signal causes a single wakeup.
+ *
+ * Passing in shared data is recommended when one side of the eventlink is not
+ * necessarily always waiting for the other's signal in order to work. Passing
+ * in the shared data allows for more efficient signalling - potentially without
+ * any system calls.
+ */
+int
+os_eventlink_set_shared_data(os_eventlink_t eventlink,
+		os_eventlink_shared_data_t data);
+
+#endif
+
+/*!
+ * @function os_eventlink_activate
+ *
+ * @abstract
+ * Activates the os_eventlink object for use. No further configuration can be
+ * done on the eventlink object after it has been activated. This API is not
+ * real-time safe.
+ *
+ * If an error is encountered, errno is set and returned.
+ */
+OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT
+int
+os_eventlink_activate(os_eventlink_t eventlink);
+
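Since this header is new in the patch, a rough setup sketch may help; this is private SPI, so treat the flow as illustrative only (error paths abridged; os_eventlink_extract_remote_port() is declared just below):

#include <mach/mach.h>
#include <os/eventlink_private.h>

static os_eventlink_t
make_local_endpoint(mach_port_t *port_out)
{
	os_eventlink_t link = os_eventlink_create("com.example.link");
	if (link == NULL) return NULL;
	if (os_eventlink_activate(link) != 0) return NULL;
	// Extract the send right for the remote side and ship it out of
	// band; the receiver passes it to os_eventlink_create_with_port().
	if (os_eventlink_extract_remote_port(link, port_out) != 0) return NULL;
	return link;
}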
+/*!
+ * @function os_eventlink_extract_remote_port
+ *
+ * @abstract
+ * Returns a reference to a send right representing the remote endpoint of the
+ * eventlink. This port is to be passed to os_eventlink_create_with_port() to
+ * create an eventlink object.
+ *
+ * Calling this function multiple times on an eventlink object will result in an
+ * error.
+ *
+ * @param eventlink
+ * An eventlink returned from a previous call to os_eventlink_create(). This
+ * eventlink must have been activated.
+ */
+OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT
+int
+os_eventlink_extract_remote_port(os_eventlink_t eventlink, mach_port_t *port_out);
+
+/*!
+ * @function os_eventlink_create_with_port
+ *
+ * @abstract
+ * Creates an inactive eventlink from a port returned from a previous call to
+ * os_eventlink_extract_remote_port. This function does not consume a reference
+ * on the specified send right.
+ */
+OS_EXPORT OS_OBJECT_RETURNS_RETAINED
+os_eventlink_t _Nullable
+os_eventlink_create_with_port(const char *name, mach_port_t mach_port);
+
+/*!
+ * @function os_eventlink_create_remote_with_eventlink
+ *
+ * @abstract
+ * Creates an inactive refcounted os_object representing an os_eventlink_t
+ * remote endpoint. Each eventlink has exactly one remote endpoint that can be
+ * created from it. Calling this function more than once on an eventlink object
+ * returned from os_eventlink_create() will result in an error.
+ *
+ * @param eventlink
+ * An eventlink returned from a previous call to os_eventlink_create(). This
+ * eventlink must have been activated.
+ */
+OS_EXPORT OS_OBJECT_RETURNS_RETAINED
+os_eventlink_t _Nullable
+os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t eventlink);
+
+/*!
+ * @function os_eventlink_associate
+ *
+ * @abstract
+ * Associate a thread with the eventlink endpoint provided. The eventlink
+ * provided should be activated before this call. This API is not
+ * real-time safe.
+ *
+ * If a thread is already associated with the eventlink, errno is set and
+ * returned.
+ */
+
+OS_ENUM(os_eventlink_associate_options, uint64_t,
+	OE_ASSOCIATE_CURRENT_THREAD = 0,
+	OE_ASSOCIATE_ON_WAIT = 0x1,
+);
+
+OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT
+int
+os_eventlink_associate(os_eventlink_t eventlink,
+		os_eventlink_associate_options_t options);
+
+/*!
+ * @function os_eventlink_disassociate
+ *
+ * @abstract
+ * Disassociate the current thread from the eventlink endpoint provided. This
+ * API is not real-time safe.
+ *
+ * If the current thread is not associated with the eventlink via a previous
+ * call to os_eventlink_associate, errno is set and returned.
+ */
+OS_EXPORT
+int
+os_eventlink_disassociate(os_eventlink_t eventlink);
+
+/*!
+ * @function os_eventlink_wait
+ *
+ * @abstract
+ * Wait on the eventlink endpoint for a signal from the other endpoint. If there
+ * are outstanding signals, this function will consume them and return
+ * immediately.
+ *
+ * Upon receiving a signal, the function returns the number of signals that have
+ * been consumed by the waiter in the out parameter if specified.
+ *
+ * If the eventlink has not been previously associated via a call to
+ * os_eventlink_associate or if there is a mismatch between the associated
+ * thread and the current thread, the process will abort. This API call is
+ * real-time safe.
+ */
+OS_EXPORT
+int
+os_eventlink_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out);
+
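A sketch of the basic ping-pong protocol built from the calls above plus os_eventlink_signal(), which is declared next (again illustrative SPI usage, error handling abridged):

#include <os/eventlink_private.h>
#include <stdint.h>

// Target side: bind the calling thread, then block for wakeups.
static void
target_loop(os_eventlink_t link)
{
	if (os_eventlink_associate(link, OE_ASSOCIATE_CURRENT_THREAD) != 0) {
		return;
	}
	uint64_t consumed;
	while (os_eventlink_wait(link, &consumed) == 0) {
		// ... handle `consumed` pending signals ...
	}
	(void)os_eventlink_disassociate(link);
}

// Source side, on the paired remote endpoint: wake the target.
static void
wake_target(os_eventlink_t remote)
{
	(void)os_eventlink_signal(remote);
}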
+/*!
+ * @function os_eventlink_wait_until
+ *
+ * @abstract
+ * Wait on the eventlink endpoint for a signal or until the timeout specified is
+ * hit. If there are outstanding signals, this function will consume them and
+ * return immediately.
+ *
+ * Upon success, the function returns the number of signals that have been
+ * consumed by the waiter in the out parameter, if provided. If the timeout is
+ * hit, then 0 signals are said to have been consumed by the waiter. This API
+ * call is real-time safe.
+ */
+OS_EXPORT
+int
+os_eventlink_wait_until(os_eventlink_t eventlink, os_clockid_t clock,
+		uint64_t timeout, uint64_t * _Nullable signals_consumed_out);
+
+/*!
+ * @function os_eventlink_signal
+ *
+ * @abstract
+ * Signal the other endpoint of an eventlink. This API call is real-time safe.
+ *
+ * If an error is encountered, errno will be set and returned.
+ */
+OS_EXPORT
+int
+os_eventlink_signal(os_eventlink_t eventlink);
+
+/*!
+ * @function os_eventlink_signal_and_wait
+ *
+ * @abstract
+ * Signals on an eventlink endpoint and then proceeds to wait on it until the
+ * eventlink is signalled. Returns the number of signals consumed by the waiter
+ * through the out parameter if provided. This API call is real-time safe.
+ */
+OS_EXPORT
+int
+os_eventlink_signal_and_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out);
+
+/*!
+ * @function os_eventlink_signal_and_wait_until
+ *
+ * @abstract
+ * Signals on an eventlink endpoint and then proceeds to wait on it until the
+ * eventlink is signalled or the timeout is hit. Returns the number of signals
+ * consumed by the waiter through the out parameter if provided, with 0
+ * indicating that a timeout has been hit. This API call is real-time safe.
+ */
+OS_EXPORT
+int
+os_eventlink_signal_and_wait_until(os_eventlink_t eventlink, os_clockid_t clock,
+		uint64_t timeout, uint64_t * _Nullable signals_consumed_out);
+
+/*
+ * @function os_eventlink_cancel
+ *
+ * @abstract
+ * Invalidates an eventlink. The only follow-up actions possible on the eventlink
+ * after it has been invalidated are to disassociate from the eventlink and
+ * dispose of it.
+ *
+ * If the eventlink had a remote endpoint created, the remote side will get an
+ * ECANCELED when it tries to wait or signal on it. Existing waiters on the
+ * eventlink will get the same result as well. The only valid follow-up
+ * actions possible on a remote endpoint are to disassociate from the eventlink
+ * and dispose of it.
+ *
+ * This API is idempotent. It is not required to call this API before dropping
+ * the last reference count of an eventlink.
+ */ +OS_EXPORT +void +os_eventlink_cancel(os_eventlink_t eventlink); + +OS_OBJECT_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_EVENTLINK__ */ diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index a633bf408..7ee0541ba 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -78,7 +78,7 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_buffer_t __firehose_buffer_create(size_t *size); -void +bool __firehose_merge_updates(firehose_push_reply_t update); int @@ -97,9 +97,11 @@ static inline const uint8_t * _firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { const uint8_t *start = fc->fc_data; - const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + const uint8_t *end; - if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { + if (fc->fc_pos.fcp_next_entry_offs <= FIREHOSE_CHUNK_SIZE) { + end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + } else { end = start; } *endptr = end; diff --git a/os/object.h b/os/object.h index 2979de891..e2ce3f467 100644 --- a/os/object.h +++ b/os/object.h @@ -98,6 +98,15 @@ #endif // __swift__ #endif // OS_OBJECT_SWIFT3 +#if __has_feature(assume_nonnull) +#define OS_OBJECT_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_OBJECT_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_OBJECT_ASSUME_NONNULL_BEGIN +#define OS_OBJECT_ASSUME_NONNULL_END +#endif +#define OS_OBJECT_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #if OS_OBJECT_USE_OBJC #import #if __has_attribute(objc_independent_class) @@ -116,9 +125,9 @@ #define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, proto) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL( \ OS_OBJECT_CLASS(name), OS_OBJECT_CLASS(proto)) -#define OS_OBJECT_DECL_IMPL(name, ...) \ +#define OS_OBJECT_DECL_IMPL(name, adhere, ...) \ OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \ - typedef NSObject \ + typedef adhere \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL_BASE(name, ...) \ @interface OS_OBJECT_CLASS(name) : __VA_ARGS__ \ @@ -129,9 +138,9 @@ typedef OS_OBJECT_CLASS(name) \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL(name, ...) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else @@ -149,6 +158,8 @@ #define OS_OBJECT_BRIDGE #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif + + #if __has_attribute(objc_runtime_visible) && \ ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) || \ @@ -163,7 +174,7 @@ /* * To provide backward deployment of ObjC objects in Swift on pre-10.12 * SDKs, OS_object classes can be marked as OS_OBJECT_OBJC_RUNTIME_VISIBLE. - * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, + * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, * tvOS 10.0, watchOS 3.0) the Swift compiler will only refer to this type at * runtime (using the ObjC runtime). */ @@ -187,9 +198,9 @@ #define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \ OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) +#endif // OS_OBJECT_SWIFT3 OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_DECL_BASE(object, NSObject); -#endif // OS_OBJECT_SWIFT3 #else /*! 
@parseOnly */ #define OS_OBJECT_RETURNS_RETAINED @@ -215,6 +226,27 @@ OS_OBJECT_DECL_BASE(object, NSObject); typedef struct name##_s *name##_t #endif +#if OS_OBJECT_USE_OBJC +/* Declares a class of the specific name and exposes the interface and typedefs + * name##_t to the pointer to the class */ +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, ## __VA_ARGS__ ) +/* Declares a subclass of the same name, and + * subclass adheres to protocol specified. Typedefs baseclass * to subclass##_t */ +#define OS_OBJECT_SHOW_SUBCLASS(subclass_name, super, proto_name) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_BASE(subclass_name, OS_OBJECT_CLASS(super)); \ + typedef OS_OBJECT_CLASS(super) \ + * OS_OBJC_INDEPENDENT_CLASS subclass_name##_t +#else /* Plain C */ +#define OS_OBJECT_DECL_PROTOCOL(name, ...) +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + typedef struct name##_s *name##_t +#define OS_OBJECT_SHOW_SUBCLASS(name, super, ...) \ + typedef super##_t name##_t +#endif + #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) __BEGIN_DECLS diff --git a/os/object_private.h b/os/object_private.h index 003369ecc..0d58e8650 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -30,6 +30,12 @@ #include #include #include +#if __has_include() +#include +#endif +#ifndef __ptrauth_objc_isa_pointer +#define __ptrauth_objc_isa_pointer +#endif #if __GNUC__ #define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) @@ -63,7 +69,7 @@ #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX #define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ - isa; /* must be pointer-sized */ \ + isa; /* must be pointer-sized and use __ptrauth_objc_isa_pointer */ \ int volatile ref_cnt; \ int volatile xref_cnt @@ -100,7 +106,7 @@ #if OS_OBJECT_USE_OBJC #define OS_OBJECT_USES_XREF_DISPOSE() \ - (oneway void)release { \ - _os_object_release(self); \ + _os_object_release((OS_object *) self); \ } #endif @@ -129,9 +135,7 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) #elif OS_OBJECT_USE_OBJC -API_AVAILABLE(macos(10.8), ios(6.0)) -OS_OBJECT_EXPORT -@interface OS_OBJECT_CLASS(object) : NSObject +@interface OS_OBJECT_CLASS(object) (OSObjectPrivate) // Note: objects who want _xref_dispose to be called need // to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; @@ -158,7 +162,7 @@ API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t -_os_object_alloc(const void *cls, size_t size); +_os_object_alloc(const void * _Nullable cls, size_t size); API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW diff --git a/os/voucher_private.h b/os/voucher_private.h index ad4e31274..3e72c919a 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -24,6 +24,9 @@ #if __APPLE__ #include #include + +#include +#define OS_VOUCHER_TSD_KEY __PTK_LIBDISPATCH_KEY8 #endif #if __has_include() #include @@ -101,12 +104,41 @@ OS_OBJECT_DECL_CLASS(voucher); * @result * The previously adopted voucher object. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW voucher_t _Nullable voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); +/*! 
+ * @function voucher_needs_adopt + * + * @abstract + * An inline check to determine if the input voucher matches the one + * on the current thread. This can be used to shortcircuit calls to + * voucher_adopt() and avoid a cross library jump. If this function returns + * true, then the client should make sure to follow up with a voucher_adopt() + * call. + * + * This check must only be in code that ships with the operating system since + * the TSD key assignment is not ABI. + * + * @param voucher + * The input voucher being tested + */ + +SPI_AVAILABLE(macos(12.0), ios(15.0)) +__header_always_inline bool +voucher_needs_adopt(voucher_t _Nullable voucher) +{ +#if __APPLE__ + if (_pthread_has_direct_tsd()) { + return (((void *) voucher) != _pthread_getspecific_direct(OS_VOUCHER_TSD_KEY)); + } +#endif + return true; +} + /*! * @function voucher_copy * @@ -117,7 +149,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); * @result * The currently adopted voucher object. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); @@ -136,7 +168,7 @@ voucher_copy(void); * @result * A copy of the currently adopted voucher object, with importance removed. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy_without_importance(void); @@ -162,7 +194,7 @@ voucher_copy_without_importance(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_replace_default_voucher(void); @@ -180,7 +212,7 @@ voucher_replace_default_voucher(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); @@ -264,7 +296,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -345,7 +377,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -363,7 +395,7 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * @abstract * Deprecated, do not use, will abort process if called. */ -API_DEPRECATED("removed SPI", \ +SPI_DEPRECATED("removed SPI", \ macos(10.11,10.13), ios(9.0,11.0), watchos(2.0,4.0), tvos(9.0,11.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -397,7 +429,7 @@ dispatch_queue_create_with_accounting_override_voucher( * The newly created voucher object or NULL if the message was not carrying a * mach voucher. 
*/ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); @@ -437,14 +469,14 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg); * * @param max_hex_data * The maximum number of bytes of hex data to be formatted for voucher content - * that is not of type MACH_VOUCHER_ATTR_KEY_ATM, MACH_VOUCHER_ATTR_KEY_BANK - * or MACH_VOUCHER_ATTR_KEY_IMPORTANCE. + * that is not of type MACH_VOUCHER_ATTR_KEY_BANK or + * MACH_VOUCHER_ATTR_KEY_IMPORTANCE. * * @result * The offset of the first byte in the buffer following the formatted voucher * representation. */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +SPI_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW DISPATCH_COLD size_t voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, @@ -479,7 +511,7 @@ struct proc_persona_info; * or the persona identifier of the current process * or PERSONA_ID_NONE */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -502,7 +534,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -526,12 +558,23 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( struct proc_persona_info *persona_info); +/*! + * @function voucher_process_can_use_arbitrary_personas + * + * @abstract + * Returns true if the current process is able to use arbitrary personas + */ +SPI_AVAILABLE(macos(12.0), ios(15.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT +bool +voucher_process_can_use_arbitrary_personas(void); + /*! * @function voucher_copy_with_persona_mach_voucher * @@ -578,7 +621,7 @@ voucher_copy_with_persona_mach_voucher( * KERN_RESOURCE_SHORTAGE: mach voucher creation failed due to * lack of free space */ -API_AVAILABLE(macos(10.14), ios(12)) +SPI_AVAILABLE(macos(10.15), ios(12)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 kern_return_t mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher); diff --git a/os/workgroup.h b/os/workgroup.h new file mode 100644 index 000000000..96b870c10 --- /dev/null +++ b/os/workgroup.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP__ +#define __OS_WORKGROUP__ + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#ifndef __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#endif /* __OS_WORKGROUP_INDIRECT__ */ + +#include +#include +#include +#include + +#undef __OS_WORKGROUP_INDIRECT__ +#endif /* __DISPATCH_BUILDING_DISPATCH__ */ + +#endif /* __OS_WORKGROUP__ */ diff --git a/os/workgroup_base.h b/os/workgroup_base.h new file mode 100644 index 000000000..3983f002a --- /dev/null +++ b/os/workgroup_base.h @@ -0,0 +1,78 @@ +#ifndef __OS_WORKGROUP_BASE__ +#define __OS_WORKGROUP_BASE__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if __has_feature(assume_nonnull) +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_WORKGROUP_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN +#define OS_WORKGROUP_ASSUME_NONNULL_END +#endif +#define OS_WORKGROUP_WARN_RESULT __attribute__((__warn_unused_result__)) +#define OS_WORKGROUP_EXPORT OS_EXPORT +#define OS_WORKGROUP_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED + +#define OS_WORKGROUP_DECL(name, swift_name) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_CLASS(name, OS_OBJECT_CLASS(object)) + +#if OS_OBJECT_USE_OBJC +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_DECL_PROTOCOL(name ## __VA_ARGS__ ) +#else +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) +#endif + +#define OS_WORKGROUP_SUBCLASS_DECL(name, super, swift_name, ...) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_SUBCLASS(name, super, name, ## __VA_ARGS__) + +#if defined(__LP64__) +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 36 +#else +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 28 +#endif + +#define _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT 0x2FA863B4 +#define _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT 0x2FA863C4 + +struct OS_REFINED_FOR_SWIFT os_workgroup_attr_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_ATTR_SIZE__]; +}; + +#define _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT 0x52A74C4D +struct OS_REFINED_FOR_SWIFT os_workgroup_interval_data_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_INTERVAL_DATA_SIZE__]; +}; + +struct OS_REFINED_FOR_SWIFT os_workgroup_join_token_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_JOIN_TOKEN_SIZE__]; +}; + +#endif /* __OS_WORKGROUP_BASE__ */ diff --git a/os/workgroup_interval.h b/os/workgroup_interval.h new file mode 100644 index 000000000..b056f82cf --- /dev/null +++ b/os/workgroup_interval.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_INTERVAL__ +#define __OS_WORKGROUP_INTERVAL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_interval_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking work performed as part of + * a repeating interval-driven workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_interval_s *os_workgroup_interval_t; +#else +OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_interval, Repeatable); +OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_interval, os_workgroup, WorkGroupInterval); +#endif + +/* During the first instance of this API, the only supported interval + * workgroups are for audio workloads. Please refer to the AudioToolbox + * framework for more information. + */ + +/* + * @typedef os_workgroup_interval_data, os_workgroup_interval_data_t + * + * @abstract + * An opaque structure containing additional configuration for the workgroup + * interval. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) +typedef struct os_workgroup_interval_data_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_s *os_workgroup_interval_data_t; +#else +typedef struct os_workgroup_interval_data_opaque_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_opaque_s *os_workgroup_interval_data_t; +#endif +#define OS_WORKGROUP_INTERVAL_DATA_INITIALIZER \ + { .sig = _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT } + +/*! + * @function os_workgroup_interval_start + * + * @abstract + * Indicates to the system that the member threads of this + * os_workgroup_interval_t have begun working on an instance of the repeatable + * interval workload with the specified timestamps. This function is real time + * safe. + * + * This function will set and return an errno in the following cases: + * + * - The current thread is not a member of the os_workgroup_interval_t + * - The os_workgroup_interval_t has been cancelled + * - The timestamps passed in are malformed + * - os_workgroup_interval_start() was previously called on the + * os_workgroup_interval_t without an intervening os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place. + * + * @param start + * Start timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This is generally a time in the past and + * indicates when the workgroup started working on an interval period + * + * @param deadline + * Deadline timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This specifies the deadline which the + * interval period would like to meet. 
+ *
+ * @param data
+ * This field is currently unused and should be NULL.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_interval_start(os_workgroup_interval_t wg, uint64_t start, uint64_t
+		deadline, os_workgroup_interval_data_t _Nullable data);
+
+/*!
+ * @function os_workgroup_interval_update
+ *
+ * @abstract
+ * Updates the deadline of an interval workgroup that has already been
+ * started. This function is real time safe.
+ *
+ * This function will set and return an errno in the following cases:
+ * - The current thread is not a member of the os_workgroup_interval_t
+ * - The os_workgroup_interval_t has been cancelled
+ * - The timestamp passed in is malformed
+ * - os_workgroup_interval_start() was not previously called on the
+ * os_workgroup_interval_t or was already matched with an
+ * os_workgroup_interval_finish()
+ * - A concurrent workgroup interval configuration operation is taking place
+ *
+ * @param deadline
+ * Timestamp specified in the os_clockid_t with
+ * which the os_workgroup_interval_t was created.
+ *
+ * @param data
+ * This field is currently unused and should be NULL.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_interval_update(os_workgroup_interval_t wg, uint64_t deadline,
+		os_workgroup_interval_data_t _Nullable data);
+
+/*!
+ * @function os_workgroup_interval_finish
+ *
+ * @abstract
+ * Indicates to the system that the member threads of
+ * this os_workgroup_interval_t have finished working on the current instance
+ * of the interval workload. This function is real time safe.
+ *
+ * This function will set and return an errno in the following cases:
+ * - The current thread is not a member of the os_workgroup_interval_t
+ * - os_workgroup_interval_start() was not previously called on the
+ * os_workgroup_interval_t or was already matched with an
+ * os_workgroup_interval_finish()
+ * - A concurrent workgroup interval configuration operation is taking place.
+ *
+ * @param data
+ * This field is currently unused and should be NULL.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_interval_finish(os_workgroup_interval_t wg,
+		os_workgroup_interval_data_t _Nullable data);
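+
+/*
+ * Example (illustrative sketch): one period of a repeating interval workload,
+ * driven by a member thread. Timestamps are in the os_clockid_t the workgroup
+ * was created with; `wg`, `now`, `deadline` and `render_frame` are
+ * hypothetical.
+ *
+ *	int rc = os_workgroup_interval_start(wg, now, deadline, NULL);
+ *	if (rc == 0) {
+ *		render_frame();
+ *		rc = os_workgroup_interval_finish(wg, NULL);
+ *	}
+ */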
+
+OS_WORKGROUP_ASSUME_NONNULL_END
+
+__END_DECLS
+
+#endif /* __OS_WORKGROUP_INTERVAL__ */
diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h
new file mode 100644
index 000000000..48ddc7301
--- /dev/null
+++ b/os/workgroup_interval_private.h
@@ -0,0 +1,188 @@
+#ifndef __OS_WORKGROUP_INTERVAL_PRIVATE__
+#define __OS_WORKGROUP_INTERVAL_PRIVATE__
+
+#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__
+#error "Please #include instead of this file directly."
+#include // For header doc
+#endif
+
+__BEGIN_DECLS
+
+OS_WORKGROUP_ASSUME_NONNULL_BEGIN
+
+/*
+ * @typedef os_workgroup_interval_type_t
+ *
+ * @abstract
+ * Describes a specialized os_workgroup_interval type the client would like to
+ * create.
+ *
+ * Clients need the 'com.apple.private.kernel.work-interval' entitlement to
+ * create all workgroup types listed below except the following:
+ *
+ * OS_WORKGROUP_INTERVAL_TYPE_DEFAULT,
+ * OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT,
+ * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT,
+ *
+ * Note that only real-time threads are allowed to join workgroups of type
+ * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT and
+ * OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO.
+ */
+OS_ENUM(os_workgroup_interval_type, uint16_t,
+	OS_WORKGROUP_INTERVAL_TYPE_DEFAULT = 0x1,
+	OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT,
+	OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT,
+
+	OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO,
+	OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION,
+	OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER,
+	OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY,
+	OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA,
+
+	OS_WORKGROUP_INTERVAL_TYPE_ARKIT,
+);
+
+/*
+ * @function os_workgroup_attr_set_interval_type
+ *
+ * @abstract
+ * Specifies that the os_workgroup_interval_t to be created should be of a
+ * specialized type. These types should only be specified when creating an
+ * os_workgroup_interval_t using the os_workgroup_interval_create or
+ * os_workgroup_interval_create_with_workload_id APIs - using it with any other
+ * workgroup creation API will result in an error at creation time.
+ *
+ * When used with os_workgroup_interval_create_with_workload_id, the type
+ * specified via this attribute must match the one configured by the system for
+ * the provided workload identifier (if that identifier is known).
+ *
+ * Setting type OS_WORKGROUP_INTERVAL_TYPE_DEFAULT on an os_workgroup_interval_t
+ * is a no-op.
+ *
+ * EINVAL is returned if the attribute passed in hasn't been initialized.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_WORKGROUP_EXPORT
+int
+os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr,
+		os_workgroup_interval_type_t type);
+
+/*
+ * @function os_workgroup_interval_create
+ *
+ * @abstract
+ * Creates an os_workgroup_interval_t with the specified name and attributes.
+ * This object tracks a repeatable workload characterized by a start time, end
+ * time and targeted deadline. Example use cases include audio and graphics
+ * rendering workloads.
+ *
+ * A newly created os_workgroup_interval_t has no initial member threads - in
+ * particular the creating thread does not join the os_workgroup_interval_t
+ * implicitly.
+ *
+ * @param name
+ * A client specified string for labelling the workgroup. This parameter is
+ * optional and can be NULL.
+ *
+ * @param clock
+ * The clockid in which timestamps passed to the os_workgroup_interval_start()
+ * and os_workgroup_interval_update() functions are specified.
+ *
+ * @param attr
+ * The requested set of os_workgroup_t attributes. NULL is to be specified for
+ * the default set of attributes. By default, an interval workgroup
+ * is nonpropagating with asynchronous work and differentiated from other threads
+ * in the process (see os_workgroup_attr_flags_t).
+ *
+ * The OS_WORKGROUP_ATTR_UNDIFFERENTIATED attribute is invalid to specify for
+ * interval workgroups. If it is, or if invalid attributes are specified, this
+ * function returns NULL and sets errno.
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create(const char * _Nullable name, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + +/* + * @function os_workgroup_interval_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_interval_t with the specified name and workload + * identifier. + * This object tracks a repeatable workload characterized by a start time, end + * time and targeted deadline. Example use cases include audio and graphics + * rendering workloads. + * + * The newly created os_workgroup_interval_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_interval_t + * implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * If the specified identifier is known, it must refer to a workload configured + * as being of interval type, or this function will return NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @param clockid + * The clockid in which timestamps passed to the os_workgroup_interval_start() + * and os_workgroup_interval_update() functions are specified. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * The interval type specified by the attributes will be used as a fallback in + * case the provided workload identifier is unknown. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * or explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). 
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + +/* This SPI is for use by Audio Toolbox only. This function returns a reference + * which is the responsibility of the caller to manage. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t +os_workgroup_interval_copy_current_4AudioToolbox(void); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS +#endif /* __OS_WORKGROUP_INTERVAL_PRIVATE__ */ diff --git a/os/workgroup_object.h b/os/workgroup_object.h new file mode 100644 index 000000000..5c8bd4f1a --- /dev/null +++ b/os/workgroup_object.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_OBJECT__ +#define __OS_WORKGROUP_OBJECT__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_t + * + * @abstract + * A reference counted os object representing a workload that needs to + * be distinctly recognized and tracked by the system. The workgroup + * tracks a collection of threads all working cooperatively. An os_workgroup + * object - when not an instance of a specific os_workgroup_t subclass - + * represents a generic workload and makes no assumptions about the kind of + * work done. + * + * @discussion + * Threads can explicitly join an os_workgroup_t to mark themselves as + * participants in the workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_s *os_workgroup_t; +#else +OS_WORKGROUP_DECL(os_workgroup, WorkGroup); +#endif + + +/* Attribute creation and specification */ + +/*! + * @typedef os_workgroup_attr_t + * + * @abstract + * Pointer to an opaque structure for describing attributes that can be + * configured on a workgroup at creation. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_attr_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_s *os_workgroup_attr_t; +#else +typedef struct os_workgroup_attr_opaque_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_opaque_s *os_workgroup_attr_t; +#endif + +/* os_workgroup_t attributes need to be initialized before use. This initializer + * allows you to create a workgroup with the system default attributes. 
 */
+#define OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT \
+	{ .sig = _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT }
+
+/* The main use of the workgroup API is through instantiations of the concrete
+ * subclasses - please refer to os/workgroup_interval.h and
+ * os/workgroup_parallel.h for more information on creating workgroups.
+ *
+ * The functions below operate on all subclasses of os_workgroup_t.
+ */
+
+/*!
+ * @function os_workgroup_copy_port
+ *
+ * @abstract
+ * Returns a reference to a send right representing this workgroup that is to be
+ * sent to other processes. This port is to be passed to
+ * os_workgroup_create_with_port() to create a workgroup object.
+ *
+ * It is the client's responsibility to release the send right reference.
+ *
+ * If an error is encountered, errno is set and returned.
+ */
+API_AVAILABLE(macos(10.16))
+SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out);
+
+/*!
+ * @function os_workgroup_create_with_port
+ *
+ * @abstract
+ * Create an os_workgroup_t object from a send right returned by a previous
+ * call to os_workgroup_copy_port, potentially in a different process.
+ *
+ * A newly created os_workgroup_t has no initial member threads - in particular
+ * the creating thread does not join the os_workgroup_t implicitly.
+ *
+ * @param name
+ * A client specified string for labelling the workgroup. This parameter is
+ * optional and can be NULL.
+ *
+ * @param mach_port
+ * The send right to create the workgroup from. No reference is consumed
+ * on the specified send right.
+ */
+API_AVAILABLE(macos(10.16))
+SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0))
+OS_SWIFT_NAME(WorkGroup.init(__name:port:)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED
+os_workgroup_t _Nullable
+os_workgroup_create_with_port(const char *_Nullable name, mach_port_t mach_port);
+
+/*!
+ * @function os_workgroup_create_with_workgroup
+ *
+ * @abstract
+ * Create a new os_workgroup object from an existing os_workgroup.
+ *
+ * The newly created os_workgroup has no initial member threads - in particular
+ * the creating thread does not join the os_workgroup_t implicitly.
+ *
+ * @param name
+ * A client specified string for labelling the workgroup. This parameter is
+ * optional and can be NULL.
+ *
+ * @param wg
+ * The existing workgroup to create a new workgroup object from.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED
+os_workgroup_t _Nullable
+os_workgroup_create_with_workgroup(const char * _Nullable name, os_workgroup_t wg);
+
+/*!
+ * @typedef os_workgroup_join_token, os_workgroup_join_token_t
+ *
+ * @abstract
+ * An opaque join token which the client needs to pass to os_workgroup_join
+ * and os_workgroup_leave.
+ */
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct os_workgroup_join_token_s os_workgroup_join_token_s;
+typedef struct os_workgroup_join_token_s *os_workgroup_join_token_t;
+#else
+OS_REFINED_FOR_SWIFT
+typedef struct os_workgroup_join_token_opaque_s os_workgroup_join_token_s;
+OS_REFINED_FOR_SWIFT
+typedef struct os_workgroup_join_token_opaque_s *os_workgroup_join_token_t;
+#endif
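+
+/*
+ * Example (illustrative sketch): passing a workgroup to another process.
+ * Transporting `port` over IPC is elided and the names are hypothetical.
+ *
+ *	mach_port_t port;
+ *	if (os_workgroup_copy_port(wg, &port) == 0) {
+ *		// ... ship the send right to the peer process ...
+ *	}
+ *
+ *	// In the receiving process:
+ *	os_workgroup_t remote_wg =
+ *			os_workgroup_create_with_port("com.example.remote", port);
+ */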
+
+/*!
+ * @function os_workgroup_join
+ *
+ * @abstract
+ * Joins the current thread to the specified workgroup and populates the join
+ * token that has been passed in. This API is real-time safe.
+ *
+ * @param wg
+ * The workgroup that the current thread would like to join.
+ *
+ * @param token_out
+ * Pointer to a client-allocated struct which the function will populate
+ * with the join token. This token must be passed in by the thread when it calls
+ * os_workgroup_leave().
+ *
+ * Errors will be returned in the following cases:
+ *
+ *	EALREADY	The thread is already part of a workgroup that the specified
+ *				workgroup does not nest with
+ *	EINVAL		The workgroup has been cancelled
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token_out);
+
+/*!
+ * @function os_workgroup_leave
+ *
+ * @abstract
+ * Removes the current thread from a workgroup it has previously
+ * joined. Threads must leave all workgroups in the reverse order that they
+ * have joined them. Failing to do so before exiting will result in undefined
+ * behavior.
+ *
+ * If the join token is malformed, the process will be aborted.
+ *
+ * This API is real-time safe.
+ *
+ * @param wg
+ * The workgroup that the current thread would like to leave.
+ *
+ * @param token
+ * This is the join token populated by the most recent call to
+ * os_workgroup_join().
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT
+void
+os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token);
+
+/* Working Arena index of a thread in a workgroup */
+typedef uint32_t os_workgroup_index;
+/* Destructor for Working Arena */
+typedef void (*os_workgroup_working_arena_destructor_t)(void * _Nullable);
+
+/*!
+ * @function os_workgroup_set_working_arena
+ *
+ * @abstract
+ * Associates a client defined working arena with the workgroup. The arena
+ * is local to the workgroup object in the process. This is intended for
+ * distributing a manually managed memory allocation between member threads
+ * of the workgroup.
+ *
+ * This function can be called multiple times and the client specified
+ * destructor will be called on the previously assigned arena, if any. This
+ * function can only be called when no threads have currently joined the
+ * workgroup and all workloops associated with the workgroup are idle.
+ *
+ * @param wg
+ * The workgroup to associate the working arena with.
+ *
+ * @param arena
+ * The client managed arena to associate with the workgroup. This value can
+ * be NULL.
+ *
+ * @param max_workers
+ * The maximum number of threads that will ever query the workgroup for the
+ * arena and request an index into it. If the arena is not used to partition
+ * work amongst member threads, then this field can be 0.
+ *
+ * @param destructor
+ * A destructor to call on the previously assigned working arena, if any.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable arena,
+		uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor);
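+
+/*
+ * Example (illustrative sketch): a member thread joining a workgroup,
+ * claiming its slice of a previously configured working arena (see
+ * os_workgroup_get_working_arena() below), and leaving again. Error handling
+ * is minimal; `process_slice` and `SLICE_LEN` are hypothetical.
+ *
+ *	os_workgroup_join_token_s token;
+ *	if (os_workgroup_join(wg, &token) == 0) {
+ *		os_workgroup_index idx;
+ *		float *arena = os_workgroup_get_working_arena(wg, &idx);
+ *		if (arena) process_slice(&arena[idx * SLICE_LEN]);
+ *		os_workgroup_leave(wg, &token);
+ *	}
+ */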
+
+/*!
+ * @function os_workgroup_get_working_arena
+ *
+ * @abstract
+ * Returns the working arena associated with the workgroup and the current
+ * thread's index in the workgroup. This function can only be called by a member
+ * of the workgroup. Multiple calls to this API by a member thread will return
+ * the same arena and index until the thread leaves the workgroup.
+ *
+ * For workloops with an associated workgroup, every work item on the workloop
+ * will receive the same index in the arena.
+ *
+ * This method returns NULL if no arena is set on the workgroup. The index
+ * returned by this function is zero-based and is namespaced per workgroup
+ * object in the process. The indices provided are strictly monotonic and never
+ * reused until a future call to os_workgroup_set_working_arena.
+ *
+ * @param wg
+ * The workgroup to get the working arena from.
+ *
+ * @param index_out
+ * A pointer to an os_workgroup_index which will be populated with the caller's
+ * index in the workgroup.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT
+void * _Nullable
+os_workgroup_get_working_arena(os_workgroup_t wg,
+		os_workgroup_index * _Nullable index_out);
+
+/*!
+ * @function os_workgroup_cancel
+ *
+ * @abstract
+ * This API invalidates a workgroup and indicates to the system that the
+ * workload is no longer relevant to the caller.
+ *
+ * No new work should be initiated for a cancelled workgroup and
+ * work that is already underway should periodically check for
+ * cancellation with os_workgroup_testcancel and initiate cleanup if needed.
+ *
+ * Threads currently in the workgroup continue to be tracked together but no
+ * new threads may join this workgroup - the only possible operation allowed is
+ * to leave the workgroup. Other actions may have undefined behavior or
+ * otherwise fail.
+ *
+ * This API is idempotent. Cancellation is local to the workgroup object
+ * it is called on and does not affect other workgroups.
+ *
+ * @param wg
+ * The workgroup that the thread would like to cancel.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT
+void
+os_workgroup_cancel(os_workgroup_t wg);
+
+/*!
+ * @function os_workgroup_testcancel
+ *
+ * @abstract
+ * Returns true if the workgroup object has been cancelled. See also
+ * os_workgroup_cancel().
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT
+bool
+os_workgroup_testcancel(os_workgroup_t wg);
+
+/*!
+ * @typedef os_workgroup_max_parallel_threads_attr_t
+ *
+ * @abstract
+ * A pointer to a structure describing the set of properties of a workgroup to
+ * override with the explicitly specified values in the structure.
+ *
+ * See also os_workgroup_max_parallel_threads.
+ */
+OS_REFINED_FOR_SWIFT
+typedef struct os_workgroup_max_parallel_threads_attr_s os_workgroup_mpt_attr_s;
+OS_REFINED_FOR_SWIFT
+typedef struct os_workgroup_max_parallel_threads_attr_s *os_workgroup_mpt_attr_t;
+
+/*!
+ * @function os_workgroup_max_parallel_threads
+ *
+ * @abstract
+ * Returns the system's recommendation for the maximum number of threads the
+ * client should make for a multi-threaded workload in a given workgroup.
+ *
+ * This API takes into consideration the current hardware the code is running on
+ * and the attributes of the workgroup. It does not take into consideration the
+ * current load of the system and therefore always provides an optimal
+ * recommendation for the workload.
+ *
+ * @param wg
+ * The workgroup in which the multi-threaded workload will be performed. The
+ * threads performing the multi-threaded workload are expected to join this
+ * workgroup.
+ *
+ * @param attr
+ * This value is currently unused and should be NULL.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT
+int
+os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t
+		_Nullable attr);
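+
+/*
+ * Example (illustrative sketch): sizing a pool of member threads from the
+ * system recommendation, and checking for cancellation from long-running
+ * work; `spawn_member_thread` is hypothetical.
+ *
+ *	int n = os_workgroup_max_parallel_threads(wg, NULL);
+ *	for (int i = 0; i < n; i++) {
+ *		spawn_member_thread(wg);
+ *	}
+ *
+ *	// Periodically, on each member thread:
+ *	if (os_workgroup_testcancel(wg)) {
+ *		// stop initiating new work and leave the workgroup
+ *	}
+ */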
+
+OS_WORKGROUP_ASSUME_NONNULL_END
+
+__END_DECLS
+
+#endif /* __OS_WORKGROUP_OBJECT__ */
diff --git a/os/workgroup_object_private.h b/os/workgroup_object_private.h
new file mode 100644
index 000000000..ec7ebee71
--- /dev/null
+++ b/os/workgroup_object_private.h
@@ -0,0 +1,285 @@
+#ifndef __OS_WORKGROUP_OBJECT_PRIVATE__
+#define __OS_WORKGROUP_OBJECT_PRIVATE__
+
+#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__
+#error "Please #include instead of this file directly."
+#include // For header doc
+#endif
+
+#include
+
+__BEGIN_DECLS
+
+OS_WORKGROUP_ASSUME_NONNULL_BEGIN
+
+/* Attribute creation and specification */
+
+/* This is for clients who want to build their own workgroup attribute from
+ * scratch instead of configuring their attributes on top of the default set of
+ * attributes */
+#define OS_WORKGROUP_ATTR_INITIALIZER_EMPTY { .sig = _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT }
+
+/*!
+ * @enum os_workgroup_attr_flags_t
+ *
+ * @abstract A bitfield of flags describing options for workgroup configuration
+ */
+OS_ENUM(os_workgroup_attr_flags, uint32_t,
+	/*!
+	 * @const OS_WORKGROUP_ATTR_NONPROPAGATING
+	 *
+	 * Asynchronous work initiated by threads which are members of a
+	 * workgroup with the OS_WORKGROUP_ATTR_NONPROPAGATING attribute will not
+	 * automatically be tracked as part of the workgroup. This applies to work
+	 * initiated by calls such as dispatch_async() that may propagate other
+	 * execution context properties.
+	 *
+	 * os_workgroups which are propagating by default can opt out of this
+	 * behavior by specifying the OS_WORKGROUP_ATTR_NONPROPAGATING flag.
+	 */
+	OS_WORKGROUP_ATTR_NONPROPAGATING = (1 << 1),
+
+	/*!
+	 * @const OS_WORKGROUP_ATTR_UNDIFFERENTIATED
+	 *
+	 * Member threads of a workgroup with the attribute flag
+	 * OS_WORKGROUP_ATTR_UNDIFFERENTIATED are tracked and measured together with
+	 * other threads in their process by the system for scheduling and
+	 * performance control.
+	 *
+	 * os_workgroups which are tracked separately from other threads in
+	 * the process by default, can opt out of it by specifying the
+	 * OS_WORKGROUP_ATTR_UNDIFFERENTIATED flag.
+	 */
+	OS_WORKGROUP_ATTR_UNDIFFERENTIATED = (1 << 2)
+);
+
+/*!
+ * @function os_workgroup_attr_set_flags
+ *
+ * @abstract
+ * Sets the user specified flags in the workgroup attribute. If invalid
+ * attributes are specified, this function will set and return an error.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT
+int
+os_workgroup_attr_set_flags(os_workgroup_attr_t wga,
+		os_workgroup_attr_flags_t flags);
+
+/*!
+ * @function os_workgroup_create
+ *
+ * @abstract
+ * Creates an os_workgroup_t with the specified name and attributes.
+ * A newly created os_workgroup_t has no initial member threads - in particular
+ * the creating thread does not join the os_workgroup_t implicitly.
+ *
+ * @param name
+ * A client specified string for labelling the workgroup. This parameter is
+ * optional and can be NULL.
+ *
+ * @param wga
+ * The requested set of os_workgroup_t attributes. NULL is to be specified for
+ * the default set of attributes. A workgroup with default attributes is
+ * propagating with asynchronous work and differentiated from other threads in
+ * the process (see os_workgroup_attr_flags_t).
+ * + * The attribute flag OS_WORKGROUP_ATTR_NONPROPAGATING MUST currently be + * specified. If it isn't or if invalid attributes are specified, this function + * will return NULL and set an errno. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create(const char * _Nullable name, + os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_t with the specified name and workload identifier. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * Currently NULL or the default set of attributes are the only valid + * attributes for this function. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * or explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id_and_port + * + * @abstract + * Create an os_workgroup_t object with the specified name and workload + * identifier from a send right returned by a previous call to + * os_workgroup_copy_port, potentially in a different process. 
+ * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @param mach_port + * The send right to create the workgroup from. No reference is consumed + * on the specified send right. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and send right, applied in order: + * - If the provided workload identifier is known, and the provided send right + * references a workgroup that was created with a parameter that is also + * configured by the identifier, the parameter value configured by the + * identifier will be used. For certain parameters such as the kernel + * work_interval type underlying a workgroup interval type, it is required + * that the two parameter values must match, or this function will fail and + * return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the workgroup referenced by the provided send right are used. + * - If a given parameter is neither configured by a known workload identifier + * or was used to create the workgroup referenced by the provided send right, + * a system-dependent fallback value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_port(const char * _Nullable name, + const char *workload_id, mach_port_t mach_port); + +/*! + * @function os_workgroup_create_with_workload_id_and_workgroup + * + * @abstract + * Create a new os_workgroup object with the specified name and workload + * identifier from an existing os_workgroup. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. + * + * @param wg + * The existing workgroup to create a new workgroup object from. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. 
+ * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and existing workgroup, applied in order: + * - If the provided workload identifier is known, and the provided workgroup + * was created with a parameter that is also configured by the identifier, + * the parameter value configured by the identifier will be used. For certain + * parameters such as the kernel work_interval type underlying a workgroup + * interval type, it is required that the two parameter values must match, or + * this function will fail and return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the provided workgroup will be used. + * - If a given parameter is neither configured by a known workload identifier + * or was used to create the provided workgroup, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_workgroup(const char * _Nullable name, + const char *workload_id, os_workgroup_t wg); + +/* To be deprecated once coreaudio adopts */ +#define OS_WORKGROUP_ATTR_INITIALIZER OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT + +typedef uint32_t os_workgroup_index; + +/* Deprecated in favor of os_workgroup_join */ +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token_out, + os_workgroup_index *_Nullable id_out); + +/* Deprecated in favor of os_workgroup_leave */ +OS_WORKGROUP_EXPORT +void +os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_OBJECT__ */ diff --git a/os/workgroup_parallel.h b/os/workgroup_parallel.h new file mode 100644 index 000000000..2aca7f861 --- /dev/null +++ b/os/workgroup_parallel.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_PARALLEL__ +#define __OS_WORKGROUP_PARALLEL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +#include + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_parallel_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking parallel work. 
+ */
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct os_workgroup_s *os_workgroup_parallel_t;
+#else
+OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_parallel, Parallelizable);
+OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_parallel, os_workgroup, WorkGroupParallel);
+#endif
+
+/*!
+ * @function os_workgroup_parallel_create
+ *
+ * @abstract
+ * Creates an os_workgroup_t which tracks a parallel workload.
+ * A newly created os_workgroup_parallel_t has no initial member threads -
+ * in particular the creating thread does not join the os_workgroup_parallel_t
+ * implicitly.
+ *
+ * See also os_workgroup_max_parallel_threads().
+ *
+ * @param name
+ * A client specified string for labelling the workgroup. This parameter is
+ * optional and can be NULL.
+ *
+ * @param attr
+ * The requested set of workgroup attributes. NULL is to be specified for the
+ * default set of attributes.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
+OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED
+OS_SWIFT_NAME(WorkGroupParallel.init(__name:attr:))
+os_workgroup_parallel_t _Nullable
+os_workgroup_parallel_create(const char * _Nullable name,
+		os_workgroup_attr_t _Nullable attr);
+
+OS_WORKGROUP_ASSUME_NONNULL_END
+
+__END_DECLS
+
+#endif /* __OS_WORKGROUP_PARALLEL__ */
diff --git a/os/workgroup_private.h b/os/workgroup_private.h
new file mode 100644
index 000000000..255fd5079
--- /dev/null
+++ b/os/workgroup_private.h
@@ -0,0 +1,17 @@
+#ifndef __OS_WORKGROUP_PRIVATE__
+#define __OS_WORKGROUP_PRIVATE__
+
+#ifndef __DISPATCH_BUILDING_DISPATCH__
+
+#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__
+#define __OS_WORKGROUP_PRIVATE_INDIRECT__
+#endif /* __OS_WORKGROUP_PRIVATE_INDIRECT__ */
+
+#include
+#include
+#include
+
+#undef __OS_WORKGROUP_PRIVATE_INDIRECT__
+#endif /* __DISPATCH_BUILDING_DISPATCH__ */
+
+#endif /* __OS_WORKGROUP_PRIVATE__ */
diff --git a/private/apply_private.h b/private/apply_private.h
new file mode 100644
index 000000000..195e5a4de
--- /dev/null
+++ b/private/apply_private.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2020 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __DISPATCH_APPLY_PRIVATE__
+#define __DISPATCH_APPLY_PRIVATE__
+
+#ifndef __DISPATCH_INDIRECT__
+#error "Please #include instead of this file directly."
+#include // for HeaderDoc
+#endif
+
+__BEGIN_DECLS
+
+DISPATCH_ASSUME_NONNULL_BEGIN
+
+/*!
+ * @typedef dispatch_apply_attr_s dispatch_apply_attr_t
+ *
+ * @abstract
+ * Pointer to an opaque structure for describing the workload to be executed by
+ * dispatch_apply_with_attr.
+ *
+ * This struct must be initialized with dispatch_apply_attr_init before use
+ * and must not be copied once initialized. It must be destroyed with
+ * dispatch_apply_attr_destroy before going out of scope or being freed, to
+ * avoid leaking associated system resources.
+ */
+#define __DISPATCH_APPLY_ATTR_SIZE__ 64
+
+#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__)
+typedef struct dispatch_apply_attr_s dispatch_apply_attr_s;
+typedef struct dispatch_apply_attr_s *dispatch_apply_attr_t;
+#else
+struct dispatch_apply_attr_opaque_s {
+	char opaque[__DISPATCH_APPLY_ATTR_SIZE__];
+};
+typedef struct dispatch_apply_attr_opaque_s dispatch_apply_attr_s;
+typedef struct dispatch_apply_attr_opaque_s *dispatch_apply_attr_t;
+#endif
+
+/*!
+ * @function dispatch_apply_attr_init, dispatch_apply_attr_destroy
+ *
+ * @abstract
+ * Initializer and destructor functions for the attribute structure. The
+ * attribute structure must be initialized before calling any setters on it.
+ *
+ * Every call to dispatch_apply_attr_init must be paired with a corresponding
+ * call to dispatch_apply_attr_destroy.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1
+void
+dispatch_apply_attr_init(dispatch_apply_attr_t attr);
+
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1
+void
+dispatch_apply_attr_destroy(dispatch_apply_attr_t attr);
+
+/*!
+ * @enum dispatch_apply_attr_entity_t
+ *
+ * @abstract
+ * This enum describes an entity in the hardware for which parallelism via
+ * dispatch_apply is being requested.
+ */
+DISPATCH_ENUM(dispatch_apply_attr_entity, unsigned long,
+	DISPATCH_APPLY_ATTR_ENTITY_CPU = 1,
+	DISPATCH_APPLY_ATTR_ENTITY_CLUSTER = 2,
+);
+
+/*!
+ * @function dispatch_apply_attr_set_parallelism
+ *
+ * @param attr
+ * The dispatch_apply attribute to be modified
+ *
+ * @param entity
+ * The named entity the requested configuration applies to.
+ *
+ * @param threads_per_entity
+ * The number of worker threads to be created per named entity on the system.
+ *
+ * @abstract
+ * Adds a request for the system to start enough worker threads such that
+ * threads_per_entity number of threads will share each named entity. The
+ * system will make a best effort to spread such worker threads evenly
+ * across the available entities.
+ *
+ * @note
+ * At the present time, the only supported value of threads_per_entity is 1.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+void
+dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t attr,
+	dispatch_apply_attr_entity_t entity, size_t threads_per_entity);
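+
+/*
+ * Example (illustrative sketch): requesting one worker thread per CPU
+ * cluster, e.g. for a memory-bandwidth-bound workload.
+ *
+ *	dispatch_apply_attr_s attr;
+ *	dispatch_apply_attr_init(&attr);
+ *	dispatch_apply_attr_set_parallelism(&attr,
+ *			DISPATCH_APPLY_ATTR_ENTITY_CLUSTER, 1);
+ *	// ... use the attribute with dispatch_apply_with_attr() ...
+ *	dispatch_apply_attr_destroy(&attr);
+ */
+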
+/*!
+ * @typedef dispatch_apply_attr_query_flags_t
+ *
+ * @abstract
+ * Flags that affect calls to dispatch_apply_attr_query().
+ *
+ * @const DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE
+ * Modifies DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS so that it takes into
+ * account the current execution context. This may produce a tighter upper bound
+ * on the number of worker threads. If dispatch_apply_with_attr is called from
+ * the current execution context, it is guaranteed that the worker_index will
+ * not exceed the result of this query. However if the current execution context
+ * is changed (for example with dispatch or pthread functions) or the current
+ * scope is left, that guarantee will not hold.
+ */
+DISPATCH_ENUM(dispatch_apply_attr_query_flags, unsigned long,
+	DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1,
+);
+
+/*!
+ * @typedef dispatch_apply_attr_query_t
+ *
+ * @abstract
+ * Enumeration indicating which question dispatch_apply_attr_query() should
+ * answer about its arguments.
+ *
+ * @const DISPATCH_APPLY_ATTR_QUERY_VALID
+ * Query if the properties requested by this attribute are invalid or
+ * unsatisfiable. For example, some properties may describe how the workload will
+ * use certain hardware resources. On machines which lack that hardware, an
+ * attribute with those properties may be invalid.
+ * Passing an invalid attribute to dispatch_apply_with_attr will have undefined
+ * behaviour.
+ * If the attribute is valid, the query returns 1. If it is not valid, the query
+ * returns 0.
+ *
+ * @const DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS
+ * Calculates an upper bound of how many parallel worker threads
+ * dispatch_apply_with_attr could create when running a workload with the
+ * specified attribute. This will include the thread calling
+ * dispatch_apply_with_attr as a worker. This is an upper bound; depending on
+ * conditions, such as the load of other work on the system and the execution
+ * context where dispatch_apply_with_attr is called, fewer parallel worker
+ * threads may actually be created.
+ *
+ * A good use of this query is to determine the size of a working arena
+ * (such as preallocated memory space or other resources) appropriate for
+ * the maximum number of workers. This API can be used in coordination
+ * with the worker_index block argument in dispatch_apply_with_attr to provide
+ * each parallel worker thread with their own slice of the arena.
+ *
+ * @const DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS
+ * Calculates a good guess of how many parallel worker threads
+ * dispatch_apply_with_attr would likely create when running a workload with
+ * the specified attribute. This will include the thread calling
+ * dispatch_apply_with_attr as a worker. This is only a guess; depending on
+ * conditions, dispatch_apply_with_attr may actually create more or fewer
+ * parallel worker threads than this value.
+ *
+ * Compared to QUERY_MAXIMUM_WORKERS, this query tries to predict the behavior
+ * of dispatch_apply_with_attr more faithfully. The number of parallel worker
+ * threads to be used may be affected by aspects of the current execution context
+ * like the thread's QOS class, scheduling priority, queue hierarchy, and current
+ * workloop; as well as transitory aspects of the system like power state and
+ * computational loads from other tasks. For those reasons, repeating this query
+ * for the same attribute may produce a different result.
+ */
+DISPATCH_ENUM(dispatch_apply_attr_query, unsigned long,
+	DISPATCH_APPLY_ATTR_QUERY_VALID DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 0,
+	DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1,
+	DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 2,
+);
+
+/*!
+ * @function dispatch_apply_attr_query
+ *
+ * @abstract
+ * Query how dispatch_apply_with_attr will respond to a certain attr, such
+ * as how the attr may affect its choice of how many parallel worker threads
+ * to use.
+ *
+ * @param attr
+ * The dispatch_apply attribute describing a workload.
+ *
+ * @param which
+ * An enumeration value indicating which question this function should answer
+ * about its arguments. See dispatch_apply_attr_query_t for possible values and
+ * explanations.
+ *
+ * @param flags
+ * Flags for the query that describe factors beyond the workload (which
+ * is described by the attr). See dispatch_apply_attr_query_flags_t for
+ * valid values. Pass 0 if no flags are needed.
+ *
+ * @return
+ * Returns the numerical answer to the query. See dispatch_apply_attr_query_t.
+ * Most types of query return 0 if the properties requested by this attribute
+ * are invalid or unsatisfiable. (Exceptions are described in the
+ * dispatch_apply_attr_query_t entries.)
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+size_t
+dispatch_apply_attr_query(dispatch_apply_attr_t attr,
+		dispatch_apply_attr_query_t which,
+		dispatch_apply_attr_query_flags_t flags);
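+
+/*
+ * Example (illustrative sketch): validating an attribute, then sizing a
+ * per-worker scratch buffer from the maximum worker count; `SLICE_LEN` is
+ * hypothetical.
+ *
+ *	if (dispatch_apply_attr_query(&attr, DISPATCH_APPLY_ATTR_QUERY_VALID, 0)) {
+ *		size_t n = dispatch_apply_attr_query(&attr,
+ *				DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, 0);
+ *		float *scratch = calloc(n * SLICE_LEN, sizeof(float));
+ *		// ... one slice per possible worker_index ...
+ *	}
+ */
+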
+/*!
+ * @function dispatch_apply_with_attr
+ *
+ * @abstract
+ * Submits a block for parallel invocation, with an attribute structure
+ * describing the workload.
+ *
+ * @discussion
+ * Submits a block for parallel invocation. The system will try to use worker
+ * threads that match the configuration of the current thread. The system will
+ * try to start an appropriate number of worker threads to maximize
+ * throughput given the available hardware and current system conditions. An
+ * attribute structure that describes the nature of the workload may be passed.
+ * The system will use the attribute's properties to improve its scheduling
+ * choices, such as how many worker threads to create and how to distribute them
+ * across processors.
+ *
+ * This function waits for all invocations of the task block to complete before
+ * returning.
+ *
+ * Each invocation of the block will be passed 2 arguments:
+ * - the current index of iteration
+ * - the index of the worker thread invoking the block
+ *
+ * The worker index will be in the range [0, n)
+ * where n = dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, 0)
+ *
+ * Worker threads may start in any order. Some worker indexes within the
+ * permissible range may not actually be used, depending on conditions.
+ * Generally, one worker thread will use one worker index, but this is not
+ * guaranteed; worker indexes MAY NOT match threads one-to-one. No assumptions
+ * should be made about which CPU a worker runs on. Two invocations of
+ * the block MAY have different worker indexes even if they run on the same
+ * thread or the same processor. However, two invocations of the block running
+ * at the same time WILL NEVER have the same worker index.
+ *
+ * When this API is called inside another dispatch_apply_with_attr or
+ * dispatch_apply, it will execute as a serial loop.
+ *
+ * @param iterations
+ * The number of iterations to perform.
+ *
+ * The choice of how to divide a large workload into a number of iterations can
+ * have substantial effects on the performance of executing that workload.
+ * If the number of iterations is very small, the system may not effectively
+ * spread and balance the work across the available hardware. As a rough
+ * guideline, the number of iterations should be at least three times the maximum
+ * worker index. On the other hand, a workload should not be finely divided into
+ * a huge number of iterations, each doing only a minuscule amount of work, since
+ * there is a small overhead cost of accounting and invocation for each iteration.
+ *
+ * @param attr
+ * The dispatch_apply_attr_t describing specialized properties of the workload.
+ * This value can be NULL. If non-NULL, the attribute must have been initialized
+ * with dispatch_apply_attr_init().
+ *
+ * If the attribute requests properties that are invalid or meaningless on this
+ * system, the function will have undefined behaviour. This is a programming
+ * error. An attribute's validity can be checked with dispatch_apply_attr_query.
+ *
+ * @param block
+ * The block to be invoked for the specified number of iterations.
+ * The result of passing NULL in this parameter is undefined.
+ */
+#ifdef __BLOCKS__
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+void
+dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t _Nullable attr,
+	DISPATCH_NOESCAPE void (^block)(size_t iteration, size_t worker_index));
+#endif
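+
+/*
+ * Example (illustrative sketch): each worker accumulates into its own slot of
+ * a buffer sized from DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, so two
+ * concurrent invocations never share a slot; `sums` and `input` are
+ * hypothetical.
+ *
+ *	dispatch_apply_with_attr(count, &attr, ^(size_t i, size_t worker_index) {
+ *		sums[worker_index] += input[i];
+ *	});
+ */
+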
+
+/*!
+ * @function dispatch_apply_with_attr_f
+ *
+ * @abstract
+ * Submits a function for parallel invocation, with an attribute structure
+ * describing the workload.
+ *
+ * @discussion
+ * See dispatch_apply_with_attr() for details.
+ *
+ * @param iterations
+ * The number of iterations to perform.
+ *
+ * @param attr
+ * The dispatch_apply_attr_t describing specialized properties of the workload.
+ * This value can be NULL. If non-NULL, the attribute must have been initialized
+ * with dispatch_apply_attr_init().
+ *
+ * If the attribute requests properties that are invalid or meaningless on this
+ * system, the function will have undefined behaviour. This is a programming
+ * error. An attribute's validity can be checked with dispatch_apply_attr_query.
+ *
+ * @param context
+ * The application-defined context parameter to pass to the function.
+ *
+ * @param work
+ * The application-defined function to invoke on the specified queue. The first
+ * parameter passed to this function is the context provided to
+ * dispatch_apply_with_attr_f(). The second parameter passed to this function is
+ * the current index of iteration. The third parameter passed to this function is
+ * the index of the worker thread invoking the function.
+ * See dispatch_apply_with_attr() for details.
+ * The result of passing NULL in this parameter is undefined.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0))
+DISPATCH_EXPORT
+void
+dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t _Nullable attr,
+	void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration, size_t worker_index));
+
+DISPATCH_ASSUME_NONNULL_END
+
+__END_DECLS
+#endif /* __DISPATCH_APPLY_PRIVATE__ */
diff --git a/private/mach_private.h b/private/mach_private.h
index 1474c163a..bed88c0bd 100644
--- a/private/mach_private.h
+++ b/private/mach_private.h
@@ -34,9 +34,10 @@
 
 __BEGIN_DECLS
 
-#define DISPATCH_MACH_SPI_VERSION 20161026
+#define DISPATCH_MACH_SPI_VERSION 20200229
 
 #include
+#include
 
 DISPATCH_ASSUME_NONNULL_BEGIN
 
@@ -162,7 +163,7 @@ DISPATCH_ENUM(dispatch_mach_send_flags, unsigned long,
  * Trailer type of mach message received by dispatch mach channels
  */
 
-typedef mach_msg_context_trailer_t dispatch_mach_trailer_t;
+typedef mach_msg_mac_trailer_t dispatch_mach_trailer_t;
 
 /*!
  * @constant DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE
@@ -370,14 +371,61 @@ dispatch_mach_create_f(const char *_Nullable label,
  * a "server" peer connection and the no more senders request is armed
  * immediately.
  *
+ * Note that the notification will not be issued if no send right was ever
+ * made for this connection receive right.
+ *
  * @param channel
  * The mach channel to request no senders notifications on.
  */
-API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0))
+API_DEPRECATED("Use dispatch_mach_notify_no_senders instead", macos(10.14, 10.16),
+	ios(12.0, 14.0))
 DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
 void
 dispatch_mach_request_no_senders(dispatch_mach_t channel);
 
+/*!
+ * @function dispatch_mach_notify_no_senders
+ *
+ * Configure the mach channel to receive no more senders notifications.
+ *
+ * @discussion
+ * This function must be called before dispatch_mach_connect() has been called.
+ *
+ * When a checkin message is passed to dispatch_mach_connect() or
+ * dispatch_mach_reconnect(), the notification is armed after the checkin
+ * message has been sent successfully.
+ *
+ * If no checkin message is passed, then the mach channel is assumed to be
+ * a "server" peer connection and the no more senders request is armed
+ * immediately.
+ *
+ * Requesting a no-senders notification for a listener mach channel is likely a
+ * client error since listener connections will likely have short-lived send
+ * rights (only until a peer connection is established).
+ *
+ * @param channel
+ * The mach channel to request no senders notifications on.
+ *
+ * @param made_sendrights
+ * A boolean representing whether the send right for this connection has been
+ * made before dispatch_mach_connect() is called.
+ *
+ * There are two cases to consider:
+ *
+ * a) The client is initiating the peer connection by creating a receive right
+ * with an inserted send right and shipping the receive right over to the server
+ * in a checkin message. In this case, the server must specify true for
+ * made_sendrights when arming for no-senders notification.
+ *
+ * b) The server is initiating the connection by creating a mach channel with a
+ * receive right and using MACH_MSG_TYPE_MAKE_SEND to create a send right in the
+ * checkin reply for the peer connection. In this case, the server should specify
+ * false for made_sendrights while arming for no-senders notification.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(5.0))
+void
+dispatch_mach_notify_no_senders(dispatch_mach_t channel, bool made_sendrights);
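+/*
+ * Illustrative sketch (not part of this interface) of case (b) from the
+ * dispatch_mach_notify_no_senders discussion above: a server peer channel
+ * that makes its own send right in the checkin reply. `create_peer_channel`
+ * and `recvp` are hypothetical.
+ *
+ *	dispatch_mach_t dm = create_peer_channel(recvp);
+ *	dispatch_mach_notify_no_senders(dm, false);
+ *	dispatch_mach_connect(dm, recvp, MACH_PORT_NULL, NULL);
+ *	// the channel handler is later invoked with DISPATCH_MACH_NO_SENDERS
+ *	// once every send right made for recvp has been deallocated
+ */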
+
 /*!
  * @typedef dispatch_mach_flags_t
  *
@@ -402,7 +450,6 @@ dispatch_mach_request_no_senders(dispatch_mach_t channel);
 DISPATCH_OPTIONS(dispatch_mach_flags, uint64_t,
 	DMF_NONE = 0x0,
 	DMF_USE_STRICT_REPLY = 0x1,
-	DMF_REQUEST_NO_SENDERS = 0x2,
 );
 
 /*!
@@ -1229,6 +1276,42 @@ dispatch_mach_handoff_reply(dispatch_queue_t queue, mach_port_t port,
 		dispatch_block_t block);
 #endif /* __BLOCKS__ */
 
+#if DISPATCH_MACH_SPI
+
+/*!
+ * @function dispatch_mach_msg_get_filter_policy_id
+ * Returns the message filter policy id from the message trailer.
+ * This id is added by the kernel during message send and is specific
+ * to the sender and port on which the message is received.
+ *
+ * @discussion
+ * This function should only be called from the context of an IPC handler.
+ *
+ * @param msg
+ * The dispatch mach message object to query. It should have a trailer of type dispatch_mach_trailer_t.
+ *
+ * @param filter_policy_id
+ * Return the filter policy id read from the message.
+ *
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_policy_id);
+
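+/*
+ * Illustrative sketch (not part of this interface): reading the filter
+ * policy id from within a channel's IPC handler. `handle_message` is a
+ * hypothetical helper.
+ *
+ *	void handler(void *ctx, dispatch_mach_reason_t reason,
+ *			dispatch_mach_msg_t msg, mach_error_t error)
+ *	{
+ *		if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) {
+ *			mach_msg_filter_id policy;
+ *			dispatch_mach_msg_get_filter_policy_id(msg, &policy);
+ *			handle_message(ctx, msg, policy);
+ *		}
+ *	}
+ */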
+
+/*!
+ * @function dispatch_mach_can_handoff_4libxpc
+ *
+ * Returns whether the code is running in a context where a handoff is possible.
+ */
+API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+bool
+dispatch_mach_can_handoff_4libxpc(void);
+
+#endif // DISPATCH_MACH_SPI
+
 DISPATCH_ASSUME_NONNULL_END
 
 __END_DECLS
diff --git a/private/private.h b/private/private.h
index b40a36c0f..e49d15c95 100644
--- a/private/private.h
+++ b/private/private.h
@@ -72,6 +72,7 @@
 #include
 #include
 #include
+#include
 #undef __DISPATCH_INDIRECT__
 #endif /* !__DISPATCH_BUILDING_DISPATCH__ */
diff --git a/private/queue_private.h b/private/queue_private.h
index 302de4aad..199fcaeed 100644
--- a/private/queue_private.h
+++ b/private/queue_private.h
@@ -41,10 +41,27 @@ __BEGIN_DECLS
  *
  * @constant DISPATCH_QUEUE_OVERCOMMIT
  * The queue will create a new thread for invoking blocks, regardless of how
- * busy the computer is.
+ * busy the computer is. It is invalid to pass both the
+ * DISPATCH_QUEUE_OVERCOMMIT and DISPATCH_QUEUE_COOPERATIVE
+ * flags.
+ *
+ * @constant DISPATCH_QUEUE_COOPERATIVE
+ * The queue will not bring up threads beyond a specific limit even if
+ * there are pending work items on the queue.
+ *
+ * The width of the queue is determined based on the hardware the code is
+ * running on and may change dynamically depending on the load of the system.
+ * Blocking any thread working on this queue will therefore reduce the
+ * throughput of the queue as a whole. Work running on this queue should be
+ * able to make progress until completion even if just 1 thread is available to
+ * process this queue.
+ *
+ * It is invalid to pass both the DISPATCH_QUEUE_OVERCOMMIT and
+ * DISPATCH_QUEUE_COOPERATIVE flags.
 */
 enum {
 	DISPATCH_QUEUE_OVERCOMMIT = 0x2ull,
+	DISPATCH_QUEUE_COOPERATIVE = 0x4ull,
 };
 
 /*!
@@ -149,12 +166,16 @@ dispatch_set_qos_class_fallback(dispatch_object_t object,
 
 #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT)
 
+#if __APPLE__
+# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR
+#else // __APPLE__
 // On FreeBSD pthread_attr_t is a typedef to a pointer type
 #if defined(__FreeBSD__)
 # define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR _Nullable
-#else
+#else // defined(__FreeBSD__)
 # define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR
-#endif
+#endif // defined(__FreeBSD__)
+#endif // __APPLE__
 
 /*!
  * @function dispatch_queue_attr_make_with_overcommit
@@ -448,6 +469,76 @@ DISPATCH_EXPORT
 void _dispatch_install_thread_detach_callback(void (*cb)(void));
 #endif
 
+/* The SPIs below are for the use of the Swift Concurrency Runtime ONLY */
+
+DISPATCH_OPTIONS(dispatch_swift_job_invoke_flags, uint32_t,
+	/*!
+	 * @const DISPATCH_SWIFT_JOB_INVOKE_NONE
+	 *
+	 * No specific requirements for how the object invokes itself.
+	 */
+	DISPATCH_SWIFT_JOB_INVOKE_NONE,
+
+	/*!
+	 * @const DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE
+	 *
+	 * This swift job is invoked on a cooperative queue. It should periodically
+	 * check dispatch_swift_job_should_yield() to determine if the object
+	 * ought to yield the thread to other objects in the cooperative queue.
+	 */
+	DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE,
+);
+
+/*!
+ * @function dispatch_swift_job_should_yield()
+ *
+ * @abstract
+ * This function is only to be called by the Swift concurrency runtime.
+ *
+ * If this function returns true, then the currently draining object
+ * should reach the next safest stopping point, perform necessary cleanups, and
+ * return from its invocation.
+ *
+ * If more work is present, it should reenqueue itself using the
+ * dispatch_enqueue_swift_job SPI.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0))
+DISPATCH_EXPORT
+bool
+dispatch_swift_job_should_yield(void);
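+/*
+ * A sketch of the drain-loop shape this implies (illustrative only;
+ * job_has_work(), job_run_one() and job_reenqueue() are hypothetical
+ * runtime helpers):
+ *
+ *	while (job_has_work(job)) {
+ *		job_run_one(job);
+ *		if (dispatch_swift_job_should_yield()) {
+ *			job_reenqueue(job); // e.g. via dispatch_async_swift_job below
+ *			return;
+ *		}
+ *	}
+ */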
+
+/*!
+ * @function dispatch_async_swift_job
+ *
+ * @abstract
+ * This function is only to be called by the Swift concurrency runtime to
+ * enqueue work to run on dispatch's thread pool.
+ *
+ * @param queue
+ * The queue onto which to enqueue the swift object. All enqueues are
+ * asynchronous and do not block the thread.
+ *
+ * @param swift_job
+ * The swift concurrency runtime job that is to be enqueued into dispatch. This
+ * object needs to adhere to a specific structure and have a specific vtable
+ * layout that dispatch expects.
+ *
+ * The refcount and lifetime of the object are managed by the enqueuer, who
+ * needs to make sure that it is live for the duration it is enqueued on
+ * the dispatch queue.
+ *
+ * The swift job can only be enqueued on a single queue at any
+ * given time.
+ *
+ * @param qos
+ * The QoS at which the object should be enqueued.
+ */
+SPI_AVAILABLE(macos(12.0), ios(15.0))
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2
+void
+dispatch_async_swift_job(dispatch_queue_t queue, void *swift_job,
+		qos_class_t qos);
+
 __END_DECLS
 
 DISPATCH_ASSUME_NONNULL_END
diff --git a/private/source_private.h b/private/source_private.h
index bd5e47ebc..fab9b9854 100644
--- a/private/source_private.h
+++ b/private/source_private.h
@@ -240,6 +240,9 @@ enum {
  * @constant DISPATCH_VFS_NEARLOWDISK
  * Filesystem is nearly full (below NEARLOWDISK level).
  *
+ * @constant DISPATCH_VFS_SERVEREVENT
+ * Server issued a notification/warning.
+ *
  * @constant DISPATCH_VFS_DESIREDDISK
  * Filesystem has exceeded the DESIREDDISK level
 *
@@ -257,6 +260,7 @@ enum {
 	DISPATCH_VFS_NOTRESPLOCK = 0x0080,
 	DISPATCH_VFS_UPDATE = 0x0100,
 	DISPATCH_VFS_VERYLOWDISK = 0x0200,
+	DISPATCH_VFS_SERVEREVENT = 0x0800,
 	DISPATCH_VFS_QUOTA = 0x1000,
 	DISPATCH_VFS_NEARLOWDISK = 0x2000,
 	DISPATCH_VFS_DESIREDDISK = 0x4000,
@@ -654,7 +658,7 @@ typedef struct dispatch_source_extended_data_s {
  * argument, the remaining space in data will have been populated with zeroes.
 */
 API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
-DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT
 DISPATCH_NOTHROW
 size_t
 dispatch_source_get_extended_data(dispatch_source_t source,
diff --git a/private/time_private.h b/private/time_private.h
index ae341e6d6..e8dd1accf 100644
--- a/private/time_private.h
+++ b/private/time_private.h
@@ -32,6 +32,8 @@
 #include // for HeaderDoc
 #endif
 
+__BEGIN_DECLS
+
 /*
 * @constant DISPATCH_MONOTONICTIME_NOW
 * A dispatch_time_t value that corresponds to the current value of the
@@ -83,5 +85,38 @@ enum {
 
 #endif // __APPLE__
 
+/*!
+ * @function dispatch_time_to_nsecs
+ *
+ * @abstract
+ * Returns the clock and nanoseconds of a given dispatch_time_t.
+ *
+ * @discussion
+ * This interface decodes a dispatch_time_t, which allows comparing two such
+ * values provided they are for the same clock ID.
+ *
+ * @param time
+ * The dispatch_time_t value to parse.
+ *
+ * @param clock
+ * A pointer to the clockid for this time.
+ *
+ * @param nsecs
+ * A pointer to the decoded number of nanoseconds for the passed in time
+ * relative to the epoch for this clock ID.
+ *
+ * @result
+ * Returns true if the dispatch_time_t value was valid.
+ * Returns false if the dispatch_time_t value was invalid,
+ * or DISPATCH_TIME_FOREVER.
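+ *
+ * For example (an illustrative sketch):
+ *
+ *	dispatch_clockid_t clock;
+ *	uint64_t ns;
+ *	if (dispatch_time_to_nsecs(t, &clock, &ns)) {
+ *		// `t` was valid; `ns` is relative to the epoch of `clock`, and
+ *		// two decoded times compare meaningfully when their clocks match
+ *	}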
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +dispatch_time_to_nsecs(dispatch_time_t time, + dispatch_clockid_t *clock, uint64_t *nsecs); + +__END_DECLS + #endif diff --git a/src/apply.c b/src/apply.c index 9c7d60ffd..160874f4c 100644 --- a/src/apply.c +++ b/src/apply.c @@ -21,11 +21,97 @@ #include "internal.h" typedef void (*dispatch_apply_function_t)(void *, size_t); + static char const * const _dispatch_apply_key = "apply"; #define DISPATCH_APPLY_INVOKE_REDIRECT 0x1 #define DISPATCH_APPLY_INVOKE_WAIT 0x2 +/* flags for da_dc->dc_data + * + * continuation func is a dispatch_apply_function_t (args: item) + */ +#define DA_FLAG_APPLY 0x01ul +// contin func is a dispatch_apply_attr_function_t (args: item, worker idx) +#define DA_FLAG_APPLY_WITH_ATTR 0x02ul + +#if __LP64__ +/* Our continuation allocator is a bit more performant than the default system + * malloc (especially with our per-thread cache), so let's use it if we can. + * On 32-bit platforms, dispatch_apply_s is bigger than dispatch_continuation_s + * so we can't use the cont allocator, but we're okay with the slight perf + * degradation there. + */ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 1 +dispatch_static_assert(sizeof(struct dispatch_apply_s) <= sizeof(struct dispatch_continuation_s), + "Apply struct should fit inside continuation struct so we can borrow the continuation allocator"); +#else // __LP64__ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 0 +#endif // __LP64__ + +DISPATCH_ALWAYS_INLINE DISPATCH_MALLOC +static inline dispatch_apply_t +_dispatch_apply_alloc(void) +{ + dispatch_apply_t da; +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = (__typeof__(da))_dispatch_continuation_alloc(); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = _dispatch_calloc(1, sizeof(*da)); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + return da; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_free(dispatch_apply_t da) +{ +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + _dispatch_continuation_free((dispatch_continuation_t)da); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + free(da); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR +} + +static void _dispatch_apply_da_copy_attr(dispatch_apply_t, dispatch_apply_attr_t _Nullable); +static bool _dispatch_attr_is_initialized(dispatch_apply_attr_t attr); + +static void +_dispatch_apply_set_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + _dispatch_attr_apply_cluster_set(worker_index, attr->per_cluster_parallelism); + } +} + +static void +_dispatch_apply_clear_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + _dispatch_attr_apply_cluster_clear(worker_index, attr->per_cluster_parallelism); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_destroy(dispatch_apply_t da) +{ +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif + if (da->da_attr) { + dispatch_apply_attr_destroy(da->da_attr); + free(da->da_attr); + } + _dispatch_apply_free(da); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) @@ -33,12 +119,34 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) size_t const iter = da->da_iterations; size_t idx, done = 0; + /* workers start over 
time but never quit until the job is done, so + * we can allocate an index simply by incrementing + */ + uint32_t worker_index = 0; + worker_index = os_atomic_inc_orig2o(da, da_worker_index, relaxed); + + _dispatch_apply_set_attr_behavior(da->da_attr, worker_index); + idx = os_atomic_inc_orig2o(da, da_index, acquire); if (unlikely(idx >= iter)) goto out; - - // da_dc is only safe to access once the 'index lock' has been acquired - dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; + /* + * da_dc is only safe to access once the 'index lock' has been acquired + * because it lives on the stack of the thread calling dispatch_apply. + * + * da lives until the last worker thread has finished (protected by + * da_thr_cnt), but da_dc only lives until the calling thread returns + * after the last work item is complete, which may be sooner than that. + * (In fact, the calling thread could do all the workitems itself and + * return before the worker threads even start.) + * + * Therefore the increment (reserving a valid workitem index from + * da_index) protects our access to da_dc. + * + * We also need an acquire barrier, and this is a good place to have one. + */ + dispatch_function_t const func = da->da_dc->dc_func; void *const da_ctxt = da->da_dc->dc_ctxt; + uintptr_t apply_flags = (uintptr_t)da->da_dc->dc_data; _dispatch_perfmon_workitem_dec(); // this unit executes many items @@ -52,7 +160,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) dispatch_thread_frame_s dtf; dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { - dispatch_queue_t dq = da->da_dc->dc_data; + dispatch_queue_t dq = da->da_dc->dc_other; _dispatch_thread_frame_push(&dtf, dq); old_dbp = _dispatch_set_basepri(dq->dq_priority); } @@ -61,7 +169,13 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) // Striding is the responsibility of the caller. do { dispatch_invoke_with_autoreleasepool(flags, { - _dispatch_client_callout2(da_ctxt, idx, func); + if (apply_flags & DA_FLAG_APPLY) { + _dispatch_client_callout2(da_ctxt, idx, (dispatch_apply_function_t)func); + } else if (apply_flags & DA_FLAG_APPLY_WITH_ATTR) { + _dispatch_client_callout3_a(da_ctxt, idx, worker_index, (dispatch_apply_attr_function_t)func); + } else { + DISPATCH_INTERNAL_CRASH(apply_flags, "apply continuation has invalid flags"); + } _dispatch_perfmon_workitem_inc(); done++; idx = os_atomic_inc_orig2o(da, da_index, relaxed); @@ -75,21 +189,21 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) _dispatch_thread_context_pop(&apply_ctxt); - // The thread that finished the last workitem wakes up the possibly waiting - // thread that called dispatch_apply. They could be one and the same. - if (!os_atomic_sub2o(da, da_todo, done, release)) { + /* The thread that finished the last workitem wakes up the possibly waiting + * thread that called dispatch_apply. They could be one and the same. 
+	 */
+	if (os_atomic_sub2o(da, da_todo, done, release) == 0) {
 		_dispatch_thread_event_signal(&da->da_event);
 	}
 out:
+	_dispatch_apply_clear_attr_behavior(da->da_attr, worker_index);
+
 	if (invoke_flags & DISPATCH_APPLY_INVOKE_WAIT) {
 		_dispatch_thread_event_wait(&da->da_event);
 		_dispatch_thread_event_destroy(&da->da_event);
 	}
 	if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) {
-#if DISPATCH_INTROSPECTION
-		_dispatch_continuation_free(da->da_dc);
-#endif
-		_dispatch_continuation_free((dispatch_continuation_t)da);
+		_dispatch_apply_destroy(da);
 	}
 }
 
@@ -138,19 +252,25 @@ _dispatch_apply_serial(void *ctxt)
 	dispatch_invoke_flags_t flags;
 	size_t idx = 0;
 
+	// no need yet for _set_attr_behavior() for serial applies
 	_dispatch_perfmon_workitem_dec(); // this unit executes many items
-	flags = _dispatch_apply_autorelease_frequency(dc->dc_data);
+	flags = _dispatch_apply_autorelease_frequency(dc->dc_other);
 	do {
 		dispatch_invoke_with_autoreleasepool(flags, {
-			_dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func);
+			if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY) {
+				_dispatch_client_callout2(dc->dc_ctxt, idx, (dispatch_apply_function_t)dc->dc_func);
+			} else if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY_WITH_ATTR) {
+				// when running serially, the only worker is worker number 0
+				_dispatch_client_callout3_a(dc->dc_ctxt, idx, 0, (dispatch_apply_attr_function_t)dc->dc_func);
+			} else {
+				DISPATCH_INTERNAL_CRASH(dc->dc_data, "apply continuation has invalid flags");
+			}
+			_dispatch_perfmon_workitem_inc();
 		});
 	} while (++idx < iter);
-#if DISPATCH_INTROSPECTION
-	_dispatch_continuation_free(da->da_dc);
-#endif
-	_dispatch_continuation_free((dispatch_continuation_t)da);
+	_dispatch_apply_destroy(da);
 }
 
 DISPATCH_ALWAYS_INLINE
@@ -234,7 +354,7 @@ _dispatch_apply_redirect(void *ctxt)
 {
 	dispatch_apply_t da = (dispatch_apply_t)ctxt;
 	int32_t da_width = da->da_thr_cnt - 1;
-	dispatch_queue_t top_dq = da->da_dc->dc_data, dq = top_dq;
+	dispatch_queue_t top_dq = da->da_dc->dc_other, dq = top_dq;
 
 	do {
 		int32_t width = _dispatch_queue_try_reserve_apply_width(dq, da_width);
@@ -249,9 +369,10 @@ _dispatch_apply_redirect(void *ctxt)
 			da->da_thr_cnt -= excess;
 		}
 		if (!da->da_flags) {
-			// find first queue in descending target queue order that has
-			// an autorelease frequency set, and use that as the frequency for
-			// this continuation.
+			/* find first queue in descending target queue order that has
+			 * an autorelease frequency set, and use that as the frequency for
+			 * this continuation.
+			 */
 			da->da_flags = _dispatch_queue_autorelease_frequency(dq);
 		}
 		dq = dq->do_targetq;
@@ -267,29 +388,101 @@
DISPATCH_ALWAYS_INLINE
 static inline dispatch_queue_global_t
 _dispatch_apply_root_queue(dispatch_queue_t dq)
 {
+	dispatch_queue_t tq = NULL;
+
 	if (dq) {
 		while (unlikely(dq->do_targetq)) {
-			dq = dq->do_targetq;
-		}
-		// if the current root queue is a pthread root queue, select it
-		if (!_dispatch_is_in_root_queues_array(dq)) {
-			return upcast(dq)._dgq;
+			tq = dq->do_targetq;
+
+			// If the current root is a custom pri workloop, select it. We have
+			// to do this check here because custom pri workloops have a fake
+			// bottom targetq.
+			if (_dispatch_is_custom_pri_workloop(dq)) {
+				return upcast(dq)._dgq;
+			}
+
+			dq = tq;
 		}
 	}
+	// if the current root queue is a pthread root queue, select it
+	if (dq && !_dispatch_is_in_root_queues_array(dq)) {
+		return upcast(dq)._dgq;
+	}
+
 	pthread_priority_t pp = _dispatch_get_priority();
 	dispatch_qos_t qos = _dispatch_qos_from_pp(pp);
-	return _dispatch_get_root_queue(qos ?
qos : DISPATCH_QOS_DEFAULT, false); + return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, 0); } -DISPATCH_NOINLINE -void -dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, - void (*func)(void *, size_t)) +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count_for_cluster(dispatch_apply_attr_t _Nullable attr, dispatch_qos_t qos) +{ + size_t cluster_max = SIZE_MAX; + if (attr && attr->per_cluster_parallelism > 0) { + uint32_t rc = _dispatch_cluster_max_parallelism(qos); + if (likely(rc > 0)) { + cluster_max = rc * (uint32_t) (attr->per_cluster_parallelism); + } else { + /* if there's no cluster resource parallelism, then our return value + * is 0 which means "attr is a meaningless request" + */ + cluster_max = 0; + } + } + return cluster_max; +} + +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count(dispatch_apply_attr_t _Nullable attr, size_t nested, dispatch_qos_t qos, bool active) +{ + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + size_t thr_cnt = 0; + + if (likely(!attr)) { + /* Normal apply: Start with as many threads as the QOS class would + * allow. If we are nested inside another apply, account for the fact + * that it's calling us N times, so we need to use 1/Nth the threads + * we usually would, to stay under the useful parallelism limit. + */ + unsigned long flags = active ? DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + thr_cnt = _dispatch_qos_max_parallelism(qos, flags); + if (unlikely(nested)) { + thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + } + } else { + /* apply_with_attr: if we are already nested, just go serial. + * We should use the minimum of, the max allowed threads for this QOS + * level, and the max useful parallel workers based on the requested + * attributes (e.g. the number of cluster level resources). + */ + if (unlikely(nested)) { + thr_cnt = 1; + } else { + unsigned long flags = active ? DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + size_t qos_max = _dispatch_qos_max_parallelism(qos, flags); + size_t cluster_max = _dispatch_apply_calc_thread_count_for_cluster(attr, qos); + thr_cnt = MIN(qos_max, cluster_max); + } + } + return thr_cnt; +} + +static void +_dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, + dispatch_queue_t _dq, void *ctxt, dispatch_function_t func, uintptr_t da_flags) { if (unlikely(iterations == 0)) { return; } + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? 
dtctxt->dtc_apply_nesting : 0;
@@ -304,46 +497,92 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt,
 	dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority) ?:
 			_dispatch_priority_fallback_qos(dq->dq_priority);
 	if (unlikely(dq->do_targetq)) {
-		// if the queue passed-in is not a root queue, use the current QoS
-		// since the caller participates in the work anyway
+		/* if the queue passed-in is not a root queue, use the current QoS
+		 * since the caller participates in the work anyway
+		 */
 		qos = _dispatch_qos_from_pp(_dispatch_get_priority());
 	}
-	int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos,
-			DISPATCH_MAX_PARALLELISM_ACTIVE);
-	if (likely(!nested)) {
-		nested = iterations;
+	size_t thr_cnt = _dispatch_apply_calc_thread_count(attr, nested, qos, true);
+	if (thr_cnt == 0) {
+		DISPATCH_CLIENT_CRASH(attr, "attribute's properties are invalid or meaningless on this system");
+	}
+
+	/* dispatch_apply's nesting behavior is a little complicated; it tries to
+	 * account for the multiplicative effect of the applies above it to bring
+	 * up just the right number of total threads.
+	 * dispatch_apply_with_attr is much simpler: it just goes serial if it is
+	 * nested at all, and it sets the nested TSD to the max value to indicate
+	 * that we are already saturating the CPUs so any applies nested inside
+	 * it will also go serial.
+	 */
+	size_t new_nested;
+	if (attr) {
+		new_nested = DISPATCH_APPLY_MAX;
 	} else {
-		thr_cnt = nested < (size_t)thr_cnt ? thr_cnt / (int32_t)nested : 1;
-		nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX
-				? nested * iterations : DISPATCH_APPLY_MAX;
+		if (likely(!nested)) {
+			new_nested = iterations;
+		} else {
+			/* DISPATCH_APPLY_MAX is sqrt(size_max) so we can do this
+			 * multiplication without checking for overflow. The actual magnitude
+			 * isn't important, it just needs to be >> ncpu.
+			 */
+			new_nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX
+					? nested * iterations : DISPATCH_APPLY_MAX;
+		}
 	}
-	if (iterations < (size_t)thr_cnt) {
-		thr_cnt = (int32_t)iterations;
+
+	/* Notwithstanding any of the above, we should never try to start more
+	 * threads than the number of work items. (The excess threads would have
+	 * no work to do.)
+ */ + if (iterations < thr_cnt) { + thr_cnt = iterations; } + struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, - .dc_data = dq, + .dc_other = dq, + .dc_data = (void *)da_flags, }; - dispatch_apply_t da = (__typeof__(da))_dispatch_continuation_alloc(); - da->da_index = 0; - da->da_todo = iterations; + dispatch_apply_t da = _dispatch_apply_alloc(); + os_atomic_init(&da->da_index, 0); + os_atomic_init(&da->da_todo, iterations); da->da_iterations = iterations; - da->da_nested = nested; - da->da_thr_cnt = thr_cnt; + da->da_nested = new_nested; + da->da_thr_cnt = (int32_t)thr_cnt; + os_atomic_init(&da->da_worker_index, 0); + _dispatch_apply_da_copy_attr(da, attr); #if DISPATCH_INTROSPECTION da->da_dc = _dispatch_continuation_alloc(); - *da->da_dc = dc; + da->da_dc->dc_func = (void *) dc.dc_func; + da->da_dc->dc_ctxt = dc.dc_ctxt; + da->da_dc->dc_other = dc.dc_other; + da->da_dc->dc_data = dc.dc_data; + da->da_dc->dc_flags = DC_FLAG_ALLOCATED; #else da->da_dc = &dc; #endif da->da_flags = 0; + if (unlikely(_dispatch_is_custom_pri_workloop(dq))) { + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); + + if (_dq_state_drain_locked_by_self(dq_state)) { + // We're already draining on the custom priority workloop, don't go + // wide, just call inline serially + return _dispatch_apply_serial(da); + } else { + return dispatch_async_and_wait_f(dq, da, _dispatch_apply_serial); + } + } + if (unlikely(dq->dq_width == 1 || thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } + if (unlikely(dq->do_targetq)) { if (unlikely(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); @@ -358,6 +597,21 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, _dispatch_thread_frame_pop(&dtf); } +DISPATCH_NOINLINE +void +dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, + void (*func)(void *, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, NULL, _dq, ctxt, (dispatch_function_t)func, DA_FLAG_APPLY); +} + +void +dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, void *ctxt, + void (*func)(void *, size_t, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, attr, DISPATCH_APPLY_AUTO, ctxt, (dispatch_function_t)func, DA_FLAG_APPLY_WITH_ATTR); +} + #ifdef __BLOCKS__ void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) @@ -365,4 +619,117 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } + +void +dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t attr, + void (^work)(size_t iteration, size_t worker_index)) +{ + dispatch_apply_with_attr_f(iterations, attr, work, + (dispatch_apply_attr_function_t)_dispatch_Block_invoke(work)); +} #endif + +static bool +_dispatch_attr_is_initialized(dispatch_apply_attr_t attr) +{ + return (attr->sig == DISPATCH_APPLY_ATTR_SIG) && (~(attr->guard) == (uintptr_t) attr); +} + +void +dispatch_apply_attr_init(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); + + attr->sig = DISPATCH_APPLY_ATTR_SIG; + attr->guard = ~ (uintptr_t) (attr); /* To prevent leaks from picking it up */ +} + +void +dispatch_apply_attr_destroy(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); +} + +static void +_dispatch_apply_da_copy_attr(dispatch_apply_t da, dispatch_apply_attr_t _Nullable src) +{ + if (src == NULL) { + da->da_attr = NULL; + return; + } + dispatch_apply_attr_t 
dst = _dispatch_calloc(1, sizeof(struct dispatch_apply_attr_s)); + dispatch_apply_attr_init(dst); + + dst->per_cluster_parallelism = src->per_cluster_parallelism; + dst->flags = src->flags; + // if there were non-POD types, we would manage them here + + da->da_attr = dst; +} + +static void +dispatch_apply_attr_set_per_cluster_parallelism(dispatch_apply_attr_t _Nonnull attr, + size_t threads_per_cluster) +{ + if (threads_per_cluster == 0) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "0 is an invalid threads_per_cluster value"); + } + if (threads_per_cluster > 1) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "Invalid threads_per_cluster value, only acceptable value is 1"); + } + + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + attr->per_cluster_parallelism = threads_per_cluster; +} + +void +dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t _Nonnull attr, + dispatch_apply_attr_entity_t entity, size_t threads_per_entity) +{ + switch (entity) { + case DISPATCH_APPLY_ATTR_ENTITY_CPU: + if (threads_per_entity != 1) { + DISPATCH_CLIENT_CRASH(threads_per_entity, "Invalid threads_per_entity value for CPU entity"); + } + break; + case DISPATCH_APPLY_ATTR_ENTITY_CLUSTER: + return dispatch_apply_attr_set_per_cluster_parallelism(attr, threads_per_entity); + default: + DISPATCH_CLIENT_CRASH(entity, "Unknown entity"); + } +} + +size_t +dispatch_apply_attr_query(dispatch_apply_attr_t attr, + dispatch_apply_attr_query_t which, + dispatch_apply_attr_query_flags_t flags) +{ + dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + size_t current_nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; + dispatch_queue_t old_dq = _dispatch_queue_get_current(); + dispatch_queue_t dq = _dispatch_apply_root_queue(old_dq)->_as_dq; + dispatch_qos_t current_qos = _dispatch_priority_qos(dq->dq_priority) ?: _dispatch_priority_fallback_qos(dq->dq_priority); + + switch (which) { + case DISPATCH_APPLY_ATTR_QUERY_VALID: + return (dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, flags) == 0 ? 0 : 1); + case DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS: + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + case DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS: + if (flags & DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE) { + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + } else { + /* we SHOULD pass DISPATCH_QOS_UNSPECIFIED - the intention is "at any + * possible QOS", more exactly, "at the QOS which has highest limits". + * bsdthread_ctl_qos_max_parallelism doesn't accept unspecified, + * though, so let's say USER_INTERACTIVE assuming the highest QOS + * will be the least limited one. + * + */ + return _dispatch_apply_calc_thread_count(attr, 0, DISPATCH_QOS_USER_INTERACTIVE, false); + } + } +} diff --git a/src/benchmark.c b/src/benchmark.c index 15e9f5535..259a67ca5 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -60,14 +60,14 @@ _dispatch_benchmark_init(void *context) } while (i < cnt); delta = _dispatch_uptime() - start; - lcost = delta; + lcost = (typeof(lcost)) delta; #if HAVE_MACH_ABSOLUTE_TIME lcost *= bdata->tbi.numer; lcost /= bdata->tbi.denom; #endif lcost /= cnt; - bdata->loop_cost = lcost > UINT64_MAX ? UINT64_MAX : (uint64_t)lcost; + bdata->loop_cost = (uint64_t) lcost > UINT64_MAX ? 
UINT64_MAX : (uint64_t)lcost; } #ifdef __BLOCKS__ @@ -113,7 +113,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, } while (i < count); delta = _dispatch_uptime() - start; - conversion = delta; + conversion = (typeof(conversion)) delta; #if HAVE_MACH_ABSOLUTE_TIME conversion *= bdata.tbi.numer; big_denom = bdata.tbi.denom; @@ -122,7 +122,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, #endif big_denom *= count; conversion /= big_denom; - ns = conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; + ns = (uint64_t) conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; return ns - bdata.loop_cost; } diff --git a/src/data_internal.h b/src/data_internal.h index 1589a793a..9ed12e13b 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -57,10 +57,10 @@ DISPATCH_CLASS_DECL(data, OBJECT); struct dispatch_data_s { #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - const void *do_vtable; + const void *__ptrauth_objc_isa_pointer do_vtable; dispatch_queue_t do_targetq; void *ctxt; - void *finalizer; + dispatch_function_t DISPATCH_FUNCTION_POINTER finalizer; #else DISPATCH_OBJECT_HEADER(data); #endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA diff --git a/src/event/event_config.h b/src/event/event_config.h index f221d0922..0b883b035 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -243,7 +243,7 @@ typedef unsigned int mach_msg_priority_t; # endif // MACH_RCV_SYNC_WAIT # define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t) -# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_AV # define DISPATCH_MACH_RCV_OPTIONS ( \ MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index f31d13ee0..e3578a095 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -702,6 +702,12 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ + (void)wlh; +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 1fb1de38a..305cf931e 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -130,7 +130,7 @@ typedef uint32_t dispatch_unote_ident_t; #endif #define DISPATCH_UNOTE_CLASS_HEADER() \ - dispatch_source_type_t du_type; \ + dispatch_source_type_t __ptrauth_objc_isa_pointer du_type; \ uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ os_atomic(dispatch_unote_state_t) du_state; \ dispatch_unote_ident_t du_ident; \ @@ -251,7 +251,7 @@ void dispatch_debug_machport(mach_port_t name, const char *str); // layout must match dispatch_source_refs_s struct dispatch_mach_recv_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); - dispatch_mach_handler_function_t dmrr_handler_func; + dispatch_mach_handler_function_t DISPATCH_FUNCTION_POINTER dmrr_handler_func; void *dmrr_handler_ctxt; }; typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; @@ -456,6 +456,34 @@ _dispatch_set_return_to_kernel(void) _dispatch_thread_setspecific(dispatch_r2k_key, (void *)1); } +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_get_quantum_expiry_action(void) +{ + return (uintptr_t) _dispatch_thread_getspecific(dispatch_quantum_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void 
+_dispatch_ack_quantum_expiry_action(void) +{ + return _dispatch_thread_setspecific(dispatch_quantum_key, (void *) 0); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_current_dsc(void *dsc) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, dsc); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_clear_current_dsc(void) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, NULL); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_clear_return_to_kernel(void) @@ -676,6 +704,7 @@ void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state); void _dispatch_event_loop_wait_for_ownership( struct dispatch_sync_context_s *dsc); +void _dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh); void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags); #if DISPATCH_WLH_DEBUG diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 4c17e0897..790d72408 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -559,8 +559,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) // when the process exists but is a zombie. As a workaround, we // simulate an exit event for any EVFILT_PROC with an invalid pid. ke->flags = EV_UDATA_SPECIFIC | EV_ONESHOT | EV_DELETE; - ke->fflags = NOTE_EXIT; - ke->data = 0; + ke->fflags = NOTE_EXIT | NOTE_EXITSTATUS; + ke->data = 0; // Fake exit status _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); } else { return _dispatch_kevent_print_error(ke); @@ -873,7 +873,6 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, du->du_priority), #endif }; - (void)pp; // if DISPATCH_USE_KEVENT_QOS == 0 } DISPATCH_ALWAYS_INLINE @@ -1299,14 +1298,13 @@ _dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags) enum { DISPATCH_WORKLOOP_ASYNC, DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, - DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC, DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE, DISPATCH_WORKLOOP_ASYNC_LEAVE, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, DISPATCH_WORKLOOP_RETARGET, + DISPATCH_WORKLOOP_SYNC_DISCOVER, DISPATCH_WORKLOOP_SYNC_WAIT, DISPATCH_WORKLOOP_SYNC_WAKE, DISPATCH_WORKLOOP_SYNC_FAKE, @@ -1316,17 +1314,16 @@ enum { static char const * const _dispatch_workloop_actions[] = { [DISPATCH_WORKLOOP_ASYNC] = "async", [DISPATCH_WORKLOOP_ASYNC_FROM_SYNC] = "async (from sync)", - [DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC] = "discover sync", [DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE] = "qos update", [DISPATCH_WORKLOOP_ASYNC_LEAVE] = "leave", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC] = "leave (from sync)", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER] = "leave (from transfer)", - [DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP] = "leave (forced)", [DISPATCH_WORKLOOP_RETARGET] = "retarget", + [DISPATCH_WORKLOOP_SYNC_DISCOVER] = "sync-discover", [DISPATCH_WORKLOOP_SYNC_WAIT] = "sync-wait", - [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_WAKE] = "sync-wake", + [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_END] = "sync-end", }; @@ -1405,6 +1402,11 @@ _dispatch_kevent_workloop_priority(dispatch_queue_t dq, int which, qos = DISPATCH_QOS_MAINTENANCE; } pthread_priority_t pp = _dispatch_qos_to_pp(qos); + + if (rq_pri & DISPATCH_PRIORITY_FLAG_COOPERATIVE) { + DISPATCH_INTERNAL_CRASH(rq_pri, "Waking up a kq with cooperative thread request is not supported"); + } + return 
pp | (rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } @@ -1421,11 +1423,8 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { - case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC: - case DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC: + case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: case DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE: dispatch_assert(_dq_state_is_base_wlh(dq_state)); dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); @@ -1433,21 +1432,16 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ROLE_MASK; mask |= DISPATCH_QUEUE_ENQUEUED; mask |= DISPATCH_QUEUE_MAX_QOS_MASK; - if (which == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC) { - dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); - dispatch_assert(_dq_state_drain_locked(dq_state)); - mask |= DISPATCH_QUEUE_SYNC_TRANSFER; + fflags |= NOTE_WL_IGNORE_ESTALE; + fflags |= NOTE_WL_UPDATE_QOS; + if (_dq_state_in_uncontended_sync(dq_state)) { fflags |= NOTE_WL_DISCOVER_OWNER; - } else { - fflags |= NOTE_WL_IGNORE_ESTALE; + mask |= DISPATCH_QUEUE_UNCONTENDED_SYNC; } - fflags |= NOTE_WL_UPDATE_QOS; pp = _dispatch_kevent_workloop_priority(dq, which, qos); break; case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER: fflags |= NOTE_WL_IGNORE_ESTALE; /* FALLTHROUGH */ @@ -1457,18 +1451,6 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ENQUEUED; break; - case DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP: - // 0 is never a valid queue state, so the knote attach will fail due to - // the debounce. However, NOTE_WL_END_OWNERSHIP is always observed even - // when ESTALE is returned, which is the side effect we're after here. 
- fflags |= NOTE_WL_END_OWNERSHIP; - fflags |= NOTE_WL_IGNORE_ESTALE; - action = EV_ADD | EV_ENABLE; - mask = ~0ull; - dq_state = 0; - pp = _dispatch_kevent_workloop_priority(dq, which, qos); - break; - case DISPATCH_WORKLOOP_RETARGET: action = EV_ADD | EV_DELETE | EV_ENABLE; fflags |= NOTE_WL_END_OWNERSHIP; @@ -1520,6 +1502,16 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { + case DISPATCH_WORKLOOP_SYNC_DISCOVER: + dispatch_assert(_dq_state_received_sync_wait(dq_state)); + dispatch_assert(_dq_state_in_uncontended_sync(dq_state)); + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_DISCOVER_OWNER | + NOTE_WL_IGNORE_ESTALE; + mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + break; + case DISPATCH_WORKLOOP_SYNC_WAIT: action = EV_ADD | EV_DISABLE; fflags = NOTE_WL_SYNC_WAIT; @@ -1527,10 +1519,6 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, if (_dispatch_qos_from_pp(pp) == 0) { pp = _dispatch_qos_to_pp(DISPATCH_QOS_DEFAULT); } - if (_dq_state_received_sync_wait(dq_state)) { - fflags |= NOTE_WL_DISCOVER_OWNER; - mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - } break; case DISPATCH_WORKLOOP_SYNC_FAKE: @@ -1653,9 +1641,6 @@ _dispatch_event_loop_get_action_for_state(uint64_t dq_state) if (!_dq_state_drain_locked(dq_state)) { return DISPATCH_WORKLOOP_ASYNC; } - if (!_dq_state_in_sync_transfer(dq_state)) { - return DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC; - } return DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; } @@ -1729,42 +1714,11 @@ _dispatch_kevent_workloop_poke(dispatch_wlh_t wlh, uint64_t dq_state, dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); dispatch_assert(!_dq_state_is_enqueued_on_manager(dq_state)); action = _dispatch_event_loop_get_action_for_state(dq_state); -override: _dispatch_kq_fill_workloop_event(&ke, action, wlh, dq_state); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { - _dispatch_kevent_workloop_drain_error(&ke, - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE); - dispatch_assert(action == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC); - dq_state = ke.ext[EV_EXTIDX_WL_VALUE]; - // - // There are 4 things that can cause an ESTALE for DISCOVER_SYNC: - // - the queue role changed, we don't want to redrive - // - the queue is no longer enqueued, we don't want to redrive - // - the max QoS changed, whoever changed it is doing the same - // transition, so we don't need to redrive - // - the DISPATCH_QUEUE_IN_SYNC_TRANFER bit got set - // - // The interesting case is the last one, and will only happen in the - // following chain of events: - // 1. uncontended dispatch_sync() - // 2. contended dispatch_sync() - // 3. contended dispatch_async() - // - // And this code is running because of (3). It is possible that (1) - // hands off to (2) while this call is being made, causing the - // DISPATCH_QUEUE_IN_TRANSFER_SYNC to be set, and we don't need to tell - // the kernel about the owner anymore. However, the async in that case - // will have set a QoS on the queue (since dispatch_sync()s don't but - // dispatch_async()s always do), and we need to redrive to tell it - // to the kernel. 
- // - if (_dq_state_is_base_wlh(dq_state) && - _dq_state_is_enqueued_on_target(dq_state) && - _dq_state_in_sync_transfer(dq_state)) { - action = DISPATCH_WORKLOOP_ASYNC; - goto override; - } + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); } if (!(flags & DISPATCH_EVENT_LOOP_OVERRIDE)) { @@ -2043,11 +1997,25 @@ _dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, uint64_t dq_state) { #if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_received_sync_wait(dq_state)) { + dispatch_tid tid = _dq_state_drain_owner(dq_state); + int slot = _dispatch_kq_deferred_find_slot(ddi, EVFILT_WORKLOOP, + (uint64_t)ddi->ddi_wlh, tid); + if (slot == ddi->ddi_nevents) { + dispatch_assert(slot < DISPATCH_DEFERRED_ITEMS_EVENT_COUNT); + ddi->ddi_nevents++; + } + _dispatch_kq_fill_workloop_sync_event(&ddi->ddi_eventlist[slot], + DISPATCH_WORKLOOP_SYNC_DISCOVER, ddi->ddi_wlh, + dq_state, _dq_state_drain_owner(dq_state)); + } + int action = _dispatch_event_loop_get_action_for_state(dq_state); dispatch_assert(ddi->ddi_wlh_needs_delete); ddi->ddi_wlh_needs_delete = false; ddi->ddi_wlh_needs_update = false; _dispatch_kq_fill_ddi_workloop_event(ddi, action, ddi->ddi_wlh, dq_state); + #else (void)ddi; (void)dq_state; #endif // DISPATCH_USE_KEVENT_WORKLOOP @@ -2061,11 +2029,24 @@ _dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke; +again: _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_END, wlh, 0, dsc->dsc_waiter); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { _dispatch_kevent_workloop_drain_error(&ke, dsc->dsc_waiter_needs_cancel ? 0 : DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT); + // + // quick hack for 78288114 + // + // something with DISPATCH_WORKLOOP_SYNC_FAKE is not quite right + // we can at least make the thread in the way finish the syscall + // it's trying to make with directed handoffs. + // + // it's inefficient but doesn't have a priority inversion. + // + _dispatch_preemption_yield_to(dsc->dsc_waiter, 1); + goto again; + // // Our deletion attempt is opportunistic as in most cases we will find // the matching knote and break the waiter out. 
@@ -2099,6 +2080,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
 	int action, n = 0;
 
 	dispatch_assert(_dq_state_drain_locked_by(new_state, dsc->dsc_waiter));
+	dispatch_assert(!dsc->dsc_wlh_self_wakeup);
 
 	if (wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == wlh) {
 		dispatch_assert(ddi->ddi_wlh_needs_delete);
@@ -2107,8 +2089,8 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
 	if (wlh == waiter_wlh) { // async -> sync handoff
 		dispatch_assert(_dq_state_is_enqueued_on_target(old_state));
-		dispatch_assert(!_dq_state_in_sync_transfer(old_state));
-		dispatch_assert(_dq_state_in_sync_transfer(new_state));
+		dispatch_assert(!_dq_state_in_uncontended_sync(old_state));
+		dispatch_assert(!_dq_state_in_uncontended_sync(new_state));
 
 		if (_dq_state_is_enqueued_on_target(new_state)) {
 			action = DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE;
@@ -2131,7 +2113,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
 	if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) {
 		dispatch_assert(_dq_state_is_enqueued_on_target(old_state));
-		dispatch_assert(_dq_state_in_sync_transfer(new_state));
+		dispatch_assert(!_dq_state_in_uncontended_sync(new_state));
 		// During the handoff, the waiter noticed there was no work *after*
 		// that last work item, so we want to kill the thread request while
 		// there's an owner around to avoid races between knote_process() and
@@ -2139,7 +2121,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
 		_dispatch_kq_fill_workloop_event(&ke[n++],
 				DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, wlh, new_state);
 	}
-	if (_dq_state_in_sync_transfer(new_state)) {
+	if (_dq_state_is_base_wlh(new_state)) {
 		// Even when waiter_wlh != wlh we can pretend we got woken up
 		// which is a knote we will be able to delete later with a SYNC_END.
// This allows rectifying incorrect ownership sooner, and also happens @@ -2147,10 +2129,13 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_WAKE, wlh, new_state, dsc->dsc_waiter); } - if (_dq_state_in_sync_transfer(old_state)) { + if (!dsc->dsc_from_async && _dq_state_is_base_wlh(old_state) && + !_dq_state_in_uncontended_sync(old_state)) { + // Note: when coming from dispatch_resume despite having work items + // the caller has an "uncontended sync" ownership dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], - DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); + DISPATCH_WORKLOOP_SYNC_END, wlh, old_state, tid); } // // Past this call it is not safe to look at `wlh` anymore as the callers @@ -2212,6 +2197,10 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) // _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC, wlh, dq_state); + } else if (_dq_state_received_sync_wait(dq_state)) { + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_DISCOVER, wlh, dq_state, + _dq_state_drain_owner(dq_state)); } again: @@ -2221,8 +2210,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) for (i = 0; i < n; i++) { long flags = 0; if (ke[i].fflags & NOTE_WL_SYNC_WAIT) { - flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR | - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE; + flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR; } _dispatch_kevent_workloop_drain_error(&ke[i], flags); } @@ -2243,6 +2231,25 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_tid tid = _dispatch_tid_self(); + dispatch_kevent_s ke; + + _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_WAKE, + wlh, tid, tid); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } +#else + (void)wlh; +#endif +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) @@ -2250,7 +2257,6 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, #if DISPATCH_USE_KEVENT_WORKLOOP uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke[2]; - bool needs_forceful_end_ownership = false; int n = 0; dispatch_assert(_dq_state_is_base_wlh(new_state)); @@ -2258,50 +2264,15 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, wlh, new_state); } else if (_dq_state_is_enqueued_on_target(old_state)) { - // - // Because the thread request knote may not - // have made it, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC may silently - // turn into a no-op. - // - // However, the kernel may know about our ownership anyway, so we need - // to make sure it is forcefully ended. - // - needs_forceful_end_ownership = true; dispatch_assert(_dq_state_is_suspended(new_state)); _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, wlh, new_state); - } else if (_dq_state_received_sync_wait(old_state)) { - // - // This case happens when the current workloop got waited on by some - // thread calling _dispatch_event_loop_wait_for_ownership. 
- // - // When the workloop became IDLE, it didn't find the sync waiter - // continuation, didn't have a thread request to cancel either, and so - // we need the kernel to forget about the current thread ownership - // of the workloop. - // - // To forget this ownership, we create a fake WAKE knote that can not - // coalesce with any meaningful one, just so that we can EV_DELETE it - // with the NOTE_WL_END_OWNERSHIP. - // - // This is a gross hack, but this will really only ever happen for - // cases where a sync waiter started to wait on a workloop, but his part - // of the graph got mutated and retargeted onto a different workloop. - // In doing so, that sync waiter has snitched to the kernel about - // ownership, and the workloop he's bogusly waiting on will go through - // this codepath. - // - needs_forceful_end_ownership = true; } - if (_dq_state_in_sync_transfer(old_state)) { + if (!_dq_state_in_uncontended_sync(old_state)) { dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); - } else if (needs_forceful_end_ownership) { - kev_flags |= KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; - _dispatch_kq_fill_workloop_event(&ke[n++], - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, wlh, new_state); } if (_dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags)) { @@ -2482,6 +2453,9 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { #if HAVE_DECL_VQ_VERYLOWDISK |VQ_VERYLOWDISK #endif +#if HAVE_DECL_VQ_SERVEREVENT + |VQ_SERVEREVENT +#endif #if HAVE_DECL_VQ_QUOTA |VQ_QUOTA #endif @@ -2594,6 +2568,8 @@ _dispatch_memorypressure_handler(void *context) } } +DISPATCH_STATIC_GLOBAL(dispatch_source_t _dispatch_memorypressure_source); + static void _dispatch_memorypressure_init(void) { @@ -2602,6 +2578,7 @@ _dispatch_memorypressure_init(void) DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_mgr_q._as_dq); dispatch_set_context(ds, ds); dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); + _dispatch_memorypressure_source = ds; dispatch_activate(ds); } #endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE @@ -2615,7 +2592,7 @@ _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) if (!e) return; _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); if (_dispatch_ios_simulator_memory_warnings_fd == -1) { - (void)dispatch_assume_zero(errno); + DISPATCH_INTERNAL_CRASH(errno, "Failed to create fd to simulator memory pressure file"); } } @@ -2632,8 +2609,12 @@ _dispatch_source_memorypressure_create(dispatch_source_type_t dst, dst = &_dispatch_source_type_vnode; handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; + if (handle < 0) { + return DISPATCH_UNOTE_NULL; + } mask = NOTE_ATTRIB; + dispatch_unote_t du = dux_create(dst, handle, mask); if (du._du) { du._du->du_memorypressure_override = true; @@ -2702,6 +2683,7 @@ static void _dispatch_mach_host_notify_update(void *context); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_notify_port_pred); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_calendar_pred); DISPATCH_STATIC_GLOBAL(mach_port_t _dispatch_mach_notify_port); +DISPATCH_STATIC_GLOBAL(dispatch_unote_t _dispatch_mach_notify_unote); static void _dispatch_timers_calendar_change(void) @@ -2811,6 +2793,7 @@ _dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) dispatch_assume(_dispatch_unote_register(du, DISPATCH_WLH_ANON, DISPATCH_PRIORITY_FLAG_MANAGER)); + _dispatch_mach_notify_unote = du; } static void @@ -3303,14 +3286,18 @@ const 
dispatch_source_type_s _dispatch_mach_type_recv = { DISPATCH_NORETURN static void _dispatch_mach_reply_merge_evt(dispatch_unote_t du DISPATCH_UNUSED, - uint32_t flags, uintptr_t data DISPATCH_UNUSED, + uint32_t flags, uintptr_t data, pthread_priority_t pp DISPATCH_UNUSED) { if (flags & EV_VANISHED) { DISPATCH_CLIENT_CRASH(0, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } - DISPATCH_INTERNAL_CRASH(flags, "Unexpected event"); +#if __LP64__ + data = (uintptr_t)(kern_return_t)data; + data |= (uintptr_t)flags << 32; +#endif + DISPATCH_INTERNAL_CRASH(data, "Unexpected event"); } const dispatch_source_type_s _dispatch_mach_type_reply = { diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 28f167517..afc82c02a 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -243,7 +243,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) int i, target_runnable = (int)dispatch_hw_config(active_cpus); foreach_qos_bucket_reverse(i) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), false); + mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), 0); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; diff --git a/src/eventlink.c b/src/eventlink.c new file mode 100644 index 000000000..ffba90002 --- /dev/null +++ b/src/eventlink.c @@ -0,0 +1,555 @@ +/* + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#include <mach/mach_eventlink.h>
+
+#if OS_EVENTLINK_USE_MACH_EVENTLINK
+
+OS_OBJECT_CLASS_DECL(os_eventlink);
+#if !USE_OBJC
+OS_OBJECT_VTABLE_INSTANCE(os_eventlink,
+		(void (*)(_os_object_t))_os_eventlink_xref_dispose,
+		(void (*)(_os_object_t))_os_eventlink_dispose);
+#endif // USE_OBJC
+#define EVENTLINK_CLASS OS_OBJECT_VTABLE(os_eventlink)
+
+/* Convenience macros for accessing into the struct os_eventlink_s */
+#define ev_local_port port_pair.pair[0]
+#define ev_remote_port port_pair.pair[1]
+#define ev_port_pair port_pair.desc
+
+#pragma mark Internal functions
+
+void
+_os_eventlink_xref_dispose(os_eventlink_t ev) {
+	return _os_object_release_internal(ev->_as_os_obj);
+}
+
+void
+_os_eventlink_dispose(os_eventlink_t ev) {
+	if (ev->ev_state & OS_EVENTLINK_LABEL_NEEDS_FREE) {
+		free((void *) ev->name);
+	}
+
+	if (MACH_PORT_VALID(ev->ev_local_port)) {
+		mach_port_deallocate(mach_task_self(), ev->ev_local_port);
+	}
+	if (MACH_PORT_VALID(ev->ev_remote_port)) {
+		mach_port_deallocate(mach_task_self(), ev->ev_remote_port);
+	}
+}
+
+static inline os_eventlink_t
+_os_eventlink_create_internal(const char *name)
+{
+	os_eventlink_t ev = NULL;
+	ev = (os_eventlink_t) _os_object_alloc(EVENTLINK_CLASS,
+			sizeof(struct os_eventlink_s));
+	if (ev == NULL) {
+		errno = ENOMEM;
+		return NULL;
+	}
+
+	if (name) {
+		const char *tmp = _dispatch_strdup_if_mutable(name);
+		if (tmp != name) {
+			ev->ev_state |= OS_EVENTLINK_LABEL_NEEDS_FREE;
+		}
+		ev->name = tmp;
+	}
+
+	return ev;
+}
+
+static inline int
+_mach_error_to_errno(kern_return_t kr)
+{
+	int ret = 0;
+
+	switch (kr) {
+	case KERN_NAME_EXISTS:
+		ret = EALREADY;
+		break;
+	case KERN_INVALID_ARGUMENT:
+		ret = EINVAL;
+		break;
+	case KERN_OPERATION_TIMED_OUT:
+		ret = ETIMEDOUT;
+		break;
+	case KERN_INVALID_NAME:
+		/* This is most likely due to waiting on a cancelled eventlink but also
+		 * possible to hit this if there is a bug and a double free of the port.
+		 */
+	case KERN_TERMINATED: /* Other side died */
+		ret = ECANCELED;
+		break;
+	case KERN_ABORTED:
+		ret = ECONNABORTED;
+		break;
+	case KERN_SUCCESS:
+		ret = 0;
+		break;
+	default:
+		return -1;
+	}
+
+	errno = ret;
+	return ret;
+}
+
+static uint64_t
+_os_clockid_normalize_to_machabs(os_clockid_t inclock, uint64_t intimeout)
+{
+	uint64_t timeout = 0;
+
+	switch (inclock) {
+	case OS_CLOCK_MACH_ABSOLUTE_TIME:
+		timeout = intimeout;
+		break;
+	}
+
+	return timeout;
+}
+
+static int
+os_eventlink_wait_until_internal(os_eventlink_t ev, os_clockid_t clock,
+		uint64_t deadline, uint64_t *signals_consumed_out)
+{
+	int ret = 0;
+	os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME);
+
+	// These checks are racy but allow us to shortcircuit in userspace
+	if (_os_eventlink_inactive(ev->ev_local_port)) {
+		errno = ret = EINVAL;
+		return ret;
+	}
+	if (_os_eventlink_is_cancelled(ev->ev_state)) {
+		errno = ret = ECANCELED;
+		return ret;
+	}
+
+	kern_return_t kr = KERN_SUCCESS;
+	uint64_t count_to_exceed = ev->local_count;
+
+	kr = mach_eventlink_wait_until(ev->ev_local_port, &ev->local_count,
+			MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline);
+	if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) {
+		*signals_consumed_out = ev->local_count - count_to_exceed;
+	} else if (kr == KERN_INVALID_NAME) {
+		/* This means that the eventlink got cancelled after the cancel check
+		 * above but before we waited --> assert that that is indeed the case */
+		os_assert(_os_eventlink_is_cancelled(ev->ev_state));
+	}
+
+	return _mach_error_to_errno(kr);
+}
+
+static int
+os_eventlink_signal_and_wait_until_internal(os_eventlink_t ev, os_clockid_t clock,
+		uint64_t deadline, uint64_t * _Nullable signals_consumed_out)
+{
+	int ret = 0;
+	kern_return_t kr = KERN_SUCCESS;
+	os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME);
+
+	// These checks are racy but allow us to shortcircuit in userspace
+	if (_os_eventlink_inactive(ev->ev_local_port)) {
+		errno = ret = EINVAL;
+		return ret;
+	}
+	if (_os_eventlink_is_cancelled(ev->ev_state)) {
+		errno = ret = ECANCELED;
+		return ret;
+	}
+
+	uint64_t count_to_exceed = ev->local_count;
+	kr = mach_eventlink_signal_wait_until(ev->ev_local_port, &ev->local_count, 0,
+			MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline);
+
+	if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) {
+		*signals_consumed_out = ev->local_count - count_to_exceed;
+	} else if (kr == KERN_INVALID_NAME) {
+		/* This means that the eventlink got cancelled after the cancel check
+		 * above but before we signaled and waited --> assert that that is
+		 * indeed the case */
+		os_assert(_os_eventlink_is_cancelled(ev->ev_state));
+	}
+
+	return _mach_error_to_errno(kr);
+}
+
+
+#pragma mark Private functions
+
+os_eventlink_t
+os_eventlink_create(const char *name)
+{
+	return _os_eventlink_create_internal(name);
+}
+
+int
+os_eventlink_activate(os_eventlink_t ev)
+{
+	int ret = 0;
+
+	// These checks are racy but allow us to shortcircuit before we make
+	// the syscall
+	if (MACH_PORT_VALID(ev->ev_local_port)) {
+		return ret;
+	}
+
+	if (_os_eventlink_is_cancelled(ev->ev_state)) {
+		errno = ret = ECANCELED;
+		return ret;
+	}
+
+	struct os_eventlink_s tmp_ev;
+	bzero(&tmp_ev, sizeof(tmp_ev));
+
+	kern_return_t kr = mach_eventlink_create(mach_task_self(),
+			MELC_OPTION_NO_COPYIN, &tmp_ev.ev_local_port);
+	if (kr == KERN_SUCCESS) {
+		// Only atomically store the new ports if we have
+		// EVENTLINK_INACTIVE_PORT there. The only reason this would fail is
+		// because it was concurrently activated.
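/*
 * [Editor's note — illustrative sketch, not part of this patch] The cmpxchg
 * just below works because the eventlink packs both mach ports into one
 * 64-bit word (see union eventlink_internal later in this patch), so a
 * single compare-and-swap publishes the local and remote port together.
 * The same idea in self-contained C11, with hypothetical names:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef union {
	uint32_t ports[2];	/* ports[0] local, ports[1] remote */
	uint64_t word;		/* the unit the CAS operates on */
} ex_port_pair_t;

static bool
ex_publish_ports(_Atomic uint64_t *slot, uint32_t local, uint32_t remote)
{
	ex_port_pair_t pair = { .ports = { local, remote } };
	uint64_t expected = 0;	/* 0 == inactive, like EVENTLINK_INACTIVE_PORT */
	/* Fails iff another thread activated the pair concurrently. */
	return atomic_compare_exchange_strong_explicit(slot, &expected,
			pair.word, memory_order_relaxed, memory_order_relaxed);
}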
+ uint64_t dummy; + bool success = os_atomic_cmpxchgv(&ev->ev_port_pair, EVENTLINK_INACTIVE_PORT, tmp_ev.ev_port_pair, &dummy, relaxed); + if (!success) { + // tmp_ev still has valid ports that need to be released + if (MACH_PORT_VALID(tmp_ev.ev_local_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_local_port); + } + if (MACH_PORT_VALID(tmp_ev.ev_remote_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_remote_port); + } + return EINVAL; + } + } + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_extract_remote_port(os_eventlink_t ev, mach_port_t *port_out) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit and give the right + // errors + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + /* We're giving away our +1 to the remote port */ + mach_port_t port = os_atomic_xchg(&ev->ev_remote_port, EVENTLINK_CLEARED_PORT, relaxed); + if (!MACH_PORT_VALID(port)) { + errno = ret = EINVAL; + return ret; + } + *port_out = port; + + return ret; +} + +os_eventlink_t +os_eventlink_create_with_port(const char *name, mach_port_t port) +{ + os_eventlink_t ev = _os_eventlink_create_internal(name); + if (ev == NULL) { + return NULL; + } + /* Take our own +1 on the port */ + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + + os_assert(ev->ev_local_port == EVENTLINK_INACTIVE_PORT); + ev->ev_local_port = port; + return ev; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t template) +{ + mach_port_t mp; + int ret = os_eventlink_extract_remote_port(template, &mp); + if (ret) { + errno = ret; + return NULL; + } + + os_eventlink_t ev = os_eventlink_create_with_port(name, mp); + + /* os_eventlink_create_with_port doesn't consume the right it was given, we + * should release our reference */ + mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_SEND, -1); + + return ev; +} + +int +os_eventlink_associate(os_eventlink_t ev, os_eventlink_associate_options_t + options) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + mach_eventlink_associate_option_t mela_options; + mela_options = (options == OE_ASSOCIATE_ON_WAIT) ? + MELA_OPTION_ASSOCIATE_ON_WAIT : MELA_OPTION_NONE; + mach_port_t thread_port = (options == OE_ASSOCIATE_ON_WAIT) ? MACH_PORT_NULL : _dispatch_thread_port(); + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_associate(ev->ev_local_port, thread_port, 0, 0, 0, 0, mela_options); + return _mach_error_to_errno(kr); +} + +int +os_eventlink_disassociate(os_eventlink_t ev) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + /* Don't bother call mach_eventlink_disassociate since the backing + * eventlink object in the kernel will be gone */ + return ret; + } + + /* TODO: Track the associated thread in the eventlink object and error out + * in user space if the thread calling disassociate isn't the same thread. 
+ * The kernel doesn't enforce this */ + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_disassociate(ev->ev_local_port, MELD_OPTION_NONE); + + if (kr == KERN_TERMINATED) { + /* Absorb this error in libdispatch, knowing that the other side died + * first is not helpful here */ + return 0; + } + + return _mach_error_to_errno(kr); +} + + +int +os_eventlink_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t *signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Convert timeout to deadline */ + return os_eventlink_wait_until_internal(ev, clock, mach_absolute_time() + machabs_timeout, + signals_consumed_out); +} + +int +os_eventlink_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal(os_eventlink_t ev) +{ + int ret = 0; + + // This is racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_signal(ev->ev_local_port, 0); + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_signal_and_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_signal_and_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Converts timeout to deadline */ + return os_eventlink_signal_and_wait_until_internal(ev, clock, mach_absolute_time() + machabs_timeout, + signals_consumed_out); +} + +void +os_eventlink_cancel(os_eventlink_t ev) +{ + if (_os_eventlink_is_cancelled(ev->ev_state)) { + return; + } + + os_atomic_or(&ev->ev_state, OS_EVENTLINK_CANCELLED, relaxed); + + + mach_port_t p = ev->ev_local_port; + if (MACH_PORT_VALID(p)) { + /* mach_eventlink_destroy consumes a ref on the ports. We therefore take + * +1 on the local port so that other threads using the ev_local_port have valid + * ports even if it isn't backed by an eventlink object. The last ref of + * the port in the eventlink object will be dropped in xref dispose */ + kern_return_t kr = mach_port_mod_refs(mach_task_self(), p, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + mach_eventlink_destroy(p); + } + + // If the remote port was valid, then we already called destroy on the + // local port and we don't need to call it again on the remote port. 
We keep + // the reference we already have on the remote port (if any) and deallocate + // it in xref dispose + +} + +#else /* OS_EVENTLINK_USE_MACH_EVENTLINK */ +#pragma mark Simulator + +void +_os_eventlink_dispose(os_eventlink_t __unused ev) { +} + +os_eventlink_t +os_eventlink_create(const char * __unused name) +{ + return NULL; +} + +int +os_eventlink_activate(os_eventlink_t __unused ev) +{ + return ENOTSUP; +} + +int +os_eventlink_extract_remote_port(os_eventlink_t __unused eventlink, mach_port_t *port_out) +{ + *port_out = MACH_PORT_NULL; + return ENOTSUP; +} + +os_eventlink_t +os_eventlink_create_with_port(const char * __unused name, mach_port_t __unused mach_port) +{ + errno = ENOTSUP; + return NULL; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char * __unused name, os_eventlink_t __unused eventlink) +{ + errno = ENOTSUP; + return NULL; +} + +int +os_eventlink_associate(os_eventlink_t __unused eventlink, os_eventlink_associate_options_t __unused options) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_disassociate(os_eventlink_t __unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal(os_eventlink_t __unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_signal_and_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +void +os_eventlink_cancel(os_eventlink_t __unused ev) +{ +} + +#endif /* OS_EVENTLINK_USE_MACH_EVENTLINK */ diff --git a/src/eventlink_internal.h b/src/eventlink_internal.h new file mode 100644 index 000000000..4c8f0d288 --- /dev/null +++ b/src/eventlink_internal.h @@ -0,0 +1,67 @@ +// +// eventlink_internal.h +// libdispatch +// +// Created by Rokhini Prabhu on 12/13/19. +// + +#ifndef __OS_EVENTLINK_INTERNAL__ +#define __OS_EVENTLINK_INTERNAL__ + +#if OS_EVENTLINK_USE_MACH_EVENTLINK +#include +#endif + +#define OS_EVENTLINK_LABEL_NEEDS_FREE 0x1ull +#define OS_EVENTLINK_CANCELLED 0x2ull + +union eventlink_internal { + mach_port_t pair[2]; + uint64_t desc; +}; + +struct os_eventlink_s { + struct _os_object_s _as_os_obj[0]; + OS_OBJECT_STRUCT_HEADER(eventlink); + + const char *name; + uint64_t ev_state; + + /* Note: We use the union which allows us to write to both local and remote + * port atomically during activate and cancellation APIs. The combination of + * the state of the local_port as well as the ev_state tells us the state of + * the eventlink + * + * local_port = EVENTLINK_INACTIVE_PORT means that it hasn't been created yet. + * local_port = a valid mach port means that it has been created. + * + * If the OS_EVENTLINK_CANCELLED bit is set, that means that the port does + * not point to a valid kernel eventlink object. 
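 *
 * [Editor's illustration, hedged] Putting the two fields together, a reader
 * can classify an eventlink with the helpers defined at the bottom of this
 * header, roughly:
 *
 *	if (_os_eventlink_is_cancelled(ev->ev_state))        // cancelled
 *	else if (_os_eventlink_inactive(ev->ev_local_port))  // created, inactive
 *	else                                                 // active, usable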
+ * + * The ref of the ports are only dropped when the last external ref is + * dropped. + */ + union eventlink_internal port_pair; + + uint64_t local_count; +}; + +#define EVENTLINK_INACTIVE_PORT ((uint64_t) 0) +#define EVENTLINK_CLEARED_PORT ((uint64_t) 0) + +static inline bool +_os_eventlink_inactive(mach_port_t port) +{ + return port == EVENTLINK_INACTIVE_PORT; +} + +static inline bool +_os_eventlink_is_cancelled(uint64_t ev_state) +{ + return (ev_state & OS_EVENTLINK_CANCELLED) == OS_EVENTLINK_CANCELLED; +} + +void _os_eventlink_xref_dispose(os_eventlink_t ev); +void _os_eventlink_dispose(os_eventlink_t ev); + +#endif /* __OS_EVENTLINK_INTERNAL */ diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index f6331a32f..a79053c9d 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -1430,13 +1430,26 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void +bool __firehose_merge_updates(firehose_push_reply_t update) { firehose_buffer_t fb = kernel_firehose_buffer; + bool has_more = false; + uint16_t head; + if (likely(fb)) { + firehose_buffer_header_t fbh = &fb->fb_header; firehose_client_merge_updates(fb, true, update, false, NULL); + head = os_atomic_load(&fbh->fbh_ring_io_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_io_flushed, relaxed)) { + has_more = true; + } + head = os_atomic_load(&fbh->fbh_ring_mem_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_mem_flushed, relaxed)) { + has_more = true; + } } + return has_more; } int diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index fd05801a1..ea7632801 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -473,6 +473,7 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, @@ -486,6 +487,7 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index 64cd2feae..11e3f3fa8 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -495,7 +495,7 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; - _os_object_release_without_xref_dispose(&fc->fc_as_os_object); + _os_object_release_without_xref_dispose(&fc->fc_object_header); } OS_NOINLINE @@ -764,7 +764,8 @@ firehose_client_create(firehose_buffer_t fb, firehose_token_t token, server_config.fs_mem_drain_queue, server_config.fs_io_drain_queue }; - fc->fc_mach_channel_refcnt = FIREHOSE_BUFFER_NPUSHPORTS; + + os_atomic_init(&fc->fc_mach_channel_refcnt, FIREHOSE_BUFFER_NPUSHPORTS); for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { fc->fc_recvp[i] = recvp[i]; firehose_mach_port_guard(fc->fc_recvp[i], true, &fc->fc_recvp[i]); @@ -1370,8 +1371,8 @@ firehose_server_register(mach_port_t 
server_port OS_UNUSED, * Request a no senders notification for the memory channel. * That should indicate the client going away. */ - dispatch_mach_request_no_senders( - fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM]); + dispatch_mach_notify_no_senders( + fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM], true); firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index daba772b5..c3ea87982 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -31,10 +31,7 @@ struct firehose_snapshot_s { }; struct firehose_client_s { - union { - _OS_OBJECT_HEADER(void *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); - struct _os_object_s fc_as_os_object; - }; + struct _os_object_s fc_object_header; TAILQ_ENTRY(firehose_client_s) fc_entry; struct firehose_client_s *volatile fc_next[2]; diff --git a/src/init.c b/src/init.c index 04ab9c459..08f790828 100644 --- a/src/init.c +++ b/src/init.c @@ -66,6 +66,7 @@ void dispatch_atfork_parent(void) { _os_object_atfork_parent(); + _voucher_atfork_parent(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -141,8 +142,11 @@ pthread_key_t dispatch_bcounter_key; pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; +pthread_key_t dispatch_enqueue_key; +pthread_key_t os_workgroup_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE + #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; @@ -156,6 +160,10 @@ uint64_t _voucher_unique_pid; voucher_activity_hooks_t _voucher_libtrace_hooks; dispatch_mach_t _voucher_activity_debug_channel; #endif + +dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +bool _voucher_process_can_use_arbitrary_personas = false; + #if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG bool _dispatch_set_qos_class_enabled; #endif @@ -165,7 +173,7 @@ bool _dispatch_kevent_workqueue_enabled = 1; DISPATCH_HW_CONFIG(); uint8_t _dispatch_unsafe_fork; -uint8_t _dispatch_mode; +uint8_t _dispatch_mode = DISPATCH_MODE_NO_FAULTS; bool _dispatch_child_of_unsafe_fork; #if DISPATCH_USE_MEMORYPRESSURE_SOURCE bool _dispatch_memory_warn; @@ -299,10 +307,12 @@ static struct dispatch_pthread_root_queue_context_s // renaming this symbol struct dispatch_queue_global_s _dispatch_root_queues[] = { #define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ - ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ + (((flags) & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ - DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + (((flags) & DISPATCH_PRIORITY_FLAG_COOPERATIVE) ? \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_COOPERATIVE : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)) +#define _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(n, flags, ...) \ [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \ .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ @@ -313,83 +323,127 @@ struct dispatch_queue_global_s _dispatch_root_queues[] = { _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ __VA_ARGS__ \ } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) 
\ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_cooperative), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = NULL, \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + /* We initialize the rest of the fields in + * _dispatch_cooperative_root_queue_init_fallback */ +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + .do_vtable = DISPATCH_VTABLE(queue_concurrent), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, .dq_label = "com.apple.root.maintenance-qos", .dq_serialnum = 4, ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_serialnum = 5, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, - .dq_label = "com.apple.root.background-qos", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.maintenance-qos.cooperative", .dq_serialnum = 6, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.background-qos.overcommit", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, 0, + .dq_label = "com.apple.root.background-qos", .dq_serialnum = 7, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, - .dq_label = "com.apple.root.utility-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.background-qos.overcommit", .dq_serialnum = 8, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.utility-qos.overcommit", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.background-qos.cooperative", .dq_serialnum = 9, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, - .dq_label = "com.apple.root.default-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, 0, + .dq_label = "com.apple.root.utility-qos", .dq_serialnum = 10, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.utility-qos.overcommit", + .dq_serialnum = 11, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.utility-qos.cooperative", + .dq_serialnum = 12, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos", + .dq_serialnum = 13, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", - .dq_serialnum = 11, + .dq_serialnum = 14, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_COOPERATIVE | DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = 
"com.apple.root.default-qos.cooperative", + .dq_serialnum = 15, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, .dq_label = "com.apple.root.user-initiated-qos", - .dq_serialnum = 12, + .dq_serialnum = 16, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_serialnum = 13, + .dq_serialnum = 17, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-initiated-qos.cooperative", + .dq_serialnum = 18, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, .dq_label = "com.apple.root.user-interactive-qos", - .dq_serialnum = 14, + .dq_serialnum = 19, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_serialnum = 15, + .dq_serialnum = 20, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-interactive-qos.cooperative", + .dq_serialnum = 21, ), }; -unsigned long volatile _dispatch_queue_serial_numbers = - DISPATCH_QUEUE_SERIAL_NUMBER_INIT; +__dispatch_is_array(_dispatch_root_queues); +_Static_assert(sizeof(_dispatch_root_queues) == + sizeof(struct dispatch_queue_global_s) * DISPATCH_ROOT_QUEUE_COUNT, + "_dispatch_root_queues array size mismatch"); +const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; -dispatch_queue_global_t -dispatch_get_global_queue(intptr_t priority, uintptr_t flags) -{ - dispatch_assert(countof(_dispatch_root_queues) == - DISPATCH_ROOT_QUEUE_COUNT); - - if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return DISPATCH_BAD_INPUT; - } - dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); -#if !HAVE_PTHREAD_WORKQUEUE_QOS - if (qos == QOS_CLASS_MAINTENANCE) { - qos = DISPATCH_QOS_BACKGROUND; - } else if (qos == QOS_CLASS_USER_INTERACTIVE) { - qos = DISPATCH_QOS_USER_INITIATED; - } -#endif - if (qos == DISPATCH_QOS_UNSPECIFIED) { - return DISPATCH_BAD_INPUT; - } - return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); -} +unsigned long volatile _dispatch_queue_serial_numbers = + DISPATCH_QUEUE_SERIAL_NUMBER_INIT; dispatch_queue_t dispatch_get_current_queue(void) @@ -704,6 +758,17 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane, .dq_push = _dispatch_root_queue_push, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_cooperative, lane, + .do_type = DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_root_queue_wakeup, + .dq_push = _dispatch_root_queue_push, +); + #if 
DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane, .do_type = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE, @@ -1054,7 +1119,11 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du) _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", "BUG in libdispatch client: %s, monitored resource vanished before " "the source cancel handler was invoked " +#if !defined(_WIN32) + "{ %p[%s], ident: %d / 0x%x, handler: %p }", +#else // !defined(_WIN32) "{ %p[%s], ident: %" PRIdPTR " / 0x%" PRIxPTR ", handler: %p }", +#endif // !defined(_WIN32) dux_type(du._du)->dst_kind, dou._dq, dou._dq->dq_label ? dou._dq->dq_label : "", du._du->du_ident, du._du->du_ident, func); @@ -1523,6 +1592,20 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +DISPATCH_NOINLINE +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (likely(!u)) return f(ctxt, i, w); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, i, w); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -1554,7 +1637,7 @@ _os_object_t _os_object_alloc(const void *cls, size_t size) { if (!cls) cls = &_os_object_vtable; - return _os_object_alloc_realized(cls, size); + return _os_object_alloc_realized((const void * _Nonnull) cls, size); } void diff --git a/src/inline_internal.h b/src/inline_internal.h index ba6dbed47..a78e50277 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -48,6 +48,11 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_handler_function_t f); #endif // HAVE_MACH +typedef void (*dispatch_apply_attr_function_t)(void *, size_t, size_t); + +DISPATCH_NOTHROW void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f); + #else // !DISPATCH_USE_CLIENT_CALLOUT DISPATCH_ALWAYS_INLINE @@ -83,6 +88,13 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t)) +{ + return f(ctxt, i, w); +} + #endif // !DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -759,7 +771,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_workloop_t _dispatch_wlh_to_workloop(dispatch_wlh_t wlh) { - if (wlh == DISPATCH_WLH_ANON) { + if (wlh == NULL || wlh == DISPATCH_WLH_ANON) { return NULL; } if (dx_metatype((dispatch_workloop_t)wlh) == _DISPATCH_WORKLOOP_TYPE) { @@ -1012,9 +1024,9 @@ _dq_state_is_enqueued_on_manager(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_in_sync_transfer(uint64_t dq_state) +_dq_state_in_uncontended_sync(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER; + return dq_state & DISPATCH_QUEUE_UNCONTENDED_SYNC; } DISPATCH_ALWAYS_INLINE @@ -1033,6 +1045,18 @@ _dq_state_received_sync_wait(uint64_t dq_state) (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_needs_ensure_ownership(uint64_t dq_state) +{ + if (_dq_state_is_base_wlh(dq_state) && + _dq_state_in_uncontended_sync(dq_state)) { + return dq_state & (DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_ENQUEUED); + } + return false; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dq_state_max_qos(uint64_t dq_state) @@ -1110,6 +1134,20 @@ 
_dq_state_is_runnable(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool _dq_state_should_override(uint64_t dq_state) +{ + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_enqueued_on_manager(dq_state)) { + return false; + } + if (_dq_state_is_enqueued_on_target(dq_state)) { + return true; + } + return _dq_state_drain_locked(dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_should_override_for_waiter(uint64_t dq_state) { if (_dq_state_is_suspended(dq_state) || _dq_state_is_enqueued_on_manager(dq_state)) { @@ -1119,6 +1157,11 @@ _dq_state_should_override(uint64_t dq_state) return true; } if (_dq_state_is_base_wlh(dq_state)) { + // _dq_state_should_override is called only when the enqueued bit + // hasn't changed. For kqworkloop based code, if there's no thread + // request, then we should not try to assign a QoS/kevent override + // at all, because turnstiles are the only thing needed to resolve + // priority inversions. return false; } return _dq_state_drain_locked(dq_state); @@ -1317,7 +1360,11 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) if (unlikely(_dq_state_is_suspended(old_state))) { new_state &= ~DISPATCH_QUEUE_ENQUEUED; } else if (unlikely(_dq_state_drain_locked(old_state))) { - os_atomic_rmw_loop_give_up(break); + if (_dq_state_in_uncontended_sync(old_state)) { + new_state |= DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + } else { + os_atomic_rmw_loop_give_up(break); + } } else { new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; new_state |= lock_bits; @@ -1356,6 +1403,7 @@ _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | _dispatch_lock_value_from_tid(tid) | + DISPATCH_QUEUE_UNCONTENDED_SYNC | (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL); uint64_t old_state, new_state; @@ -1538,6 +1586,27 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) return true; } +DISPATCH_ALWAYS_INLINE +static inline +dispatch_swift_job_invoke_flags_t +_dispatch_invoke_flags_to_swift_invoke_flags(dispatch_invoke_flags_t invoke_flags) +{ + return (invoke_flags & DISPATCH_INVOKE_COOPERATIVE_DRAIN) ? 
+ DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE : DISPATCH_SWIFT_JOB_INVOKE_NONE; +} + +/* + * Clears UNCONTENDED_SYNC and RECEIVED_SYNC_WAIT + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) +{ + uint64_t clearbits = DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + os_atomic_and2o(dq, dq_state, ~clearbits, relaxed); +} + #pragma mark - #pragma mark os_mpsc_queue @@ -1556,6 +1625,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ os_mpsc_node_type(Q) _tl = (tail); \ os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ + _dispatch_set_enqueuer_for(_os_mpsc_tail Q); \ os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \ }) @@ -1568,6 +1638,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) } else { \ (void)os_atomic_store(_os_mpsc_head Q, (head), relaxed); \ } \ + _dispatch_clear_enqueuer(); \ }) #define os_mpsc_push_list(Q, head, tail, _o_next) ({ \ @@ -1595,17 +1666,19 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) os_mpsc_node_type(Q) _node; \ _node = os_atomic_load(__n, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)__n); \ + _node = _dispatch_wait_for_enqueuer((void **)__n, \ + (void **) _os_mpsc_tail Q); \ } \ _node; \ }) -#define os_mpsc_get_next(_n, _o_next) ({ \ +#define os_mpsc_get_next(_n, _o_next, tailp) ({ \ __typeof__(_n) __n = (_n); \ _os_atomic_basetypeof(&__n->_o_next) _node; \ _node = os_atomic_load(&__n->_o_next, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next); \ + _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next, \ + (void **) tailp); \ } \ _node; \ }) @@ -1618,7 +1691,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) /* to head above doesn't clobber head from concurrent enqueuer */ \ if (unlikely(!_n && \ !os_atomic_cmpxchg(_os_mpsc_tail Q, _head, NULL, release))) { \ - _n = os_mpsc_get_next(_head, _o_next); \ + _n = os_mpsc_get_next(_head, _o_next, _os_mpsc_tail Q); \ os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ } \ _n; \ @@ -1651,7 +1724,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ __typeof__(head) _head = (head), _tail = (tail), _n = NULL; \ - if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next); \ + if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next, NULL); \ _n; \ }) @@ -1909,6 +1982,15 @@ _dispatch_queue_class_probe(dispatch_lane_class_t dqu) return unlikely(tail != NULL); } +extern const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue; + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +inline bool +_dispatch_is_custom_pri_workloop(dispatch_queue_t dq) +{ + return (dq->do_targetq) == (dispatch_queue_t) _dispatch_custom_workloop_root_queue._as_dq; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) @@ -1917,14 +1999,28 @@ _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) (dqu._dgq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_is_cooperative(dispatch_queue_class_t dqu) +{ + return (dqu._dgq)->dq_priority & DISPATCH_PRIORITY_FLAG_COOPERATIVE; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline 
dispatch_queue_global_t -_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) +_dispatch_get_root_queue(dispatch_qos_t qos, uintptr_t flags) { if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } - return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; + unsigned int add_on = 0; + if (flags & DISPATCH_QUEUE_OVERCOMMIT) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT; + } else if (flags & DISPATCH_QUEUE_COOPERATIVE) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE; + } + + return &_dispatch_root_queues[3 * (qos - 1) + add_on]; } #define _dispatch_get_default_queue(overcommit) \ @@ -2055,7 +2151,7 @@ _dispatch_set_basepri(dispatch_priority_t dq_dbp) dbp = dq_dbp & ~DISPATCH_PRIORITY_OVERRIDE_MASK; } else if (dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK) { dbp &= (DISPATCH_PRIORITY_OVERRIDE_MASK | - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + DISPATCH_PRIORITY_THREAD_TYPE_MASK); dbp |= MAX(old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK, dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK); if (_dispatch_priority_fallback_qos(dq_dbp) > @@ -2170,24 +2266,29 @@ _dispatch_priority_compute_update(pthread_priority_t pp) { dispatch_assert(pp != DISPATCH_NO_PRIORITY); if (!_dispatch_set_qos_class_enabled) return 0; - // the priority in _dispatch_get_priority() only tracks manager-ness - // and overcommit, which is inherited from the current value for each update - // however if the priority had the NEEDS_UNBIND flag set we need to clear it - // the first chance we get + // the priority in _dispatch_get_priority() only tracks manager-ness and + // thread request type, which is inherited from the current value for each + // update however if the priority had the NEEDS_UNBIND flag set we need to + // clear it the first chance we get // // the manager bit is invalid input, but we keep it to get meaningful // assertions in _dispatch_set_priority_and_voucher_slow() pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; pthread_priority_t cur_priority = _dispatch_get_priority(); pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + pthread_priority_t thread_type = _PTHREAD_PRIORITY_THREAD_TYPE_MASK; + + // The thread request type only matters if we have NEEDS_UNBIND. For the + // rest, we don't consider the thread request type when deciding if we need + // to consider changing current thread's priority. + if (unlikely(cur_priority & unbind)) { - // else we always need an update if the NEEDS_UNBIND flag is set - // the slow path in _dispatch_set_priority_and_voucher_slow() will + // if the NEEDS_UNBIND flag is set, we always need to update and take + // the slow path in _dispatch_set_priority_and_voucher_slow() which will // adjust the priority further with the proper overcommitness return pp ? 
pp : (cur_priority & ~unbind); } else { - cur_priority &= ~overcommit; + cur_priority &= ~thread_type; } if (unlikely(pp != cur_priority)) return pp; return 0; @@ -2295,6 +2396,19 @@ _dispatch_queue_need_override(dispatch_queue_class_t dq, dispatch_qos_t qos) #define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1 #define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2 +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_propagate(dispatch_qos_t qos) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + // Cap QOS for propagation at user-initiated + return MIN(qos, DISPATCH_QOS_USER_INITIATED); +#else + (void)qos; + return 0; +#endif +} + DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_priority_compute_propagated(pthread_priority_t pp, @@ -2525,7 +2639,12 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, dqu); } diff --git a/src/internal.h b/src/internal.h index 66f0244cb..d22a3ac09 100644 --- a/src/internal.h +++ b/src/internal.h @@ -35,6 +35,8 @@ #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_PRIVATE_INDIRECT__ #ifdef __APPLE__ #include @@ -97,6 +99,19 @@ #include #include +#if __has_feature(ptrauth_calls) +#include +#define DISPATCH_VTABLE_ENTRY(op) \ + (* __ptrauth(ptrauth_key_process_independent_code, true, \ + ptrauth_string_discriminator("dispatch." 
#op)) const op) +#define DISPATCH_FUNCTION_POINTER \ + __ptrauth(ptrauth_key_process_dependent_code, true, \ + ptrauth_string_discriminator("dispatch.handler")) +#else +#define DISPATCH_VTABLE_ENTRY(op) (* const op) +#define DISPATCH_FUNCTION_POINTER +#endif + #define __DISPATCH_HIDE_SYMBOL(sym, version) \ __asm__(".section __TEXT,__const\n\t" \ ".globl $ld$hide$os" #version "$_" #sym "\n\t" \ @@ -181,6 +196,7 @@ typedef union { struct dispatch_io_s *_dchannel; struct dispatch_continuation_s *_dc; + struct dispatch_swift_continuation_s *_dsjc; struct dispatch_sync_context_s *_dsc; struct dispatch_operation_s *_doperation; struct dispatch_disk_s *_ddisk; @@ -203,6 +219,10 @@ upcast(dispatch_object_t dou) #endif // __OBJC__ #include +#include +#include +#include +#include #include #include #include @@ -220,12 +240,17 @@ upcast(dispatch_object_t dou) #include #endif #include "os/object_private.h" +#include "os/eventlink_private.h" +#include "os/workgroup_object_private.h" +#include "os/workgroup_interval_private.h" +#include "apply_private.h" #include "queue_private.h" #include "channel_private.h" #include "workloop_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" +#include "time_private.h" #include "os/voucher_private.h" #include "os/voucher_activity_private.h" #include "io_private.h" @@ -475,6 +500,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_MODE_STRICT (1U << 0) #define DISPATCH_MODE_NO_FAULTS (1U << 1) +#define DISPATCH_COOPERATIVE_POOL_STRICT (1U << 2) extern uint8_t _dispatch_mode; DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD @@ -700,8 +726,7 @@ _dispatch_fork_becomes_unsafe(void) #ifndef HAVE_PTHREAD_WORKQUEUE_WORKLOOP #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(WORKQ_FEATURE_WORKLOOP) && \ - defined(KEVENT_FLAG_WORKLOOP) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) + defined(KEVENT_FLAG_WORKLOOP) #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 1 #else #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 0 @@ -709,13 +734,21 @@ _dispatch_fork_becomes_unsafe(void) #endif // !defined(HAVE_PTHREAD_WORKQUEUE_WORKLOOP) #ifndef DISPATCH_USE_WORKQUEUE_NARROWING -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_WORKQUEUES #define DISPATCH_USE_WORKQUEUE_NARROWING 1 #else #define DISPATCH_USE_WORKQUEUE_NARROWING 0 #endif #endif // !defined(DISPATCH_USE_WORKQUEUE_NARROWING) +#ifndef DISPATCH_USE_COOPERATIVE_WORKQUEUE +#if defined(WORKQ_FEATURE_COOPERATIVE_WORKQ) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 1 +#else +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 0 +#endif +#endif + #ifndef DISPATCH_USE_PTHREAD_ROOT_QUEUES #if defined(__BLOCKS__) && defined(__APPLE__) #define DISPATCH_USE_PTHREAD_ROOT_QUEUES 1 // @@ -802,10 +835,10 @@ extern int malloc_engaged_nano(void); extern bool _dispatch_memory_warn; #endif -#if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) && \ - !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE) -#define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1 +#if defined(MACH_MSG_QOS_LAST) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 1 +#else +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 0 #endif #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) @@ -993,7 +1026,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #ifndef VOUCHER_USE_PERSONA #if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) 
&& \ - !TARGET_OS_SIMULATOR + !TARGET_OS_SIMULATOR && !TARGET_CPU_ARM #define VOUCHER_USE_PERSONA 1 #else #define VOUCHER_USE_PERSONA 0 @@ -1009,6 +1042,23 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_PERSONA 0 #endif // VOUCHER_USE_MACH_VOUCHER +#ifndef VOUCHER_USE_PERSONA_ADOPT_ANY +#if VOUCHER_USE_PERSONA && defined(BANK_PERSONA_ADOPT_ANY) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define VOUCHER_USE_PERSONA_ADOPT_ANY 1 +#else +#define VOUCHER_USE_PERSONA_ADOPT_ANY 0 +#endif +#endif + +#ifndef OS_EVENTLINK_USE_MACH_EVENTLINK +#if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) && __has_include() +#define OS_EVENTLINK_USE_MACH_EVENTLINK 1 +#else +#define OS_EVENTLINK_USE_MACH_EVENTLINK 0 +#endif +#endif // OS_EVENTLINK_USE_MACH_EVENTLINK + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // @@ -1130,6 +1180,8 @@ extern bool _dispatch_kevent_workqueue_enabled; /* #includes dependent on internal.h */ #include "object_internal.h" +#include "workgroup_internal.h" +#include "eventlink_internal.h" #include "semaphore_internal.h" #include "introspection_internal.h" #include "queue_internal.h" diff --git a/src/introspection.c b/src/introspection.c index 27a955be9..bee263917 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -256,9 +256,9 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; - if (da->da_todo) { + if (os_atomic_load2o(da, da_todo, relaxed)) { dc = da->da_dc; - dq = dc->dc_data; + dq = dc->dc_other; ctxt = dc->dc_ctxt; func = dc->dc_func; apply = true; @@ -427,7 +427,7 @@ dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_queue_introspection_context_t next; if (start) { - next = start->do_finalizer; + next = start->do_introspection_ctxt; } else { next = LIST_FIRST(&_dispatch_introspection.queues); } @@ -616,7 +616,7 @@ _dispatch_object_finalizer(dispatch_object_t dou) switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; return dqic->dqic_finalizer; default: return dou._do->do_finalizer; @@ -631,7 +631,7 @@ _dispatch_object_set_finalizer(dispatch_object_t dou, switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; dqic->dqic_finalizer = finalizer; break; default: @@ -656,7 +656,7 @@ _dispatch_introspection_queue_create(dispatch_queue_t dq) LIST_INIT(&dqic->dqic_order_top_head); LIST_INIT(&dqic->dqic_order_bottom_head); } - dq->do_finalizer = dqic; + dq->do_introspection_ctxt = dqic; _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list); @@ -689,7 +689,7 @@ _dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) void _dispatch_introspection_queue_dispose(dispatch_queue_t dq) { - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq->do_introspection_ctxt; DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { @@ -983,7 +983,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { otherq = e->dqoe_bottom_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; 
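/*
 * [Editor's sketch — not part of this patch] The dispose path above walks
 * one queue's edge list and unlinks each edge from the peer queue's list
 * while holding the peer's lock. The core unlink-while-iterating shape uses
 * the BSD <sys/queue.h> _SAFE iterators, e.g. (hypothetical types):
 */
#include <sys/queue.h>

struct ex_edge {
	LIST_ENTRY(ex_edge) ex_link;
};
LIST_HEAD(ex_edge_head, ex_edge);

static void
ex_dispose_edges(struct ex_edge_head *head, void (*dispose)(struct ex_edge *))
{
	struct ex_edge *e, *tmp;
	/* _SAFE captures the successor first, so unlinking e mid-walk is legal */
	LIST_FOREACH_SAFE(e, head, ex_link, tmp) {
		LIST_REMOVE(e, ex_link);
		dispose(e);
	}
}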
_dispatch_unfair_lock_lock(&o_dqic->dqic_order_bottom_head_lock); LIST_REMOVE(e, dqoe_order_bottom_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_bottom_head_lock); @@ -998,7 +998,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { otherq = e->dqoe_top_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&o_dqic->dqic_order_top_head_lock); LIST_REMOVE(e, dqoe_order_top_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_top_head_lock); @@ -1070,7 +1070,8 @@ _dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) { struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t btqic = + bottom_tq->do_introspection_ctxt; // has anyone above bottom_tq ever sync()ed onto top_tq ? _dispatch_unfair_lock_lock(&btqic->dqic_order_top_head_lock); @@ -1099,8 +1100,9 @@ _dispatch_introspection_order_record(dispatch_queue_t top_q) dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q); dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); - dispatch_queue_introspection_context_t ttqic = top_tq->do_finalizer; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t ttqic, btqic; + ttqic = top_tq->do_introspection_ctxt; + btqic = bottom_tq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock); LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) { @@ -1187,7 +1189,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) [2] = "a recipient", [3] = "both an initiator and a recipient" }; - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq-> do_introspection_ctxt; bool as_top = !LIST_EMPTY(&dqic->dqic_order_top_head); bool as_bottom = !LIST_EMPTY(&dqic->dqic_order_top_head); @@ -1200,7 +1202,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) "a dispatch_sync", dq, dq->dq_label ?: "", reasons[(int)as_top + 2 * (int)as_bottom]); _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - _dispatch_introspection_queue_order_dispose(dq->do_finalizer); + _dispatch_introspection_queue_order_dispose(dq->do_introspection_ctxt); _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } } diff --git a/src/io.c b/src/io.c index 99a9ba6cb..fa721bdd0 100644 --- a/src/io.c +++ b/src/io.c @@ -151,8 +151,13 @@ enum { #define _dispatch_io_log(x, ...) #endif // DISPATCH_IO_DEBUG +#if !defined(_WIN32) +#define _dispatch_fd_debug(msg, fd, ...) \ + _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) +#else // !defined(_WIN32) #define _dispatch_fd_debug(msg, fd, ...) \ _dispatch_io_log("fd[0x%" PRIx64 "]: " msg, fd, ##__VA_ARGS__) +#endif // !defined(_WIN32) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) #define _dispatch_io_channel_debug(msg, channel, ...) 
\ @@ -1312,15 +1317,15 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, (void)mode; DWORD dwDesiredAccess = 0; switch (oflag & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { - case _O_RDONLY: - dwDesiredAccess = GENERIC_READ; - break; - case _O_WRONLY: - dwDesiredAccess = GENERIC_WRITE; - break; - case _O_RDWR: - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - break; + case _O_RDONLY: + dwDesiredAccess = GENERIC_READ; + break; + case _O_WRONLY: + dwDesiredAccess = GENERIC_WRITE; + break; + case _O_RDWR: + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; } DWORD dwCreationDisposition = OPEN_EXISTING; if (oflag & _O_CREAT) { @@ -1422,7 +1427,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // On fds lock queue dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); - _dispatch_fd_entry_debug("create: fd %" PRId64, fd_entry, fd); +#if !defined(_WIN32) + _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); +#else // !defined(_WIN32) + _dispatch_fd_entry_debug("create: fd %"PRId64, fd_entry, fd); +#endif // !defined(_WIN32) fd_entry->fd = fd; LIST_INSERT_HEAD(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( diff --git a/src/mach.c b/src/mach.c index 877761a03..3a39d8d9c 100644 --- a/src/mach.c +++ b/src/mach.c @@ -71,6 +71,9 @@ static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, mach_port_t send); +static inline mach_msg_option_t +_dispatch_mach_send_msg_prepare(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options); // For tests only. DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); @@ -143,6 +146,9 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, if (unlikely(!q)) { q = _dispatch_get_default_queue(true); } else { + if (_dispatch_queue_is_cooperative(q)) { + DISPATCH_CLIENT_CRASH(q, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_retain(q); } dm->do_targetq = q; @@ -196,12 +202,18 @@ dispatch_mach_request_no_senders(dispatch_mach_t dm) _dispatch_queue_setter_assert_inactive(dm); } +void +dispatch_mach_notify_no_senders(dispatch_mach_t dm, bool made_sendrights) +{ + dm->dm_arm_no_senders = true; + dm->dm_made_sendrights = made_sendrights; + _dispatch_queue_setter_assert_inactive(dm); +} + void dispatch_mach_set_flags(dispatch_mach_t dm, dispatch_mach_flags_t flags) { dm->dm_strict_reply = !!(flags & DMF_USE_STRICT_REPLY); - dm->dm_arm_no_senders = !!(flags & DMF_REQUEST_NO_SENDERS); - _dispatch_queue_setter_assert_inactive(dm); } @@ -213,8 +225,28 @@ _dispatch_mach_arm_no_senders(dispatch_mach_t dm, bool allow_previous) kern_return_t kr; if (MACH_PORT_VALID(recvp)) { + // + // + // Establishing a peer-connection can be done in two ways: + // 1) the client makes a receive right with an inserted send right, + // and ships the receive right across in a checkin message, + // + // 2) the server makes a receive right and "make-send" a send right + // in the checkin reply. + // + // While for the case (1) which is the typical XPC case, at the time + // dispatch_mach_connect() is called the send right for the peer + // connection is made, for case (2) it will only be made later. + // + // We use dm->dm_made_sendrights to determine which case we're in. 
If + // (1), sync = 0 since the send right could have gone away and we want + // no-senders to fire immediately. If (2), sync = 1, we want to fire + // no-senders only after creating at least one send right. + + mach_port_mscount_t sync = dm->dm_made_sendrights ? 0 : 1; + kr = mach_port_request_notification(mach_task_self(), recvp, - MACH_NOTIFY_NO_SENDERS, 0, recvp, + MACH_NOTIFY_NO_SENDERS, sync, recvp, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); @@ -472,30 +504,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, #pragma mark - #pragma mark dispatch_mach_msg -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline bool -_dispatch_use_mach_special_reply_port(void) -{ -#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE - return true; -#else -#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;}) - return false; -#endif -} - static void _dispatch_destruct_reply_port(mach_port_t reply_port, enum thread_destruct_special_reply_port_rights rights) { kern_return_t kr = KERN_SUCCESS; - if (_dispatch_use_mach_special_reply_port()) { - kr = thread_destruct_special_reply_port(reply_port, rights); - } else if (rights == THREAD_SPECIAL_REPLY_PORT_ALL || - rights == THREAD_SPECIAL_REPLY_PORT_RECEIVE_ONLY) { - kr = mach_port_destruct(mach_task_self(), reply_port, 0, 0); - } + kr = thread_destruct_special_reply_port(reply_port, rights); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); } @@ -504,25 +519,16 @@ static mach_port_t _dispatch_get_thread_reply_port(void) { mach_port_t reply_port, mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { reply_port = mrp; _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", reply_port); } else { - if (_dispatch_use_mach_special_reply_port()) { - reply_port = thread_get_special_reply_port(); - _dispatch_set_thread_special_reply_port(reply_port); - } else { - reply_port = mach_reply_port(); - _dispatch_set_thread_mig_reply_port(reply_port); - } + reply_port = thread_get_special_reply_port(); + _dispatch_set_thread_special_reply_port(reply_port); if (unlikely(!MACH_PORT_VALID(reply_port))) { - DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(), + DISPATCH_CLIENT_CRASH(0, "Unable to allocate reply port, possible port leak"); } _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", @@ -535,12 +541,7 @@ _dispatch_get_thread_reply_port(void) static void _dispatch_clear_thread_reply_port(mach_port_t reply_port) { - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (reply_port != mrp) { if (mrp) { _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " @@ -548,11 +549,7 @@ _dispatch_clear_thread_reply_port(mach_port_t reply_port) } return; } - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); - } else { - _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); - } + _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); _dispatch_debug_machport(reply_port); _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", reply_port); @@ -562,23 +559,14 @@ static void 
_dispatch_set_thread_reply_port(mach_port_t reply_port) { _dispatch_debug_machport(reply_port); - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { _dispatch_destruct_reply_port(reply_port, THREAD_SPECIAL_REPLY_PORT_ALL); _dispatch_debug("machport[0x%08x]: deallocated sync reply port " "(found 0x%08x)", reply_port, mrp); } else { - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(reply_port); - } else { - _dispatch_set_thread_mig_reply_port(reply_port); - } + _dispatch_set_thread_special_reply_port(reply_port); _dispatch_debug("machport[0x%08x]: restored thread sync reply port", reply_port); } @@ -1050,6 +1038,39 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, } } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_priority_in_voucher(void) +{ + return DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_msg_priority_t +_dispatch_mach_send_priority(dispatch_mach_msg_t dmsg, + dispatch_qos_t qos_ovr, mach_msg_option_t *opts) +{ + qos_ovr = _dispatch_qos_propagate(qos_ovr); + if (qos_ovr) { +#if DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED + if (!_dispatch_mach_send_priority_in_voucher()) { + mach_msg_qos_t qos; + int relpri; + + qos = (mach_msg_qos_t)_dispatch_qos_from_pp(dmsg->dmsg_priority); + relpri = _pthread_priority_relpri(dmsg->dmsg_priority); + *opts |= MACH_SEND_OVERRIDE; + return mach_msg_priority_encode((mach_msg_qos_t)qos_ovr, qos, relpri); + } +#else + (void)dmsg; +#endif + *opts |= MACH_SEND_OVERRIDE; + return (mach_msg_priority_t)_dispatch_qos_to_pp(qos_ovr); + } + return MACH_MSG_PRIORITY_UNSPECIFIED; +} + DISPATCH_NOINLINE static uint32_t _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, @@ -1077,8 +1098,19 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dm->dm_needs_mgr = true; goto out; } + // Tag the checkin message with a voucher and priority and necessary + // options + (void) _dispatch_mach_send_msg_prepare(dm, dsrr->dmsr_checkin, 0); if (unlikely(!_dispatch_mach_msg_send(dm, dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { + + // We failed to send the checkin message, clear the voucher on + // it and let the retry tag it with the voucher later. 
+ voucher_t v = dsrr->dmsr_checkin->dmsg_voucher; + if (v) { + _voucher_release(v); + dsrr->dmsr_checkin->dmsg_voucher = NULL; + } goto out; } if (dm->dm_arm_no_senders) { @@ -1108,24 +1140,20 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; - if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + if (_dispatch_mach_send_priority_in_voucher() && + dmsg->dmsg_priority != _voucher_get_priority(voucher)) { ipc_kvoucher = _voucher_create_mach_voucher_with_priority( voucher, dmsg->dmsg_priority); } _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (ipc_kvoucher) { + if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { kvoucher_move_send = true; clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, ipc_kvoucher, kvoucher_move_send); } else { clear_voucher = _voucher_mach_msg_set(msg, voucher); } - if (qos) { - opts |= MACH_SEND_OVERRIDE; - msg_priority = (mach_msg_priority_t) - _dispatch_priority_compute_propagated( - _dispatch_qos_to_pp(qos), 0); - } + msg_priority = _dispatch_mach_send_priority(dmsg, qos, &opts); if (reply_port && dm->dm_strict_reply) { opts |= MACH_MSG_STRICT_REPLY; } @@ -1134,9 +1162,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (reply_port) _dispatch_debug_machport(reply_port); if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { if (dwr->dwr_refs.dmr_reply_port_owned) { - if (_dispatch_use_mach_special_reply_port()) { - opts |= MACH_SEND_SYNC_OVERRIDE; - } + opts |= MACH_SEND_SYNC_OVERRIDE; _dispatch_clear_thread_reply_port(reply_port); } _dispatch_mach_reply_waiter_register(dm, dwr, reply_port, dmsg); @@ -1163,13 +1189,31 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, } } if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (msg->msgh_remote_port == MACH_PORT_DEAD) { + // It's possible that the remote port may have died after the + // attempt to enqueue the message timed out. In this case, the + // pseudo-receive will copy-out MOVE_SEND over the disposition and + // MACH_PORT_DEAD for the remote port name, without giving us a + // deadname ref for the send right name. + // + // When we next attempt to resend this message, we'll overwrite the + // remote port back to the channel send right. It is therefore + // crucial that we reset the disposition to COPY_SEND, since the ref + // the MOVE_SEND was referring to never actually arrived. 
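			// [Editor's aside -- an illustrative sketch, not part of the
			// change: msgh_bits packs the remote, local and voucher
			// dispositions side by side, and MACH_MSGH_BITS_REMOTE_MASK
			// covers only the remote bits, so the fixup below rewrites the
			// remote disposition while leaving the reply-port and voucher
			// dispositions untouched:
			//
			//   mach_msg_bits_t b = MACH_MSGH_BITS_SET(
			//           MACH_MSG_TYPE_COPY_SEND,
			//           MACH_MSG_TYPE_MAKE_SEND_ONCE, 0, 0);
			//   assert(MACH_MSGH_BITS_REMOTE(b) == MACH_MSG_TYPE_COPY_SEND);
			//   assert(MACH_MSGH_BITS_LOCAL(b) ==
			//           MACH_MSG_TYPE_MAKE_SEND_ONCE);
			// ]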
+			//
+			// rdar://77994175
+
+			msg->msgh_bits &= ~((mach_msg_bits_t)MACH_MSGH_BITS_REMOTE_MASK);
+			msg->msgh_bits |= MACH_MSG_TYPE_COPY_SEND;
+		}
+
 		if (opts & MACH_SEND_NOTIFY) {
 			_dispatch_mach_notification_set_armed(dsrr);
 		} else {
 			// send kevent must be installed on the manager queue
 			dm->dm_needs_mgr = true;
 		}
-		if (ipc_kvoucher) {
+		if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) {
 			_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher);
 			voucher_t ipc_voucher;
 			ipc_voucher = _voucher_create_with_priority_and_mach_voucher(
@@ -1516,9 +1560,13 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou,
 	uint64_t old_state, new_state, state_flags = 0;
 	struct dispatch_object_s *prev;
 	dispatch_wakeup_flags_t wflags = 0;
-	bool is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER));
+	bool is_send_barrier = false;
 	dispatch_tid owner;
 
+	if (_dispatch_object_has_vtable(dou._dc)) {
+		is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER));
+	}
+
 	// the send queue needs to retain
 	// the mach channel if not empty, for the whole duration of this call
 	//
@@ -1784,8 +1832,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm,
 		dmsg->dmsg_priority = 0;
 	} else {
 		unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT;
-		if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) &&
-				_dispatch_use_mach_special_reply_port()) {
+		if (options & DISPATCH_MACH_WAIT_FOR_REPLY) {
 			// TODO: remove QoS contribution of sync IPC messages to send queue
 			// rdar://31848737
 			flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC;
@@ -1922,11 +1969,9 @@ _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm,
 	*returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options);
 	if (dwr->dwr_refs.dmr_reply_port_owned) {
 		_dispatch_clear_thread_reply_port(reply_port);
-		if (_dispatch_use_mach_special_reply_port()) {
-			// link special reply port to send right for remote receive right
-			// TODO: extend to pre-connect phase
-			send = dm->dm_send_refs->dmsr_send;
-		}
+		// link special reply port to send right for remote receive right
+		// TODO: extend to pre-connect phase
+		send = dm->dm_send_refs->dmsr_send;
 	}
 	dmsg = _dispatch_mach_msg_reply_recv(dm, dwr, reply_port, send);
 #if DISPATCH_DEBUG
@@ -2229,6 +2274,15 @@ _dispatch_mach_handoff_context(mach_port_t port)
 	return dihc;
 }
 
+bool
+dispatch_mach_can_handoff_4libxpc(void)
+{
+	dispatch_thread_context_t dtc;
+
+	dtc = _dispatch_thread_context_find(_dispatch_mach_msg_context_key);
+	return dtc && dtc->dtc_dmsg && dtc->dtc_dih->dih_dc.dc_other == NULL;
+}
+
 static void
 _dispatch_ipc_handoff_release(dispatch_ipc_handoff_t dih)
 {
@@ -2500,7 +2554,7 @@ _dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc,
 {
 	dc->dc_data = (void *)dc->dc_flags;
 	dc->dc_other = dm;
-	dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases
+	dc->do_vtable = vtable; // Must be after dc_flags load, do_vtable aliases
 }
 
 DISPATCH_NOINLINE
@@ -3050,6 +3104,36 @@ _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc,
 	_dispatch_continuation_free(dc);
 }
 
+void
+dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_id)
+{
+	mach_msg_trailer_t *tlr = NULL;
+	mach_msg_mac_trailer_t *mac_tlr;
+
+	if (!filter_id) {
+		DISPATCH_CLIENT_CRASH((uintptr_t)filter_id, "Filter id should be non-NULL");
+	}
+
+	mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(msg, NULL);
+	if (!hdr) {
+		DISPATCH_CLIENT_CRASH((uintptr_t)msg, "Message should be non-NULL");
+	}
+	tlr = (mach_msg_trailer_t *)((unsigned char 
*)hdr + + round_msg(hdr->msgh_size)); + + // The trailer should always be of format zero. + if (tlr->msgh_trailer_type != MACH_MSG_TRAILER_FORMAT_0) { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_type, "Trailer format is invalid"); + } + + if (tlr->msgh_trailer_size >= sizeof(mach_msg_mac_trailer_t)) { + mac_tlr = (mach_msg_mac_trailer_t *)tlr; + *filter_id = mac_tlr->msgh_ad; + } else { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_size, "Trailer doesn't contain filter policy id"); + } +} + #pragma mark - #pragma mark dispatch_mig_server @@ -3089,7 +3173,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) { mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT - | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; @@ -3115,7 +3199,9 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (dqf & DSF_CANCELED) || + (--cnt == 0)) { options &= ~MACH_RCV_MSG; tmp_options &= ~MACH_RCV_MSG; diff --git a/src/object.c b/src/object.c index 028800206..8fad3ebad 100644 --- a/src/object.c +++ b/src/object.c @@ -231,6 +231,7 @@ _dispatch_xref_dispose(dispatch_object_t dou) _dispatch_runloop_queue_xref_dispose(dou._dl); break; #endif + } } return _dispatch_release_tailcall(dou._os_obj); } @@ -250,7 +251,7 @@ _dispatch_dispose(dispatch_object_t dou) if (unlikely(tq && tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { // the workloop fallback global queue is never serviced, so redirect // the finalizer onto a global queue - tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)->_as_dq; + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, 0)->_as_dq; } dx_dispose(dou._do, &allow_free); @@ -314,6 +315,10 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { tq = _dispatch_get_default_queue(false); } + + if (_dispatch_queue_is_cooperative(tq)) { + DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_object_set_target_queue_inline(dou._do, tq); } diff --git a/src/object.m b/src/object.m index 936795871..273c5fa3f 100644 --- a/src/object.m +++ b/src/object.m @@ -382,13 +382,16 @@ - (void)_xref_dispose { @end -#define DISPATCH_CLASS_IMPL(name) \ +#define EMPTY_OS_OBJECT_CLASS_IMPL(name) \ OS_OBJECT_NONLAZY_CLASS \ - @implementation DISPATCH_CLASS(name) \ + @implementation name \ OS_OBJECT_NONLAZY_CLASS_LOAD \ DISPATCH_UNAVAILABLE_INIT() \ @end +#define DISPATCH_CLASS_IMPL(name) \ + EMPTY_OS_OBJECT_CLASS_IMPL(DISPATCH_CLASS(name)) + #if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_CLASS_IMPL(data) #endif @@ -402,6 +405,7 @@ - (void)_xref_dispose { #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_CLASS_IMPL(queue_pthread_root) #endif +DISPATCH_CLASS_IMPL(queue_cooperative) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_attr) DISPATCH_CLASS_IMPL(mach_msg) @@ -409,6 +413,67 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +#pragma mark os_workgroups + +@implementation OS_OBJECT_CLASS(os_workgroup) +DISPATCH_UNAVAILABLE_INIT() 
+OS_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _os_workgroup_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_dispose(self); + [super dealloc]; +} + +- (NSString *) debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + + os_workgroup_t wg = (os_workgroup_t) self; + _os_workgroup_debug(wg, buf, sizeof(buf)); + + return [nsstring stringWithUTF8String:buf]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_interval) +DISPATCH_UNAVAILABLE_INIT() + +- (void) _xref_dispose { + _os_workgroup_interval_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_interval_dispose(self); + [super dealloc]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_parallel) +DISPATCH_UNAVAILABLE_INIT() +@end + +#pragma mark eventlink + +@implementation OS_OBJECT_CLASS(os_eventlink) +DISPATCH_UNAVAILABLE_INIT() + +- (void) dealloc { + _os_eventlink_dispose(self); + [super dealloc]; +} + +@end + + +#pragma mark vouchers + OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher) OS_OBJECT_NONLAZY_CLASS_LOAD @@ -540,6 +605,18 @@ - (NSString *)debugDescription { } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + @try { + return f(ctxt, i, w); + } + @catch (...) { + objc_terminate(); + } +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index e82c469e7..f11b9c66c 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -178,10 +178,12 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #else #define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, ctype, ...) 
\ OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##ctype, \ @@ -191,19 +193,21 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #endif #define DISPATCH_QUEUE_VTABLE_HEADER(x); \ DISPATCH_OBJECT_VTABLE_HEADER(x); \ - void (*const dq_activate)(dispatch_queue_class_t); \ - void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ - dispatch_wakeup_flags_t); \ - void (*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ - dispatch_qos_t) + void DISPATCH_VTABLE_ENTRY(dq_activate)(dispatch_queue_class_t); \ + void DISPATCH_VTABLE_ENTRY(dq_wakeup)(dispatch_queue_class_t, \ + dispatch_qos_t, dispatch_wakeup_flags_t); \ + void DISPATCH_VTABLE_ENTRY(dq_push)(dispatch_queue_class_t, \ + dispatch_object_t, dispatch_qos_t) #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) #define dx_type(x) dx_vtable(x)->do_type @@ -281,14 +285,22 @@ typedef struct dispatch_invoke_context_s { #if DISPATCH_USE_WORKQUEUE_NARROWING #define DISPATCH_THREAD_IS_NARROWING 1 -#define dispatch_with_disabled_narrowing(dic, ...) ({ \ +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ + flags |= DISPATCH_INVOKE_DISABLED_NARROWING; \ + __VA_ARGS__; \ + flags &= ~DISPATCH_INVOKE_DISABLED_NARROWING; \ +}) +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \ dic->dic_next_narrow_check = 0; \ __VA_ARGS__; \ dic->dic_next_narrow_check = suspend_narrow_check; \ }) +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ #else -#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ +#define dispatch_with_disabled_narrowing(dic, flags, ...) __VA_ARGS__ #endif DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, @@ -342,11 +354,15 @@ DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, // The queue at the bottom of this drain is a workloop that supports // reordering. 
// + // @const DISPATCH_INVOKE_COOPERATIVE_DRAIN + // The queue at the bottom of this drain is a cooperative global queue + // DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, DISPATCH_INVOKE_THREAD_BOUND = 0x00080000, DISPATCH_INVOKE_WORKLOOP_DRAIN = 0x00100000, + DISPATCH_INVOKE_COOPERATIVE_DRAIN = 0x00200000, #define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x00ff0000u // Autoreleasing modes @@ -360,6 +376,10 @@ DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x01000000, DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x02000000, #define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u + + // @const DISPATCH_INVOKE_DISABLED_NARROWING + // Don't check for narrowing during this invoke + DISPATCH_INVOKE_DISABLED_NARROWING = 0x4000000, ); DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, @@ -370,11 +390,12 @@ DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_OBJECT_CLUSTER = 0x00000000, // dispatch object cluster _DISPATCH_CONTINUATION_TYPE = 0x00000000, // meta-type for continuations - _DISPATCH_SEMAPHORE_TYPE = 0x00000001, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x00000002, // meta-type for data node - _DISPATCH_IO_TYPE = 0x00000003, // meta-type for io channels - _DISPATCH_OPERATION_TYPE = 0x00000004, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x00000005, // meta-type for io disks + _DISPATCH_SWIFT_JOB_TYPE = 0x00000001, // meta-type for swift jobs + _DISPATCH_SEMAPHORE_TYPE = 0x00000002, // meta-type for semaphores + _DISPATCH_NODE_TYPE = 0x00000003, // meta-type for data node + _DISPATCH_IO_TYPE = 0x00000004, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x00000005, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x00000006, // meta-type for io disks _DISPATCH_QUEUE_CLUSTER = 0x00000010, // dispatch queue cluster _DISPATCH_LANE_TYPE = 0x00000011, // meta-type for lanes @@ -403,6 +424,8 @@ DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, DISPATCH_OPERATION_TYPE = DISPATCH_OBJECT_SUBTYPE(0, OPERATION), DISPATCH_DISK_TYPE = DISPATCH_OBJECT_SUBTYPE(0, DISK), + DISPATCH_SWIFT_JOB_TYPE = DISPATCH_OBJECT_SUBTYPE(0, SWIFT_JOB), + DISPATCH_QUEUE_SERIAL_TYPE = DISPATCH_OBJECT_SUBTYPE(1, LANE), DISPATCH_QUEUE_CONCURRENT_TYPE = DISPATCH_OBJECT_SUBTYPE(2, LANE), DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = DISPATCH_OBJECT_SUBTYPE(3, LANE) | @@ -417,6 +440,8 @@ DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_QUEUE_NETWORK_EVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(8, LANE) | _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE= DISPATCH_OBJECT_SUBTYPE(9, LANE) | + _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_WORKLOOP_TYPE = DISPATCH_OBJECT_SUBTYPE(0, WORKLOOP) | _DISPATCH_QUEUE_BASE_TYPEFLAG, @@ -432,7 +457,7 @@ typedef struct _os_object_vtable_s { typedef struct _os_object_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); } _os_object_s; @@ -447,18 +472,25 @@ typedef struct _os_object_s { #else #define OS_OBJECT_STRUCT_HEADER(x) \ _OS_OBJECT_HEADER(\ - const struct x##_vtable_s *do_vtable, \ + const struct x##_vtable_s *__ptrauth_objc_isa_pointer do_vtable, \ do_ref_cnt, \ do_xref_cnt) #endif -#define _DISPATCH_OBJECT_HEADER(x) \ +#define _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct _os_object_s _as_os_obj[0]; \ 
OS_OBJECT_STRUCT_HEADER(dispatch_##x); \ - struct dispatch_##x##_s *volatile do_next; \ + struct dispatch_##x##_s *volatile do_next; + + +#define _DISPATCH_OBJECT_HEADER(x) \ + _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ - void *do_finalizer + union { \ + dispatch_function_t DISPATCH_FUNCTION_POINTER do_finalizer; \ + void *do_introspection_ctxt; \ + } #define DISPATCH_OBJECT_HEADER(x) \ struct dispatch_object_s _as_do[0]; \ @@ -533,7 +565,7 @@ OS_OBJECT_OBJC_CLASS_DECL(object); // This is required by the dispatch_data_t/NSData bridging, which is not // supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) \ - if (unlikely(((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + if (unlikely(((*(uintptr_t *)&((o)._os_obj->os_obj_isa)) & 1) || \ (Class)((o)._os_obj->os_obj_isa) < \ (Class)OS_OBJECT_VTABLE(dispatch_object) || \ (Class)((o)._os_obj->os_obj_isa) >= \ @@ -589,7 +621,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); */ #define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ __typeof__(o) _o = (o); \ - int _ref_cnt = _o->f; \ + int _ref_cnt = os_atomic_load(&_o->f, relaxed); \ if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ } \ diff --git a/src/queue.c b/src/queue.c index 67a57406f..44cdb4aa5 100644 --- a/src/queue.c +++ b/src/queue.c @@ -136,9 +136,14 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, // it from the defaultpri, see _dispatch_priority_compute_update pp |= (_dispatch_get_basepri() & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + + // TODO (rokhinip): Right now there is no binding and unbinding + // to a kqueue for a cooperative thread. We'll need to do this + // right once we get that support } else { - // else we need to keep the one that is set in the current pri - pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // else we need to keep the overcommit/cooperative one that is set on the current + // thread + pp |= (old_pri & _PTHREAD_PRIORITY_THREAD_TYPE_MASK); } if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { pflags |= _PTHREAD_SET_SELF_QOS_FLAG; @@ -302,6 +307,22 @@ _dispatch_block_flags_valid(dispatch_block_flags_t flags) return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_block_remember_async_queue(dispatch_block_private_data_t dbpd, + dispatch_queue_t dq) +{ + // balanced in d_block_sync_invoke or d_block_wait + // + // Note: we need to retain _before_ we publish it, + // because dispatch_block_wait() will eagerly + // consume the refcounts. + _dispatch_retain_2(dq); + if (!os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_release_2(dq); + } +} + DISPATCH_ALWAYS_INLINE static inline dispatch_block_flags_t _dispatch_block_normalize_flags(dispatch_block_flags_t flags) @@ -666,10 +687,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, uintptr_t dc_flags = dc->dc_flags; pthread_priority_t pp = 0; - // balanced in d_block_async_invoke_and_release or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); if (dc_flags & DC_FLAG_CONSUME) { dc->dc_func = _dispatch_block_async_invoke_and_release; @@ -1066,9 +1084,10 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, // these bits should be set if the lock was never contended/discovered. 
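	// [Editor's note: across this patch the SYNC_TRANSFER state bit is
	// retired in favor of tracking uncontended sync ownership directly --
	// lock values now OR in DISPATCH_QUEUE_UNCONTENDED_SYNC, and the unlock
	// paths test _dq_state_in_uncontended_sync() where they used to test
	// _dq_state_in_sync_transfer()/_dq_state_received_sync_wait(). The mask
	// below loses DISPATCH_QUEUE_SYNC_TRANSFER for the same reason.]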
const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | - DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; uint64_t old_state, new_state; + dispatch_wakeup_flags_t flags = 0; // similar to _dispatch_queue_drain_try_unlock os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { @@ -1077,7 +1096,7 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; if (unlikely(old_state & fail_unlock_mask)) { os_atomic_rmw_loop_give_up({ - return _dispatch_lane_barrier_complete(dq, 0, 0); + return _dispatch_lane_barrier_complete(dq, 0, flags); }); } }); @@ -1107,7 +1126,6 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, { dispatch_wlh_t waiter_wlh = dsc->dc_data; -#if DISPATCH_USE_KEVENT_WORKLOOP // // We need to interact with a workloop if any of the following 3 cases: // 1. the current owner of the lock has a SYNC_WAIT knote to destroy @@ -1120,10 +1138,9 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next // owner is really woken up when the thread event is signaled. // -#endif - if (_dq_state_in_sync_transfer(old_state) || - _dq_state_in_sync_transfer(new_state) || - (waiter_wlh != DISPATCH_WLH_ANON)) { + if ((_dq_state_is_base_wlh(old_state) && !dsc->dsc_from_async) || + _dq_state_is_base_wlh(new_state) || + waiter_wlh != DISPATCH_WLH_ANON) { _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); } if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { @@ -1253,9 +1270,7 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, } // passing the QoS of `dq` helps pushing on low priority waiters with // legacy workloops. 
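	//
	// [Editor's note: dsc_from_async is promoted below from an
	// introspection-only field to a functional one -- the wakeup check in
	// _dispatch_waiter_wake() above now reads it to decide whether a
	// base-wlh owner has to be woken through the event loop.]
	//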
-#if DISPATCH_INTROSPECTION dsc->dsc_from_async = false; -#endif return dx_push(tq, dsc, _dq_state_max_qos(old_state)); } @@ -1290,13 +1305,18 @@ _dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, transfer_lock_again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } + new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (_dq_state_is_base_wlh(old_state)) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (next_dc) { // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -1338,7 +1358,13 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, enqueue = 0; } +again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto again); + } new_state = _dq_state_merge_qos(old_state - owned, qos); new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; if (unlikely(_dq_state_is_suspended(old_state))) { @@ -1385,8 +1411,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, old_state, new_state, flags); } @@ -1560,11 +1585,8 @@ _dispatch_wait_prepare(dispatch_queue_t dq) os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { if (_dq_state_is_suspended(old_state) || - !_dq_state_is_base_wlh(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - if (!_dq_state_drain_locked(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_is_base_wlh(old_state) || + !_dq_state_in_uncontended_sync(old_state)) { os_atomic_rmw_loop_give_up(return old_state); } new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; @@ -1641,13 +1663,19 @@ __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) (uint8_t)_dispatch_get_basepri_override_qos_floor(); _dispatch_thread_event_init(&dsc->dsc_event); } + + _dispatch_set_current_dsc((void *) dsc); dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); + _dispatch_trace_runtime_event(sync_wait, dq, 0); if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_wait(&dsc->dsc_event); // acquire - } else { + } else if (!dsc->dsc_wlh_self_wakeup) { _dispatch_event_loop_wait_for_ownership(dsc); } + + _dispatch_clear_current_dsc(); + if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_destroy(&dsc->dsc_event); // If _dispatch_sync_waiter_wake() gave this thread an override, @@ -1892,10 +1920,8 @@ _dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, } ov = _dispatch_set_priority_and_voucher(p, v, 0); - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + 
_dispatch_block_remember_async_queue(dbpd, dq);
+
 	if (dc_flags & DC_FLAG_BARRIER) {
 		_dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke,
 				dc_flags);
@@ -2173,10 +2199,7 @@ _dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq,
 		v = _voucher_get();
 	}
 
-	// balanced in d_block_sync_invoke or d_block_wait
-	if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) {
-		_dispatch_retain_2(dq);
-	}
+	_dispatch_block_remember_async_queue(dbpd, dq);
 
 	dispatch_tid tid = _dispatch_tid_self();
 	struct dispatch_sync_context_s dsc = {
@@ -2449,6 +2472,11 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq)
 {
 	uint64_t old_state, new_state, role;
 
+	/* TODO (rokhinip): We're going to have to change this in the future when we
+	 * allow targeting queues to a cooperative pool and need to figure out what
+	 * kind of a role that gives the queue */
+	dispatch_assert(!_dispatch_queue_is_cooperative(tq));
+
 	if (!dx_hastypeflag(tq, QUEUE_ROOT)) {
 		role = DISPATCH_QUEUE_ROLE_INNER;
 	} else if (_dispatch_base_lane_is_wlh(dq, tq)) {
@@ -2556,7 +2584,7 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq,
 			rqp &= DISPATCH_PRIORITY_REQUESTED_MASK;
 			if (p < rqp) p = rqp;
 
-			p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT);
+			p |= (tq->dq_priority & DISPATCH_PRIORITY_THREAD_TYPE_MASK);
 			if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) ||
 					!(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) {
 				p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR);
@@ -2573,19 +2601,6 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq,
 	return DISPATCH_PRIORITY_FLAG_MANAGER;
 }
 
-DISPATCH_ALWAYS_INLINE
-static void
-_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq)
-{
-	uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed);
-	if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return;
-#if DISPATCH_SIZEOF_PTR == 4
-	dq_state >>= 32;
-#endif
-	DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
-			"dispatch queue/source property setter called after activation");
-}
-
 DISPATCH_ALWAYS_INLINE
 static void
 _dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl)
@@ -2673,8 +2688,12 @@ _dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq,
 	if (_dispatch_is_in_root_queues_array(tq)) {
 		dispatch_qos_t qos = _dispatch_priority_qos(pri);
 		if (!qos) qos = DISPATCH_QOS_DEFAULT;
-		tq = _dispatch_get_root_queue(qos,
-				pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq;
+
+		// TODO (rokhinip): In future, might want to consider whether dq
+		// itself might be tagged cooperative and therefore we need to
+		// adjust tq accordingly
+		uintptr_t flags = (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? DISPATCH_QUEUE_OVERCOMMIT : 0;
+		tq = _dispatch_get_root_queue(qos, flags)->_as_dq;
 	}
 	return tq;
 }
@@ -2740,6 +2759,8 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
 			qos = _dispatch_priority_qos(tq->dq_priority);
 		}
 		tq = NULL;
+	} else if (tq && _dispatch_queue_is_cooperative(tq)) {
+		DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented");
 	} else if (tq && !tq->do_targetq) {
 		// target is a pthread or runloop root queue, setting QoS or overcommit
 		// is disallowed
@@ -2756,9 +2777,10 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
 		}
 	}
 	if (!tq) {
+		uintptr_t flags = (overcommit == _dispatch_queue_attr_overcommit_enabled) ? DISPATCH_QUEUE_OVERCOMMIT : 0;
 		tq = _dispatch_get_root_queue(
 				qos == DISPATCH_QOS_UNSPECIFIED ? 
DISPATCH_QOS_DEFAULT : qos, - overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; + flags)->_as_dq; if (unlikely(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } @@ -2902,7 +2924,7 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a locked queue"); } -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 orig_dq_state >>= 32; #endif DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, @@ -3051,7 +3073,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_UNCONTENDED_SYNC; // backward compatibility: only dispatch sources can abuse // dispatch_resume() to really mean dispatch_activate() @@ -3207,8 +3229,6 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) } return _dispatch_release_2(dq); } - dispatch_assert(!_dq_state_received_sync_wait(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); over_resume: @@ -3524,11 +3544,13 @@ _dispatch_poll_for_events_4launchd(void) } #if DISPATCH_USE_WORKQUEUE_NARROWING + +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); #if !DISPATCH_TIME_UNIT_USES_NANOSECONDS DISPATCH_STATIC_GLOBAL(uint64_t _dispatch_narrow_check_interval_cache); -#endif +#endif /* !DISPATCH_TIME_UNIT_USES_NANOSECONDS */ DISPATCH_ALWAYS_INLINE static inline uint64_t @@ -3567,7 +3589,7 @@ _dispatch_queue_drain_should_narrow_slow(uint64_t now, if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); } - size_t idx = DISPATCH_QOS_BUCKET(qos); + int idx = DISPATCH_QOS_BUCKET(qos); os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); @@ -3599,9 +3621,50 @@ _dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) } return false; } + +bool +dispatch_swift_job_should_yield(void) +{ + return false; +} + +#else /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t __unused dic) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + return (quantum_expiry_action & PTHREAD_WQ_QUANTUM_EXPIRY_NARROW) != 0; +} +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) + +bool +dispatch_swift_job_should_yield(void) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + /* We want to return true here regardless of what the quantum expiry action + * is. There will be specific logic in root queue drain to handle the + * various specific reasons. 
+	 *
+	 * TODO (rokhinip): There is room for some potential optimization to return
+	 * false here if there is nothing else enqueued on the root queue we're
+	 * draining
+	 */
+	return quantum_expiry_action != 0;
+}
+
+#endif /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */
+
 #else
 #define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0)
 #define _dispatch_queue_drain_should_narrow(dic) false
+
+bool
+dispatch_swift_job_should_yield(void)
+{
+	return false;
+}
 #endif
 
 /*
@@ -3671,7 +3734,8 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic,
 		if (unlikely(serial_drain != (dq->dq_width == 1))) {
 			break;
 		}
-		if (unlikely(_dispatch_queue_drain_should_narrow(dic))) {
+		if (unlikely(!(flags & DISPATCH_INVOKE_DISABLED_NARROWING) &&
+				_dispatch_queue_drain_should_narrow(dic))) {
 			break;
 		}
 		if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) {
@@ -3796,13 +3860,11 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq,
 	struct dispatch_object_s *dc = dic->dic_barrier_waiter;
 	dispatch_qos_t qos = dic->dic_barrier_waiter_bucket;
 	if (dc) {
+		dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
+		dsc->dsc_from_async = true;
 		dic->dic_barrier_waiter = NULL;
 		dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED;
 		owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR;
-#if DISPATCH_INTROSPECTION
-		dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
-		dsc->dsc_from_async = true;
-#endif
 		if (qos) {
 			return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl,
 					dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned);
@@ -3994,6 +4056,9 @@ static void
 _dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl)
 {
 	if (dwl->dwl_attr) {
+		if (dwl->dwl_attr->workgroup) {
+			_os_object_release(dwl->dwl_attr->workgroup->_as_os_obj);
+		}
 		free(dwl->dwl_attr);
 	}
 }
@@ -4062,6 +4127,22 @@ dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl,
 #endif // TARGET_OS_MAC
 }
 
+void
+dispatch_workloop_set_os_workgroup(dispatch_workloop_t dwl, os_workgroup_t wg)
+{
+	_dispatch_queue_setter_assert_inactive(dwl);
+	_dispatch_workloop_attributes_alloc_if_needed(dwl);
+
+	os_workgroup_t old_wg = dwl->dwl_attr->workgroup;
+	if (old_wg) {
+		_os_object_release(old_wg->_as_os_obj);
+	}
+
+	/* Take an external ref count on the workgroup */
+	_os_object_retain(wg->_as_os_obj);
+	dwl->dwl_attr->workgroup = wg;
+}
+
 void
 dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, qos_class_t cls,
 		uint64_t flags)
@@ -4108,8 +4189,11 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl,
 	uint64_t old_state, new_state;
 	dispatch_queue_global_t dprq;
 
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
 	dprq = dispatch_pthread_root_queue_create(
 			"com.apple.libdispatch.workloop_fallback", 0, attr, NULL);
+#pragma clang diagnostic pop
 
 	dwl->do_targetq = dprq->_as_dq;
 	_dispatch_retain(dprq);
@@ -4120,18 +4204,6 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl,
 		new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON;
 	});
 }
-
-static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = {
-	DISPATCH_GLOBAL_OBJECT_HEADER(queue_global),
-	.dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE,
-	.do_ctxt = NULL,
-	.dq_label = "com.apple.root.workloop-custom",
-	.dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL),
-	.dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) |
-			DISPATCH_PRIORITY_SATURATED_OVERRIDE,
-	.dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF,
-	.dgq_thread_pool_size = 1,
-};
 #endif // TARGET_OS_MAC 
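
[Editor's aside] A minimal usage sketch for the dispatch_workloop_set_os_workgroup() setter added above -- illustrative only, assuming an already-created os_workgroup_t `wg`. Like the other workloop attribute setters it must be called before activation, and the workloop takes its own reference on the workgroup:

	dispatch_workloop_t dwl = dispatch_workloop_create_inactive("com.example.wl");
	dispatch_workloop_set_os_workgroup(dwl, wg);
	dispatch_activate(dwl);
	// threads draining dwl now join `wg` around the drain and leave on park
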
static void @@ -4155,6 +4227,17 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) dwl->do_targetq = (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; } + + if (dwla->workgroup != NULL) { + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. We want this path to always + // be taken on workloops that have an associated workgroup with them + // because there is no easy way to join and leave a workgroup for just a + // single block + dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); } @@ -4395,13 +4478,17 @@ _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, } os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (likely(_dq_state_is_base_wlh(old_state))) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (has_more_work) { // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -4452,7 +4539,13 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, uint64_t old_state, new_state; +transfer_lock_again: os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = _dq_state_merge_qos(old_state, qos); new_state -= DISPATCH_QUEUE_IN_BARRIER; new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -4496,8 +4589,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl, old_state, new_state, flags); } @@ -4588,6 +4680,9 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (_dq_state_max_qos(new_state)) { + // We need to make sure we have the enqueued bit when we are making + // the syscall to update QoS and we know that we will do it since + // we're at the base anyways new_state |= DISPATCH_QUEUE_ENQUEUED; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { @@ -4636,7 +4731,8 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | - DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + DISPATCH_QUEUE_UNCONTENDED_SYNC; uint64_t old_state, new_state; os_atomic_rmw_loop2o(dwl, dq_state, old_state, 
new_state, release, { @@ -4659,14 +4755,30 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; } - dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + if (_dq_state_is_base_wlh(new_state) && dsc->dc_data != DISPATCH_WLH_ANON) { + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + } if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_workloop_barrier_complete(dwl, qos, 0); + dispatch_wakeup_flags_t flags = 0; + // We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element + // we pushed is still at the head, we can cheat, dequeue everything, + // and keep pretending we weren't contended. + if (dsc->dsc_wlh_was_first && _dispatch_workloop_get_head(dwl, qos) == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dwl; + } + _dispatch_workloop_pop_head(dwl, qos, dc); + return; + } + return _dispatch_workloop_barrier_complete(dwl, qos, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dwl, new_state, 0); } } @@ -4722,7 +4834,12 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, } _dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, assumed_rq); } @@ -4750,8 +4867,13 @@ static void _dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq, dispatch_object_t dou, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = dou._dc; if (_dispatch_object_is_redirection(dc)) { @@ -4775,8 +4897,13 @@ static void _dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq, dispatch_queue_t dq, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); @@ -4924,6 +5051,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { dispatch_queue_t dq = dqu._dq; + uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; 
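	//
	// [Editor's note: old_state/new_state/enqueue are hoisted to the top of
	// _dispatch_queue_wakeup() because the "target" and "qos override"
	// branches below now fall through to a single shared enqueue/override
	// epilogue instead of each duplicating it.]
	//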
dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);
 
 	if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
@@ -4949,7 +5077,6 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
 	}
 
 	if (target) {
-		uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
 		if (target == DISPATCH_QUEUE_WAKEUP_MGR) {
 			enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR;
 		}
@@ -4971,8 +5098,9 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
 			if (likely(!_dq_state_is_suspended(new_state) &&
 					!_dq_state_is_enqueued(old_state) &&
 					(!_dq_state_drain_locked(old_state) ||
-					(enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR &&
-					_dq_state_is_base_wlh(old_state))))) {
+					enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR))) {
+				// Always set the enqueued bit for async enqueues on all queues
+				// in the hierarchy
 				new_state |= enqueue;
 			}
 			if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) {
@@ -4981,37 +5109,11 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
 				os_atomic_rmw_loop_give_up(goto done);
 			}
 		});
-
-		if (likely((old_state ^ new_state) & enqueue)) {
-			dispatch_queue_t tq;
-			if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
-				// the rmw_loop above has no acquire barrier, as the last block
-				// of a queue asyncing to that queue is not an uncommon pattern
-				// and in that case the acquire would be completely useless
-				//
-				// so instead use depdendency ordering to read
-				// the targetq pointer.
-				os_atomic_thread_fence(dependency);
-				tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
-						(long)new_state);
-			} else {
-				tq = target;
-			}
-			dispatch_assert(_dq_state_is_enqueued(new_state));
-			return _dispatch_queue_push_queue(tq, dq, new_state);
-		}
 #if HAVE_PTHREAD_WORKQUEUE_QOS
-		if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
-			if (_dq_state_should_override(new_state)) {
-				return _dispatch_queue_wakeup_with_override(dq, new_state,
-						flags);
-			}
-		}
 	} else if (qos) {
 		//
 		// Someone is trying to override the last work item of the queue.
 		//
-		uint64_t old_state, new_state;
 		os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
 			// Avoid spurious override if the item was drained before we could
 			// apply an override
@@ -5020,15 +5122,72 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
 				os_atomic_rmw_loop_give_up(goto done);
 			}
 			new_state = _dq_state_merge_qos(old_state, qos);
+			if (_dq_state_is_base_wlh(old_state) &&
+					!_dq_state_is_suspended(old_state) &&
+					/* */
+					!_dq_state_is_enqueued_on_manager(old_state)) {
+
+				// Always set the enqueued bit for async enqueues on all queues
+				// in the hierarchy (rdar://62447289)
+				//
+				// Scenario:
+				// - mach channel DM
+				// - targeting TQ
+				//
+				// Thread 1:
+				// - has the lock on (TQ), uncontended sync
+				// - causes a wakeup at a low QoS on DM, causing it to have:
+				//   max_qos = UT, enqueued = 1
+				// - the enqueue of DM onto TQ hasn't happened yet.
+				//
+				// Thread 2:
+				// - an incoming IN IPC is being merged on the servicer
+				// - DM having qos=UT, enqueued=1, no further enqueue happens,
+				//   but we need an extra override and go through this code for
+				//   TQ.
+				// - this causes TQ to be "stashed", which requires the enqueued
+				//   bit set, else try_lock_wlh() will complain and the
+				//   wakeup refcounting will be off. 
+				new_state |= enqueue;
+			}
+
 			if (new_state == old_state) {
 				os_atomic_rmw_loop_give_up(goto done);
 			}
 		});
+
+		target = DISPATCH_QUEUE_WAKEUP_TARGET;
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
+	} else {
+		goto done;
+	}
+
+	if (likely((old_state ^ new_state) & enqueue)) {
+		dispatch_queue_t tq;
+		if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
+			// the rmw_loop above has no acquire barrier, as the last block
+			// of a queue asyncing to that queue is not an uncommon pattern
+			// and in that case the acquire would be completely useless
+			//
+			// so instead use dependency ordering to read
+			// the targetq pointer.
+			os_atomic_thread_fence(dependency);
+			tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
+					(long)new_state);
+		} else {
+			tq = target;
+		}
+		dispatch_assert(_dq_state_is_enqueued(new_state));
+		return _dispatch_queue_push_queue(tq, dq, new_state);
+	}
+#if HAVE_PTHREAD_WORKQUEUE_QOS
+	if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) {
 		if (_dq_state_should_override(new_state)) {
-			return _dispatch_queue_wakeup_with_override(dq, new_state, flags);
+			return _dispatch_queue_wakeup_with_override(dq, new_state,
+					flags);
 		}
-#endif // HAVE_PTHREAD_WORKQUEUE_QOS
 	}
+#endif // HAVE_PTHREAD_WORKQUEUE_QOS
 done:
 	if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) {
 		return _dispatch_release_2_tailcall(dq);
@@ -5091,7 +5250,8 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc,
 			(dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL;
 	uint64_t set_owner_and_set_full_width_and_in_barrier =
 			_dispatch_lock_value_for_self() |
-			DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER;
+			DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
+			DISPATCH_QUEUE_UNCONTENDED_SYNC;
 	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
 		new_state = _dq_state_merge_qos(old_state, qos);
 		new_state |= DISPATCH_QUEUE_DIRTY;
@@ -5110,16 +5270,30 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc,
 		}
 	});
 
-	if (_dq_state_is_base_wlh(old_state)) {
+	if (_dq_state_is_base_wlh(old_state) && dsc->dc_data != DISPATCH_WLH_ANON) {
 		dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self());
 	}
 
 	if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) {
+		struct dispatch_object_s *dc = (struct dispatch_object_s *)dsc;
+
+		// We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element
+		// we pushed is still at the head, we can cheat, dequeue everything,
+		// and keep pretending we weren't contended. 
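		//
		// [Editor's aside: "still at the head" is what makes this cheat
		// safe -- dq_items_head is the oldest pending item, so finding our
		// own element there means nothing was enqueued before the waiter;
		// anything pushed after it simply stays queued while we dequeue
		// only our own element, as the check below verifies.]
		//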
+ if (dsc->dsc_wlh_was_first && dq->dq_items_head == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dq; + } + _dispatch_queue_pop_head(dq, dc); + return; + } return _dispatch_lane_barrier_complete(dq, qos, 0); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } } @@ -5130,7 +5304,7 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, os_atomic_rmw_loop_give_up(return); } }); - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5188,6 +5362,15 @@ void _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, dispatch_qos_t qos) { + if (unlikely(_dispatch_queue_is_cooperative(dq))) { + /* If we're here, means that we're in the simulator fallback case. We + * still restrict what can target the cooperative thread pool */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + // reserving non barrier width // doesn't fail if only the ENQUEUED bit is set (unlike its barrier // width equivalent), so we have to check that this thread hasn't @@ -5202,6 +5385,21 @@ _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, _dispatch_lane_push(dq, dou, qos); } +void +dispatch_async_swift_job(dispatch_queue_t dq, void *object, qos_class_t qos) +{ + dispatch_swift_continuation_t swift_dc; + swift_dc = (dispatch_swift_continuation_t) object; + + dispatch_object_flags_t object_flags = dx_type(swift_dc); + if (object_flags != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(object_flags, + "Used Swift only SPI to enqueue non-Swift runtime objects into dispatch"); + } + + dx_push(dq, swift_dc->_as_do, _dispatch_qos_from_qos_class(qos)); +} + #pragma mark - #pragma mark dispatch_channel_t @@ -5378,7 +5576,7 @@ dispatch_channel_foreach_work_item_peek_f( if (dc == dch->dq_items_tail) { break; } - dc = os_mpsc_get_next(dc, do_next); + dc = os_mpsc_get_next(dc, do_next, &dch->dq_items_tail); } } @@ -5986,7 +6184,8 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) // // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + + pp &= _PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK; if (ddi->ddi_wlh == DISPATCH_WLH_ANON) { pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; } else { @@ -6046,12 +6245,36 @@ _dispatch_wlh_worker_thread_reset(void) } } +static inline os_workgroup_t +_dispatch_wlh_get_workgroup(dispatch_wlh_t wlh) +{ + os_workgroup_t wg = NULL; + dispatch_queue_t dq = (dispatch_queue_t) wlh; + if (wlh != DISPATCH_WLH_ANON && (dx_type(dq) == DISPATCH_WORKLOOP_TYPE)) { + dispatch_workloop_t dwl = (dispatch_workloop_t) dq; + if (dwl->dwl_attr) { + wg = dwl->dwl_attr->workgroup; + } + } + + return wg; +} + DISPATCH_ALWAYS_INLINE static void _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, 
int *nevents) { _dispatch_introspection_thread_add(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field will make sure that we have no bad state lingering. + * + * If the thread is part of the cooperative workq quantum world, we know + * that the thread has just had its workq quantum armed before coming out to + * userspace, so we clobber this to make sure that we start fresh */ + _dispatch_ack_quantum_expiry_action(); +#endif DISPATCH_PERF_MON_VAR_INIT @@ -6061,6 +6284,13 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, }; bool is_manager; + os_workgroup_t wg = _dispatch_wlh_get_workgroup(wlh); + os_workgroup_join_token_s join_token = {0}; + if (wg) { + int rv = os_workgroup_join(wg, &join_token); + dispatch_assert(rv == 0); + } + is_manager = _dispatch_wlh_worker_thread_init(&ddi); if (!is_manager) { _dispatch_trace_runtime_event(worker_event_delivery, @@ -6095,12 +6325,25 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, } } + if (wg) { + os_workgroup_leave(wg, &join_token); + } + _dispatch_deferred_items_set(NULL); if (!is_manager && !ddi.ddi_stashed_dou._do) { _dispatch_perfmon_end(perfmon_thread_event_no_steal); } _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); _dispatch_clear_return_to_kernel(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field should be a noop. + * + * If the thread is part of the cooperative workq quantum world, the thread + * is not going to take any action on the workq quantum action regardless + * since it is going to park so we clear it anyways */ + _dispatch_ack_quantum_expiry_action(); +#endif *nevents = ddi.ddi_nevents; _dispatch_trace_runtime_event(worker_park, NULL, 0); @@ -6197,6 +6440,15 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); (void)dispatch_assume_zero(r); return; +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + } else if (dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE) { + _dispatch_root_queue_debug("requesting new worker thread for cooperative global " + "queue: %p", dq); + r = _pthread_workqueue_add_cooperativethreads(remaining, + _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); + (void)dispatch_assume_zero(r); + return; +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ } #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE #if DISPATCH_USE_PTHREAD_POOL @@ -6295,7 +6547,8 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) } #if !DISPATCH_USE_INTERNAL_WORKQUEUE #if DISPATCH_USE_PTHREAD_POOL - if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) + if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || + dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE)) #endif { if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { @@ -6432,7 +6685,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) goto out; } // There must be a next item now. 
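+		/*
+		 * Context for the change below (an illustrative sketch of the MPSC
+		 * push protocol, not code from this change): a push publishes the
+		 * tail first and links the previous node second, roughly
+		 *
+		 *	prev = os_atomic_xchg(&dq->dq_items_tail, dc, release);
+		 *	// a drainer that sees `prev` but not `prev->do_next` yet knows
+		 *	// an enqueuer is between these two stores and can yield to it
+		 *	os_atomic_store(&prev->do_next, dc, relaxed);
+		 *
+		 * which is why os_mpsc_get_next() now threads &dq->dq_items_tail
+		 * down to _dispatch_wait_for_enqueuer() (src/shims/yield.c below).
+		 */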
- next = os_mpsc_get_next(head, do_next); + next = os_mpsc_get_next(head, do_next, &dq->dq_items_tail); } os_atomic_store2o(dq, dq_items_head, next, relaxed); @@ -6464,15 +6717,12 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { dx_invoke(dq, &dic, flags); -#if DISPATCH_USE_KEVENT_WORKLOOP // // dx_invoke() will always return `dq` unlocked or locked by another // thread, and either have consumed the +2 or transferred it to the // other thread. // -#endif if (!ddi->ddi_wlh_needs_delete) { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The fate of the workloop thread request has already been dealt // with, which can happen for 4 reasons, for which we just want @@ -6482,10 +6732,8 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // - the workloop has been re-enqueued on the manager queue // - the workloop ownership has been handed off to a sync owner // -#endif goto park; } -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop has been drained to completion or suspended. // dx_invoke() has cleared the enqueued bit before it returned. @@ -6506,7 +6754,6 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // Take over that +1, and add our own to make the +2 this loop expects, // and drain again. // -#endif // DISPATCH_USE_KEVENT_WORKLOOP dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 goto park; @@ -6517,18 +6764,17 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi goto retry; } } else { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop enters this function with a +2 refcount, however we // couldn't acquire the lock due to suspension or discovering that // the workloop was locked by a sync owner. // // We need to give up, and _dispatch_event_loop_leave_deferred() - // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to + // will do a DISPATCH_WORKLOOP_SYNC_DISCOVER and + // a DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE transition to // tell the kernel to stop driving this thread request. We leave // a +1 with the thread request, and consume the extra +1 we have. // -#endif if (_dq_state_is_suspended(dq_state)) { dispatch_assert(!_dq_state_is_enqueued(dq_state)); _dispatch_release_2_no_dispose(dq); @@ -6613,6 +6859,16 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { break; } + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* There is no need to check to see if we need to shuffle since by + * virtue of the fact that we're here, we're timesharing between the + * work items anyways - just eat the quantum expiry action. + * + * In the future, we'd expand this to include more checks for various + * other quantum expiry actions */ + _dispatch_ack_quantum_expiry_action(); +#endif } // overcommit or not. 
worker thread @@ -6634,22 +6890,42 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, static void _dispatch_worker_thread2(pthread_priority_t pp) { +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_global_t dq; + bool cooperative = pp & _PTHREAD_PRIORITY_COOPERATIVE_FLAG; - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp &= (_PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK); _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); + + dispatch_queue_global_t dq; + dispatch_invoke_flags_t invoke_flags = 0; + + uintptr_t rq_flags = 0; + if (cooperative) { + rq_flags |= DISPATCH_QUEUE_COOPERATIVE; + invoke_flags |= DISPATCH_INVOKE_COOPERATIVE_DRAIN; + } else { + rq_flags |= (overcommit ? DISPATCH_QUEUE_OVERCOMMIT : 0); + } + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), rq_flags); _dispatch_introspection_thread_add(); _dispatch_trace_runtime_event(worker_unpark, dq, 0); int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq, dq->dq_priority, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + + invoke_flags |= DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_root_queue_drain(dq, dq->dq_priority, invoke_flags); _dispatch_voucher_debug("root queue clear", NULL); _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif _dispatch_trace_runtime_event(worker_park, NULL, 0); } #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE @@ -6704,6 +6980,13 @@ _dispatch_worker_thread(void *context) _dispatch_set_pthread_root_queue_observer_hooks( &pqc->dpq_observer_hooks); } + + /* Set it up before the configure block so that it can get overridden by + * client if they want to name their threads differently */ + if (dq->_as_dq->dq_label) { + pthread_setname_np(dq->_as_dq->dq_label); + } + if (pqc->dpq_thread_configure) { pqc->dpq_thread_configure(); } @@ -6793,6 +7076,15 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, dispatch_priority_t rq_overcommit; rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + // TODO (rokhinip): When we add kevent support for the cooperative pool, + // we need to fix this logic to make sure that we have the following + // ranking: + // + // non_overcommit < cooperative < overcommit + + // After parsing kevents, we could have stashed a non-overcommit work + // item to do but if an overcommit/cooperative request comes in, prefer + // that. 
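+		/*
+		 * Illustrative only: one hypothetical shape for the ranking in the
+		 * TODO above - no such helper exists in this change.
+		 *
+		 *	static inline int
+		 *	_dispatch_rq_stash_rank(dispatch_queue_global_t rq)
+		 *	{
+		 *		if (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
+		 *			return 2;
+		 *		}
+		 *		if (_dispatch_queue_is_cooperative(rq)) {
+		 *			return 1;
+		 *		}
+		 *		return 0; // plain non-overcommit ranks lowest
+		 *	}
+		 */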
if (likely(!old_dou._do || rq_overcommit)) { dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; dispatch_qos_t old_qos = ddi->ddi_stashed_qos; @@ -6814,6 +7106,16 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, } } #endif + + if (_dispatch_queue_is_cooperative(rq)) { + /* We only allow enqueueing of continuations or swift job objects on the + * cooperative pool, no other objects */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + #if HAVE_PTHREAD_WORKQUEUE_QOS if (_dispatch_root_queue_push_needs_override(rq, qos)) { return _dispatch_root_queue_push_override(rq, dou, qos); @@ -7151,6 +7453,7 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, _dispatch_runloop_queue_handle_init); } + qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { new_state = _dq_state_merge_qos(old_state, qos); if (old_state == new_state) { @@ -7648,9 +7951,42 @@ _dispatch_context_cleanup(void *ctxt) DISPATCH_INTERNAL_CRASH(ctxt, "Premature thread exit while a dispatch context is set"); } + #pragma mark - #pragma mark dispatch_init +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE +static void +_dispatch_cooperative_root_queue_init_fallback(dispatch_queue_global_t dq) +{ + uint16_t max_cpus = (uint16_t) dispatch_hw_config(logical_cpus); + uint16_t width_per_cooperative_queue; + + if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) { + /* We want width 1 for a strict runtime - implement it as a width 1 + * concurrent queue */ + width_per_cooperative_queue = 1; + } else { + /* Concurrent queue with limited width */ + width_per_cooperative_queue = MAX(max_cpus/DISPATCH_QOS_NBUCKETS, 1); + } + + dispatch_priority_t pri = dq->dq_priority; + dispatch_qos_t qos = (pri & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
+			_dispatch_priority_fallback_qos(pri) : _dispatch_priority_qos(pri);
+
+	/* _dispatch_queue_init will clobber the serial num so just save it and
+	 * restore it */
+	unsigned long dq_serialnum = dq->dq_serialnum;
+	_dispatch_queue_init(dq, 0, width_per_cooperative_queue, DISPATCH_QUEUE_ROLE_BASE_ANON);
+	dq->dq_serialnum = dq_serialnum;
+
+	dispatch_queue_t tq = _dispatch_get_root_queue(qos, 0)->_as_dq;
+	_dispatch_retain(tq);
+	dq->do_targetq = tq;
+}
+#endif
+
 static void
 _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
 {
@@ -7670,6 +8006,14 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
 				"QoS Maintenance support required");
 	}
 
+#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE
+	for (int i = DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE;
+			i < _DISPATCH_ROOT_QUEUE_IDX_COUNT; i += DISPATCH_ROOT_QUEUE_FLAVORS)
+	{
+		_dispatch_cooperative_root_queue_init_fallback(&_dispatch_root_queues[i]);
+	}
+#endif
+
 #if DISPATCH_USE_KEVENT_SETUP
 	struct pthread_workqueue_config cfg = {
 		.version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
@@ -7732,6 +8076,19 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
 		DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported,
 				"Root queue initialization failed");
 	}
+
+#if DISPATCH_USE_COOPERATIVE_WORKQUEUE
+	if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) {
+		int pool_size_limit = -1; /* strict per QoS bucket */
+		r = sysctlbyname("kern.wq_limit_cooperative_threads", NULL, NULL, &pool_size_limit,
+				sizeof(int));
+
+		if (r != 0) {
+			DISPATCH_INTERNAL_CRASH(errno, "Unable to limit cooperative pool size");
+		}
+	}
+#endif
+
 #endif // DISPATCH_USE_INTERNAL_WORKQUEUE
 }
 
@@ -7744,6 +8101,38 @@ _dispatch_root_queues_init(void)
 			_dispatch_root_queues_init_once);
 }
 
+dispatch_queue_global_t
+dispatch_get_global_queue(intptr_t priority, uintptr_t flags)
+{
+	if (flags & ~(unsigned long)(DISPATCH_QUEUE_OVERCOMMIT | DISPATCH_QUEUE_COOPERATIVE)) {
+		return DISPATCH_BAD_INPUT;
+	}
+
+	if ((flags & DISPATCH_QUEUE_OVERCOMMIT) && (flags & DISPATCH_QUEUE_COOPERATIVE)) {
+		return DISPATCH_BAD_INPUT;
+	}
+
+	dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
+#if !HAVE_PTHREAD_WORKQUEUE_QOS
+	if (qos == QOS_CLASS_MAINTENANCE) {
+		qos = DISPATCH_QOS_BACKGROUND;
+	} else if (qos == QOS_CLASS_USER_INTERACTIVE) {
+		qos = DISPATCH_QOS_USER_INITIATED;
+	}
+#endif
+	if (qos == DISPATCH_QOS_UNSPECIFIED) {
+		return DISPATCH_BAD_INPUT;
+	}
+
+#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE
+	/* The fallback implementation of the cooperative root queues needs to be
+	 * fully initialized before work can be enqueued on these queues */
+	_dispatch_root_queues_init();
+#endif
+
+	return _dispatch_get_root_queue(qos, flags);
+}
+
 DISPATCH_EXPORT DISPATCH_NOTHROW
 void
 libdispatch_init(void)
@@ -7754,14 +8143,10 @@ libdispatch_init(void)
 	if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) {
 		_dispatch_mode |= DISPATCH_MODE_STRICT;
 	}
-#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
-	if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) {
-		_dispatch_mode |= DISPATCH_MODE_NO_FAULTS;
-	} else if (getpid() == 1 ||
-			!os_variant_has_internal_diagnostics("com.apple.libdispatch")) {
-		_dispatch_mode |= DISPATCH_MODE_NO_FAULTS;
+
+	if (_dispatch_getenv_bool("LIBDISPATCH_COOPERATIVE_POOL_STRICT", false)) {
+		_dispatch_mode |= DISPATCH_COOPERATIVE_POOL_STRICT;
 	}
-#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
 
 #if DISPATCH_DEBUG || DISPATCH_PROFILE
@@ -7803,8 +8188,11 @@ libdispatch_init(void)
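+/*
+ * Usage sketch for the new dispatch_get_global_queue() above (illustrative
+ * only). DISPATCH_QUEUE_COOPERATIVE is the flag validated there; it is
+ * mutually exclusive with DISPATCH_QUEUE_OVERCOMMIT:
+ *
+ *	dispatch_queue_global_t q = dispatch_get_global_queue(
+ *			QOS_CLASS_DEFAULT, DISPATCH_QUEUE_COOPERATIVE);
+ *
+ * For now only Swift runtime jobs pushed via dispatch_async_swift_job() may
+ * target this queue; anything else hits the DISPATCH_CLIENT_CRASH checks in
+ * _dispatch_root_queue_push() and _dispatch_lane_concurrent_push().
+ */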
_dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _dispatch_thread_key_create(&dispatch_quantum_key, NULL); + _dispatch_thread_key_create(&dispatch_dsc_key, NULL); + _dispatch_thread_key_create(&os_workgroup_key, _os_workgroup_tsd_cleanup); + _dispatch_thread_key_create(&dispatch_enqueue_key, NULL); #endif - #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); #endif @@ -7821,6 +8209,9 @@ libdispatch_init(void) _dispatch_vtable_init(); _os_object_init(); _voucher_init(); +#if TARGET_OS_MAC + _workgroup_init(); +#endif _dispatch_introspection_init(); } @@ -7946,6 +8337,9 @@ _libdispatch_tsd_cleanup(void *ctx) _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _tsd_call_cleanup(dispatch_quantum_key, NULL); + _tsd_call_cleanup(dispatch_enqueue_key, NULL); + _tsd_call_cleanup(dispatch_dsc_key, NULL); #ifdef __ANDROID__ if (_dispatch_thread_detach_callback) { _dispatch_thread_detach_callback(); diff --git a/src/queue_internal.h b/src/queue_internal.h index 34e22e296..68a5fec23 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -318,7 +318,7 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, * * sw: has received sync wait (bit 35, if role DISPATCH_QUEUE_ROLE_BASE_WLH) * Set when a queue owner has been exposed to the kernel because of - * dispatch_sync() contention. + * contention with dispatch_sync(). */ #define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull #define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull @@ -334,14 +334,14 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, * drain stealers (like the QoS Override codepath). It holds the identity * (thread port) of the current drainer. 
* - * st: sync transfer (bit 1 or 30) - * Set when a dispatch_sync() is transferred to + * us: uncontended sync (bit 1 or 30) + * Set when a dispatch_sync() isn't contending * * e: enqueued bit (bit 0 or 31) * Set when a queue is enqueued on its target queue */ #define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK) -#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) +#define DISPATCH_QUEUE_UNCONTENDED_SYNC ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) #define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT) #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ @@ -350,7 +350,7 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, #define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \ (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \ - DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER) + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_UNCONTENDED_SYNC) /* ******************************************************************************* @@ -475,6 +475,7 @@ typedef struct dispatch_workloop_attr_s { uint8_t percent; uint32_t refillms; } dwla_cpupercent; + os_workgroup_t workgroup; dispatch_pthread_root_queue_observer_hooks_s dwla_observers; } dispatch_workloop_attr_s; @@ -739,6 +740,7 @@ DISPATCH_SUBCLASS_DECL(queue_global, queue, lane); #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_INTERNAL_SUBCLASS_DECL(queue_pthread_root, queue, lane); #endif +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_cooperative, queue, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue_serial, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue_serial, lane); @@ -863,45 +865,56 @@ DISPATCH_COLD size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char *buf, size_t bufsiz); -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2) +#define DISPATCH_ROOT_QUEUE_FLAVORS 3 +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * DISPATCH_ROOT_QUEUE_FLAVORS) // must be in lowest to highest qos order (as encoded in dispatch_qos_t) -// overcommit qos index values need bit 1 set enum { DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_COOPERATIVE, _DISPATCH_ROOT_QUEUE_IDX_COUNT, }; +#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT \ + (DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS) +#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE \ + (DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS) + // skip zero // 1 - main_q // 2 - mgr_q // 3 - mgr_root_q -// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues -// 17 - workloop_fallback_q +// 4 - 21 - global queues +// 22 - workloop_fallback_q // we use 'xadd' on Intel, so the initial value == next assigned -#define 
DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17
+#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 23
 extern unsigned long volatile _dispatch_queue_serial_numbers;
 
 // mark the workloop fallback queue to avoid finalizing objects on the base
 // queue of custom outside-of-qos workloops
-#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 16
+#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 22
 
 extern struct dispatch_queue_static_s _dispatch_mgr_q; // serial 2
 #if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
 extern struct dispatch_queue_global_s _dispatch_mgr_root_queue; // serial 3
 #endif
-extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 15
+extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 21
 
 #if DISPATCH_DEBUG
 #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
@@ -967,7 +980,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t);
 #if DISPATCH_SIZEOF_PTR == 8
 #define DISPATCH_CONTINUATION_HEADER(x) \
 	union { \
-		const void *do_vtable; \
+		const void *__ptrauth_objc_isa_pointer do_vtable; \
 		uintptr_t dc_flags; \
 	}; \
 	union { \
@@ -991,7 +1004,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t);
 	}; \
 	struct voucher_s *dc_voucher; \
 	union { \
-		const void *do_vtable; \
+		const void *__ptrauth_objc_isa_pointer do_vtable; \
 		uintptr_t dc_flags; \
 	}; \
 	struct dispatch_##x##_s *volatile do_next; \
@@ -1001,7 +1014,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t);
 #else
 #define DISPATCH_CONTINUATION_HEADER(x) \
 	union { \
-		const void *do_vtable; \
+		const void *__ptrauth_objc_isa_pointer do_vtable; \
 		uintptr_t dc_flags; \
 	}; \
 	union { \
@@ -1062,6 +1075,36 @@ typedef struct dispatch_continuation_s {
 dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_next);
 dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_vtable);
 
+/* Swift runtime objects to be enqueued into dispatch */
+struct dispatch_swift_continuation_s;
+
+struct dispatch_swift_continuation_extra_vtable_s {
+	unsigned long const do_type;
+	void DISPATCH_VTABLE_ENTRY(do_invoke)(struct dispatch_swift_continuation_s *,
+			void *, dispatch_swift_job_invoke_flags_t flags);
+};
+
+typedef struct dispatch_swift_continuation_vtable_s {
+	_OS_OBJECT_CLASS_HEADER();
+	struct dispatch_swift_continuation_extra_vtable_s _os_obj_vtable;
+} const *dispatch_swift_continuation_vtable_t;
+
+/* This is the internal representation of a Swift object that will be enqueued
+ * onto dispatch. The actual object may be bigger but we only care about this
+ * piece of it. The vtable the continuation points to will be interpreted as a
+ * dispatch_swift_continuation_vtable_t even if it is bigger.
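+ *
+ * An illustrative sketch (an assumption, not actual runtime code) of a
+ * vtable conforming to the layout above:
+ *
+ *	static const struct dispatch_swift_continuation_vtable_s _job_vt = {
+ *		._os_obj_vtable = {
+ *			.do_type = DISPATCH_SWIFT_JOB_TYPE,
+ *			.do_invoke = _hypothetical_job_invoke,
+ *		},
+ *	};
+ *
+ * With such a vtable, the dx_type() check in dispatch_async_swift_job()
+ * identifies the object as DISPATCH_SWIFT_JOB_TYPE before it is pushed.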
+ */ +typedef struct dispatch_swift_continuation_s { + struct dispatch_object_s _as_do[0]; + _DISPATCH_OBJECT_HEADER_INTERNAL(swift_continuation); + void *opaque1; + void *opaque2; + void *opaque3; +} *dispatch_swift_continuation_t; + +dispatch_static_assert(sizeof(struct dispatch_swift_continuation_s) == + sizeof(struct dispatch_object_s)); + typedef struct dispatch_sync_context_s { struct dispatch_continuation_s _as_dc[0]; DISPATCH_CONTINUATION_HEADER(continuation); @@ -1074,12 +1117,11 @@ typedef struct dispatch_sync_context_s { uint8_t dsc_override_qos; uint16_t dsc_autorelease : 2; uint16_t dsc_wlh_was_first : 1; + uint16_t dsc_wlh_self_wakeup : 1; uint16_t dsc_wlh_is_workloop : 1; uint16_t dsc_waiter_needs_cancel : 1; uint16_t dsc_release_storage : 1; -#if DISPATCH_INTROSPECTION uint16_t dsc_from_async : 1; -#endif } *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { @@ -1174,7 +1216,8 @@ struct dispatch_apply_s { #if !OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; #endif - size_t volatile da_index, da_todo; + size_t _Atomic da_index; + size_t _Atomic da_todo; size_t da_iterations; #if OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; @@ -1183,12 +1226,29 @@ struct dispatch_apply_s { dispatch_thread_event_s da_event; dispatch_invoke_flags_t da_flags; int32_t da_thr_cnt; + uint32_t _Atomic da_worker_index; + dispatch_apply_attr_t da_attr; }; dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) == offsetof(struct dispatch_apply_s, da_dc), "These fields must alias so that leaks instruments work"); typedef struct dispatch_apply_s *dispatch_apply_t; +#define DISPATCH_APPLY_ATTR_SIG 0xA11AB000 +struct dispatch_apply_attr_s { + uint32_t sig; + uint32_t flags; + size_t per_cluster_parallelism; + uintptr_t guard; /* To prevent copying */ +#if defined(__LP64__) + uint8_t unused[40]; +#else + uint8_t unused[48]; +#endif +}; +dispatch_static_assert(sizeof(struct dispatch_apply_attr_s) == __DISPATCH_APPLY_ATTR_SIZE__, + "Opaque dispatch apply attr and internal apply attr size should match"); + #pragma mark - #pragma mark dispatch_block_t diff --git a/src/shims.h b/src/shims.h index bce5d08f1..b611a5a73 100644 --- a/src/shims.h +++ b/src/shims.h @@ -55,8 +55,12 @@ #endif #ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#if defined(__APPLE__) +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 64 +#else #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 #endif +#endif /* DISPATCH_WORKQ_MAX_PTHREAD_COUNT */ #include "shims/hw_config.h" #include "shims/priority.h" @@ -65,10 +69,6 @@ #include #endif -#if __has_include() -#include -#endif - #if !HAVE_DECL_FD_COPY #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif @@ -154,10 +154,7 @@ _pthread_workqueue_should_narrow(pthread_priority_t priority) } #endif -#if HAVE_PTHREAD_QOS_H && __has_include() && \ - defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ - DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_QOS_H && __has_include() #define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 #define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL #else @@ -199,6 +196,98 @@ _dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags) return p; } +#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#include + +#if defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 1 +#else // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && 
__arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ + +#else // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM +extern int __bsdthread_ctl(uintptr_t cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); +#include +// the sysctl wants thread_qos_t not dispatch_qos_t +DISPATCH_ALWAYS_INLINE +static inline uint8_t +_dispatch_qos2threadqos(dispatch_qos_t q) +{ + switch (q) { + case DISPATCH_QOS_USER_INTERACTIVE: return THREAD_QOS_USER_INTERACTIVE; + case DISPATCH_QOS_USER_INITIATED: return THREAD_QOS_USER_INITIATED; + case DISPATCH_QOS_DEFAULT: return THREAD_QOS_LEGACY; + case DISPATCH_QOS_UTILITY: return THREAD_QOS_UTILITY; + case DISPATCH_QOS_BACKGROUND: return THREAD_QOS_BACKGROUND; + case DISPATCH_QOS_MAINTENANCE: return THREAD_QOS_MAINTENANCE; + default: return THREAD_QOS_UNSPECIFIED; + } +} +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_cluster_max_parallelism(dispatch_qos_t qos) +{ + uint32_t cluster_count = 0; + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos), PTHREAD_MAX_PARALLELISM_CLUSTER); + if (likely(r > 0)) { + cluster_count = (uint32_t) r; + } +#else + (void)qos; +#endif + return cluster_count; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_set(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET, worker_index, cluster_concurrency); + if (rc != 0) { + if (errno != ENOTSUP) { + /* ENOTSUP = Trying to get on a cluster it is not recommended for. + * + * Other error means something very bad has happened! On things + * like the Simulator we shouldn't even be in here. 
+ * DISPATCH_INTERNAL_CRASH isn't available here + */ + __builtin_trap(); + } + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_clear(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR, worker_index, cluster_concurrency); + if (rc != 0) { + __builtin_trap(); + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not diff --git a/src/shims/lock.c b/src/shims/lock.c index 2f91d8d1d..4a750b3bd 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -59,7 +59,7 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, #pragma mark - semaphores #if USE_MACH_SEM -#if __has_include() +#if __has_include() && !TARGET_OS_SIMULATOR #include #define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 #else @@ -109,7 +109,7 @@ _dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy) } void -_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy) +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int __unused policy) { semaphore_t sema_port = *sema; *sema = MACH_PORT_DEAD; diff --git a/src/shims/priority.h b/src/shims/priority.h index 3a79c5efb..aa0008ce2 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -45,6 +45,13 @@ #ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #endif +#ifndef _PTHREAD_PRIORITY_COOPERATIVE_FLAG +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 +#endif +#ifndef _PTHREAD_PRIORITY_THREAD_TYPE_MASK +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK 0x88000000 +#endif + #else // HAVE_PTHREAD_QOS_H OS_ENUM(qos_class, unsigned int, QOS_CLASS_USER_INTERACTIVE = 0x21, @@ -64,9 +71,12 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK \ + (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | _PTHREAD_PRIORITY_COOPERATIVE_FLAG) #endif // HAVE_PTHREAD_QOS_H @@ -108,9 +118,12 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG #define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_FALLBACK_FLAG #define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define DISPATCH_PRIORITY_FLAG_COOPERATIVE ((dispatch_priority_t)0x08000000) // _PTHREAD_PRIORITY_COOPERATIVE_FLAG #define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_FALLBACK | \ - DISPATCH_PRIORITY_FLAG_MANAGER) + DISPATCH_PRIORITY_FLAG_MANAGER | DISPATCH_PRIORITY_FLAG_COOPERATIVE) +#define DISPATCH_PRIORITY_THREAD_TYPE_MASK \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_COOPERATIVE) // not passed to pthread #define DISPATCH_PRIORITY_FLAG_FLOOR ((dispatch_priority_t)0x40000000) // 
_PTHREAD_PRIORITY_INHERIT_FLAG diff --git a/src/shims/target.h b/src/shims/target.h index a59dd3c3b..425279b19 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -38,15 +38,15 @@ #if TARGET_OS_OSX # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "OS X hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "OS X hosts older than OS X 10.14 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #elif TARGET_OS_SIMULATOR # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "Simulator hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "Simulator hosts older than OS X 10.14 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 # if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 diff --git a/src/shims/time.h b/src/shims/time.h index b57731c9a..851b819c4 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -254,17 +254,34 @@ _dispatch_time_now_cached(dispatch_clock_t clock, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_time_to_clock_and_value(dispatch_time_t time, +_dispatch_time_to_clock_and_value(dispatch_time_t time, bool allow_now, dispatch_clock_t *clock, uint64_t *value) { uint64_t actual_value; + + if (allow_now) { + switch (time) { + case DISPATCH_TIME_NOW: + *clock = DISPATCH_CLOCK_UPTIME; + *value = _dispatch_uptime(); + return; + case DISPATCH_MONOTONICTIME_NOW: + *clock = DISPATCH_CLOCK_MONOTONIC; + *value = _dispatch_monotonic_time(); + return; + case DISPATCH_WALLTIME_NOW: + *clock = DISPATCH_CLOCK_WALL; + *value = _dispatch_get_nanoseconds(); + return; + } + } + if ((int64_t)time < 0) { // Wall time or mach continuous time if (time & DISPATCH_WALLTIME_MASK) { // Wall time (value 11 in bits 63, 62) *clock = DISPATCH_CLOCK_WALL; - actual_value = time == DISPATCH_WALLTIME_NOW ? - _dispatch_get_nanoseconds() : (uint64_t)-time; + actual_value = (uint64_t)-time; } else { // Continuous time (value 10 in bits 63, 62). 
*clock = DISPATCH_CLOCK_MONOTONIC; diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 446c4d796..cf568d90f 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -40,6 +40,11 @@ #include #endif +#if __has_include() +#include +#endif +#include + #if !defined(OS_GS_RELATIVE) && (defined(__i386__) || defined(__x86_64__)) #define OS_GS_RELATIVE __attribute__((address_space(256))) #endif @@ -65,16 +70,8 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif #if DISPATCH_USE_DIRECT_TSD -#ifndef __TSD_THREAD_QOS_CLASS -#define __TSD_THREAD_QOS_CLASS 4 -#endif -#ifndef __TSD_RETURN_TO_KERNEL -#define __TSD_RETURN_TO_KERNEL 5 -#endif -#ifndef __TSD_MACH_SPECIAL_REPLY -#define __TSD_MACH_SPECIAL_REPLY 8 -#endif - +#undef errno +#define errno (*_pthread_errno_address_direct()) static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; @@ -94,8 +91,13 @@ static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif static const unsigned long dispatch_wlh_key = __PTK_LIBDISPATCH_KEY7; -static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; +static const unsigned long dispatch_voucher_key = OS_VOUCHER_TSD_KEY; static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; +static const unsigned long dispatch_quantum_key = __PTK_LIBDISPATCH_KEY10; +static const unsigned long dispatch_dsc_key = __PTK_LIBDISPATCH_KEY11; +static const unsigned long dispatch_enqueue_key = __PTK_LIBDISPATCH_KEY12; + +static const unsigned long os_workgroup_key = __PTK_LIBDISPATCH_WORKGROUP_KEY0; DISPATCH_TSD_INLINE static inline void @@ -153,6 +155,11 @@ struct dispatch_tsd { void *dispatch_wlh_key; void *dispatch_voucher_key; void *dispatch_deferred_items_key; + void *dispatch_quantum_key; + void *dispatch_dsc_key; + void *dispatch_enqueue_key; + + void *os_workgroup_key; }; extern _Thread_local struct dispatch_tsd __dispatch_tsd; @@ -209,6 +216,11 @@ extern pthread_key_t dispatch_bcounter_key; extern pthread_key_t dispatch_wlh_key; extern pthread_key_t dispatch_voucher_key; extern pthread_key_t dispatch_deferred_items_key; +extern pthread_key_t dispatch_quantum_key; +extern pthread_key_t dispatch_dsc_key; +extern pthread_key_t dispatch_enqueue_key; + +extern pthread_key_t os_workgroup_key; DISPATCH_TSD_INLINE static inline void @@ -353,7 +365,11 @@ DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { -#if __has_include() +#if TARGET_OS_SIMULATOR + size_t n; + pthread_cpu_number_np(&n); + return (unsigned int)n; +#elif __has_include() return _os_cpu_number(); #elif defined(__x86_64__) || defined(__i386__) struct { uintptr_t p1, p2; } p; diff --git a/src/shims/yield.c b/src/shims/yield.c index 43f0017ee..cd7e1acf8 100644 --- a/src/shims/yield.c +++ b/src/shims/yield.c @@ -22,18 +22,19 @@ DISPATCH_NOINLINE static void * -__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr) +__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr, void **tailp) { - int spins = 0; + unsigned int spins = 0; void *value; while ((value = os_atomic_load(ptr, relaxed)) == NULL) { - _dispatch_preemption_yield(++spins); + /* ptr == &prev->do_next */ + _dispatch_yield_to_enqueuer(tailp, ++spins); } return value; } void * -_dispatch_wait_for_enqueuer(void **ptr) +_dispatch_wait_for_enqueuer(void **ptr, void **tailp) { #if !DISPATCH_HW_CONFIG_UP #if defined(__arm__) || defined(__arm64__) @@ -57,5 +58,5 @@ 
_dispatch_wait_for_enqueuer(void **ptr)
 	}
 #endif
 #endif // DISPATCH_HW_CONFIG_UP
-	return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr);
+	return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr, tailp);
 }
diff --git a/src/shims/yield.h b/src/shims/yield.h
index 53eb80065..aeb429d44 100644
--- a/src/shims/yield.h
+++ b/src/shims/yield.h
@@ -80,7 +80,7 @@
 #endif
 
 DISPATCH_NOT_TAIL_CALLED DISPATCH_EXPORT
-void *_dispatch_wait_for_enqueuer(void **ptr);
+void *_dispatch_wait_for_enqueuer(void **ptr, void **tailp);
 
 #pragma mark -
 #pragma mark _dispatch_contention_wait_until
@@ -140,12 +140,22 @@ void *_dispatch_wait_for_enqueuer(void **ptr);
 #pragma mark -
 #pragma mark _dispatch_preemption_yield
 
+/* Don't allow directed yield to enqueuer if !_pthread_has_direct_tsd() */
+#ifndef DISPATCH_HAVE_YIELD_TO_ENQUEUER
+#if PTHREAD_HAVE_YIELD_TO_ENQUEUER && !TARGET_OS_SIMULATOR
+#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 1
+#else
+#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 0
+#endif
+#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */
+
 #if HAVE_MACH
 #if defined(SWITCH_OPTION_OSLOCK_DEPRESS)
 #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS
 #else
 #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS
 #endif
+
 #define _dispatch_preemption_yield(n) thread_switch(MACH_PORT_NULL, \
 		DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n))
 #define _dispatch_preemption_yield_to(th, n) thread_switch(th, \
@@ -161,6 +171,20 @@ void *_dispatch_wait_for_enqueuer(void **ptr);
 #define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); }
 #endif // HAVE_MACH
 
+#if DISPATCH_HAVE_YIELD_TO_ENQUEUER
+#define _dispatch_set_enqueuer_for(ptr) \
+		_dispatch_thread_setspecific(dispatch_enqueue_key, (void *) (ptr));
+#define _dispatch_clear_enqueuer() \
+		_dispatch_thread_setspecific(dispatch_enqueue_key, NULL);
+#define _dispatch_yield_to_enqueuer(q, n) \
+		(void) _pthread_yield_to_enqueuer_4dispatch(dispatch_enqueue_key, q, n)
+#else
+#define _dispatch_set_enqueuer_for(ptr)
+#define _dispatch_clear_enqueuer()
+#define _dispatch_yield_to_enqueuer(q, n) \
+		((void) (q), _dispatch_preemption_yield(n))
+#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */
+
 #pragma mark -
 #pragma mark _dispatch_contention_usleep
diff --git a/src/source.c b/src/source.c
index 376327052..9af2a4a8b 100644
--- a/src/source.c
+++ b/src/source.c
@@ -60,6 +60,9 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle,
 	if (unlikely(!dq)) {
 		dq = _dispatch_get_default_queue(true);
 	} else {
+		if (_dispatch_queue_is_cooperative(dq)) {
+			DISPATCH_CLIENT_CRASH(dq, "Cannot target object to cooperative root queue - not implemented");
+		}
 		_dispatch_retain((dispatch_queue_t _Nonnull)dq);
 	}
 	ds->do_targetq = dq;
@@ -441,7 +444,10 @@ _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq,
 	dc = _dispatch_source_handler_take(ds->ds_refs, DS_REGISTN_HANDLER);
 	if (ds->dq_atomic_flags & (DSF_CANCELED | DQF_RELEASED)) {
 		// no registration callout if source is canceled rdar://problem/8955246
-		return _dispatch_source_handler_dispose(dc);
+		dispatch_invoke_with_autoreleasepool(flags, {
+			_dispatch_source_handler_dispose(dc);
+		});
+		return;
 	}
 	if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) {
 		dc->dc_ctxt = ds->do_ctxt;
@@ -458,22 +464,33 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq,
 	dispatch_source_refs_t dr = ds->ds_refs;
 	dispatch_continuation_t dc;
 
-	dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER);
-	dr->ds_pending_data = 0;
-	dr->ds_data = 0;
-	_dispatch_source_handler_free(dr, 
DS_EVENT_HANDLER); - _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); - if (!dc) { - return; - } - if (!(ds->dq_atomic_flags & DSF_CANCELED)) { - return _dispatch_source_handler_dispose(dc); - } - if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { - dc->dc_ctxt = ds->do_ctxt; - } - _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); - _dispatch_continuation_pop(dc, NULL, flags, cq); + dispatch_invoke_with_autoreleasepool(flags, { + dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); + dr->ds_pending_data = 0; + dr->ds_data = 0; + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + if (!dc) { + /* nothing to do here */ + } else if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + _dispatch_source_handler_dispose(dc); + } else { + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); + + // + // Make sure _dispatch_continuation_pop() will not + // add its own autoreleasepool since we have one, + // and there's magic in objc that makes _one_ + // autoreleasepool cheap. + // + flags &= ~DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + _dispatch_continuation_pop(dc, NULL, flags, cq); + } + + }); } DISPATCH_ALWAYS_INLINE @@ -580,7 +597,9 @@ _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, } if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) { _dispatch_trace_item_complete(dc); // see _dispatch_after - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + }); dispatch_release(ds); // dispatch_after sources are one-shot } } @@ -730,7 +749,7 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // Intentionally always drain even when on the manager queue // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. - dispatch_with_disabled_narrowing(dic, { + dispatch_with_disabled_narrowing(dic, flags, { retq = _dispatch_lane_serial_drain(ds, dic, flags, owned); }); } @@ -1232,15 +1251,7 @@ _dispatch_timer_config_create(dispatch_time_t start, // future, this will default to UPTIME if no clock was set. 
clock = _dispatch_timer_flags_to_clock(dt->du_timer_flags); } else { - _dispatch_time_to_clock_and_value(start, &clock, &target); - if (target == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - target = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - target = _dispatch_monotonic_time(); - } - } + _dispatch_time_to_clock_and_value(start, true, &clock, &target); } if (clock != DISPATCH_CLOCK_WALL) { @@ -1399,7 +1410,7 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t dq, dispatch_clock_t clock; uint64_t target; - _dispatch_time_to_clock_and_value(when, &clock, &target); + _dispatch_time_to_clock_and_value(when, false, &clock, &target); if (clock != DISPATCH_CLOCK_WALL) { leeway = _dispatch_time_nano2mach(leeway); } diff --git a/src/source_internal.h b/src/source_internal.h index d953629eb..9297ac5cd 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -47,8 +47,9 @@ DISPATCH_CLASS_DECL(channel, QUEUE); dm_cancel_handler_called:1, \ dm_is_xpc:1, \ dm_arm_no_senders:1, \ + dm_made_sendrights:1, \ dm_strict_reply:1, \ - __ds_flags_pad : 9; \ + __ds_flags_pad : 8; \ uint16_t __dq_flags_separation[0]; \ uint16_t \ /* set under the send queue lock */ \ diff --git a/src/time.c b/src/time.c index b70f81343..30ed53b26 100644 --- a/src/time.c +++ b/src/time.c @@ -43,7 +43,7 @@ _dispatch_mach_host_time_mach2nano(uint64_t machtime) return INT64_MAX; } long double big_tmp = ((long double)machtime * data->frac) + .5L; - if (unlikely(big_tmp >= INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -61,7 +61,7 @@ _dispatch_mach_host_time_nano2mach(uint64_t nsec) return INT64_MAX; } long double big_tmp = ((long double)nsec / data->frac) + .5L; - if (unlikely(big_tmp >= INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -98,7 +98,7 @@ dispatch_time(dispatch_time_t inval, int64_t delta) dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(inval, &clock, &value); + _dispatch_time_to_clock_and_value(inval, true, &clock, &value); if (value == DISPATCH_TIME_FOREVER) { // Out-of-range for this clock. return value; @@ -122,14 +122,6 @@ dispatch_time(dispatch_time_t inval, int64_t delta) // up time or monotonic time. "value" has the clock type removed, // so the test against DISPATCH_TIME_NOW is correct for either clock. 
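+	/*
+	 * Illustration of why the special case below can be deleted: with
+	 * allow_now = true, _dispatch_time_to_clock_and_value() above has
+	 * already resolved the sentinel to a concrete reading, e.g.
+	 *
+	 *	uint64_t value; dispatch_clock_t clock;
+	 *	_dispatch_time_to_clock_and_value(DISPATCH_TIME_NOW, true,
+	 *			&clock, &value);
+	 *	// clock == DISPATCH_CLOCK_UPTIME, value == _dispatch_uptime()
+	 *
+	 * so "value" can no longer be the DISPATCH_TIME_NOW sentinel here.
+	 */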
- if (value == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - value = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - value = _dispatch_monotonic_time(); - } - } if (delta >= 0) { offset = _dispatch_time_nano2mach((uint64_t)delta); if ((int64_t)(value += offset) <= 0) { @@ -145,6 +137,37 @@ dispatch_time(dispatch_time_t inval, int64_t delta) } } +bool +dispatch_time_to_nsecs(dispatch_time_t time, + dispatch_clockid_t *clock_out, uint64_t *nsecs_out) +{ + dispatch_clock_t clock; + uint64_t value; + + if (time != DISPATCH_TIME_FOREVER) { + _dispatch_time_to_clock_and_value(time, true, &clock, &value); + + switch (clock) { + case DISPATCH_CLOCK_WALL: + *clock_out = DISPATCH_CLOCKID_WALLTIME; + *nsecs_out = value; + return true; + case DISPATCH_CLOCK_UPTIME: + *clock_out = DISPATCH_CLOCKID_UPTIME; + *nsecs_out = _dispatch_time_mach2nano(value); + return true; + case DISPATCH_CLOCK_MONOTONIC: + *clock_out = DISPATCH_CLOCKID_MONOTONIC; + *nsecs_out = _dispatch_time_mach2nano(value); + return true; + } + } + + *clock_out = 0; + *nsecs_out = UINT64_MAX; + return false; +} + dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { @@ -166,16 +189,19 @@ uint64_t _dispatch_timeout(dispatch_time_t when) { dispatch_time_t now; - if (when == DISPATCH_TIME_FOREVER) { + + switch (when) { + case DISPATCH_TIME_FOREVER: return DISPATCH_TIME_FOREVER; - } - if (when == DISPATCH_TIME_NOW) { + case DISPATCH_TIME_NOW: + case DISPATCH_MONOTONICTIME_NOW: + case DISPATCH_WALLTIME_NOW: return 0; } dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(when, &clock, &value); + _dispatch_time_to_clock_and_value(when, false, &clock, &value); if (clock == DISPATCH_CLOCK_WALL) { now = _dispatch_get_nanoseconds(); return now >= value ? 
0 : value - now; diff --git a/src/trace.h b/src/trace.h index ed69e1b56..e4303dfd3 100644 --- a/src/trace.h +++ b/src/trace.h @@ -71,8 +71,19 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_introspection_callout_return(ctxt, func); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t, size_t)) +{ + dispatch_function_t func = (dispatch_function_t)f; + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout3_a(ctxt, i, w, f)); + _dispatch_introspection_callout_return(ctxt, func); +} + #define _dispatch_client_callout _dispatch_trace_client_callout #define _dispatch_client_callout2 _dispatch_trace_client_callout2 +#define _dispatch_client_callout3_a _dispatch_trace_client_callout3_a #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION #ifdef _COMM_PAGE_KDEBUG_ENABLE diff --git a/src/voucher.c b/src/voucher.c index f06089d64..61f1643df 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -233,6 +233,7 @@ _voucher_insert(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; if (!kv) return; + _voucher_hash_lock_lock(); if (unlikely(_voucher_hash_is_enqueued(v))) { _dispatch_voucher_debug("corruption", v); @@ -827,6 +828,31 @@ _voucher_activity_debug_channel_init(void) } } +static bool +_voucher_hash_is_empty() { + _voucher_hash_lock_lock(); + + bool empty = true; + for (unsigned int i = 0; i < VL_HASH_SIZE; i++) { + voucher_hash_head_s *head = &_voucher_hash[i]; + if (_voucher_hash_get_next(head->vhh_first) != VOUCHER_NULL) { + empty = false; + break; + } + } + _voucher_hash_lock_unlock(); + + return empty; +} + +void +_voucher_atfork_parent(void) +{ + if (!_voucher_hash_is_empty()){ + _dispatch_fork_becomes_unsafe(); + } +} + void _voucher_atfork_child(void) { @@ -841,6 +867,39 @@ _voucher_atfork_child(void) _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE } +static void +_voucher_process_can_use_arbitrary_personas_init(void *__unused ctxt) +{ +#if VOUCHER_USE_PERSONA_ADOPT_ANY + mach_voucher_t kv = _voucher_get_task_mach_voucher(); + kern_return_t kr; + + mach_voucher_attr_content_t result_out; + mach_msg_type_number_t result_out_size; + + boolean_t local_result; + result_out = (mach_voucher_attr_content_t) &local_result; + result_out_size = sizeof(local_result); + + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_BANK, + BANK_PERSONA_ADOPT_ANY, NULL, 0, result_out, &result_out_size); + if (kr != KERN_SUCCESS) { + DISPATCH_INTERNAL_CRASH(kr, "mach_voucher_attr_command(BANK_PERSONA_ADOPT_ANY) failed"); + } + + _voucher_process_can_use_arbitrary_personas = !!local_result; +#endif /* VOUCHER_USE_PERSONA_ADOPT_ANY */ +} + +bool +voucher_process_can_use_arbitrary_personas(void) +{ + dispatch_once_f(&_voucher_process_can_use_arbitrary_personas_pred, NULL, + _voucher_process_can_use_arbitrary_personas_init); + + return _voucher_process_can_use_arbitrary_personas; +} + voucher_t voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) { @@ -1153,6 +1212,21 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, "voucher_activity_initialize_4libtrace called twice"); } + + // HACK: we can't call into os_variant until after the initialization of + // dispatch and XPC, but we want to do it before the end of libsystem + // initialization to avoid having to synchronize _dispatch_mode explicitly, + 
// so this happens to be just the right spot +#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { + return; + } else if (getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { + return; + } + + _dispatch_mode &= ~DISPATCH_MODE_NO_FAULTS; +#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR } void @@ -1272,6 +1346,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _voucher_activity_disabled(void) { + dispatch_once_f(&_firehose_task_buffer_pred, NULL, _firehose_task_buffer_init); @@ -1618,7 +1693,7 @@ _voucher_debug(voucher_t v, char *buf, size_t bufsiz) v->v_activity, v->v_activity_creator, v->v_parent_activity); } bufprintf(" }"); - + return offset; } @@ -1631,7 +1706,7 @@ format_hex_data(char *prefix, char *desc, uint8_t *data, size_t data_len, uint8_t *pc = data; if (desc) { - bufprintf("%s%s:\n", prefix, desc); + bufprintf("%s%s:\n", prefix, desc); } ssize_t offset_in_row = -1; @@ -1669,10 +1744,6 @@ format_recipe_detail(mach_voucher_attr_recipe_t recipe, char *buf, bufprintf("Content size: %u\n", recipe->content_size); switch (recipe->key) { - case MACH_VOUCHER_ATTR_KEY_ATM: - bufprintprefix(); - bufprintf("ATM ID: %llu", *(uint64_t *)(uintptr_t)recipe->content); - break; case MACH_VOUCHER_ATTR_KEY_IMPORTANCE: bufprintprefix(); bufprintf("IMPORTANCE INFO: %s", (char *)recipe->content); @@ -1737,7 +1808,7 @@ voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, } else { bufprintprefix(); bufprintf("Invalid voucher: 0x%x\n", voucher); - } + } done: return offset; @@ -1916,6 +1987,12 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf } #endif // __has_include() +bool +voucher_process_can_use_arbitrary_personas(void) +{ + return false; +} + void _voucher_activity_debug_channel_init(void) { @@ -1935,8 +2012,8 @@ _voucher_init(void) void* voucher_activity_get_metadata_buffer(size_t *length) { - *length = 0; - return NULL; + *length = 0; + return NULL; } voucher_t diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 37d0935ac..c50c36ca4 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -89,6 +89,7 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); +void _voucher_atfork_parent(void); void _voucher_activity_debug_channel_init(void); #if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, @@ -155,7 +156,7 @@ OS_ENUM(voucher_fields, uint16_t, typedef struct voucher_s { _OS_OBJECT_HEADER( - struct voucher_vtable_s *os_obj_isa, + struct voucher_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); struct voucher_hash_entry_s { @@ -233,7 +234,7 @@ _voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v) #if VOUCHER_ENABLE_RECIPE_OBJECTS typedef struct voucher_recipe_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); size_t vr_allocation_size; @@ -450,6 +451,10 @@ _voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; + +extern dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +extern bool 
_voucher_process_can_use_arbitrary_personas;
+
 #if VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER
 #define _voucher_default_task_mach_voucher MACH_VOUCHER_NULL
 #else
diff --git a/src/workgroup.c b/src/workgroup.c
new file mode 100644
index 000000000..ae47870f5
--- /dev/null
+++ b/src/workgroup.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#define PTHREAD_WORKGROUP_SPI 1
+
+#include "internal.h"
+
+#include
+#include
+#include
+
+/* Declares struct symbols */
+
+OS_OBJECT_CLASS_DECL(os_workgroup);
+#if !USE_OBJC
+OS_OBJECT_VTABLE_INSTANCE(os_workgroup,
+		(void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose,
+		(void (*)(_os_object_t))_os_workgroup_dispose);
+#endif // !USE_OBJC
+#define WORKGROUP_CLASS OS_OBJECT_VTABLE(os_workgroup)
+
+OS_OBJECT_CLASS_DECL(os_workgroup_interval);
+#if !USE_OBJC
+OS_OBJECT_VTABLE_INSTANCE(os_workgroup_interval,
+		(void (*)(_os_object_t))_os_workgroup_interval_explicit_xref_dispose,
+		(void (*)(_os_object_t))_os_workgroup_interval_explicit_dispose);
+#endif // !USE_OBJC
+#define WORKGROUP_INTERVAL_CLASS OS_OBJECT_VTABLE(os_workgroup_interval)
+
+OS_OBJECT_CLASS_DECL(os_workgroup_parallel);
+#if !USE_OBJC
+OS_OBJECT_VTABLE_INSTANCE(os_workgroup_parallel,
+		(void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose,
+		(void (*)(_os_object_t))_os_workgroup_dispose);
+#endif // !USE_OBJC
+#define WORKGROUP_PARALLEL_CLASS OS_OBJECT_VTABLE(os_workgroup_parallel)
+
+#pragma mark Internal functions
+
+/* These are default workgroup attributes to be used when no user attribute is
+ * passed in to the creation APIs.
+ *
+ * For all classes, workgroup propagation is currently not supported.
+ * + * Class Default attribute Eventually supported + * + * os_workgroup_t propagating nonpropagating, propagating + * os_workgroup_interval_t nonpropagating nonpropagating, propagating + * os_workgroup_parallel_t nonpropagating nonpropagating + * + * Class Default attribute supported + * os_workgroup_t differentiated differentiated, undifferentiated + * os_workgroup_interval_t differentiated differentiated + * os_workgroup_parallel_t undifferentiated undifferentiated, differentiated + */ +static const struct os_workgroup_attr_s _os_workgroup_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = 0, +}; + +static const struct os_workgroup_attr_s _os_workgroup_with_workload_id_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING, +}; + +static const struct os_workgroup_attr_s _os_workgroup_interval_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING +}; + +static const struct os_workgroup_attr_s _os_workgroup_parallel_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_PARALLEL, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING | + OS_WORKGROUP_ATTR_UNDIFFERENTIATED, +}; + +void +_os_workgroup_xref_dispose(os_workgroup_t wg) +{ + os_workgroup_arena_t arena = wg->wg_arena; + + if (arena == NULL) { + return; + } + + arena->destructor(arena->client_arena); + free(arena); +} + +void +_os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi) +{ + uint64_t wg_state = wgi->wg_state; + if (wg_state & OS_WORKGROUP_INTERVAL_STARTED) { + os_crash("BUG IN CLIENT: Releasing last reference to workgroup interval " + "while an interval has been started"); + } +} + +#if !USE_OBJC +void +_os_workgroup_explicit_xref_dispose(os_workgroup_t wg) +{ + _os_workgroup_xref_dispose(wg); + _os_object_release_internal(wg->_as_os_obj); +} + +void +_os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_xref_dispose(wgi); + _os_workgroup_explicit_xref_dispose(wgi->_as_wg); +} +#endif + +static inline bool +_os_workgroup_is_configurable(uint64_t wg_state) +{ + return (wg_state & OS_WORKGROUP_OWNER) == OS_WORKGROUP_OWNER; +} + +void +_os_workgroup_dispose(os_workgroup_t wg) +{ + dispatch_assert(wg->joined_cnt == 0); + + kern_return_t kr; + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (_os_workgroup_is_configurable(wg_state)) { + kr = work_interval_destroy(wg->wi); + } else { + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, -1); + } + os_assumes(kr == KERN_SUCCESS); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)wg->name); + } +} + +void +_os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size) +{ + snprintf(buf, size, "wg[%p] = {xref = %d, ref = %d, name = %s}", + (void *) wg, wg->do_xref_cnt + 1, wg->do_ref_cnt + 1, wg->name); +} + +void +_os_workgroup_interval_dispose(os_workgroup_interval_t wgi) +{ + work_interval_instance_free(wgi->wii); +} + +#if !USE_OBJC +void +_os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_dispose(wgi); + _os_workgroup_dispose(wgi->_as_wg); +} +#endif + +#define os_workgroup_inc_refcount(wg) \ + _os_object_retain_internal(wg->_as_os_obj); + +#define os_workgroup_dec_refcount(wg) \ + 
_os_object_release_internal(wg->_as_os_obj); + +void +_os_workgroup_tsd_cleanup(void *ctxt) /* Destructor for the tsd key */ +{ + os_workgroup_t wg = (os_workgroup_t) ctxt; + if (wg != NULL) { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN CLIENT: Thread exiting without leaving workgroup '%s'", wg->name); + + os_crash(buf); + } +} + +static os_workgroup_t +_os_workgroup_get_current(void) +{ + return (os_workgroup_t) _dispatch_thread_getspecific(os_workgroup_key); +} + +static void +_os_workgroup_set_current(os_workgroup_t new_wg) +{ + if (new_wg != NULL) { + os_workgroup_inc_refcount(new_wg); + } + + os_workgroup_t old_wg = _os_workgroup_get_current(); + _dispatch_thread_setspecific(os_workgroup_key, new_wg); + + if (old_wg != NULL) { + os_workgroup_dec_refcount(old_wg); + } +} + +static inline bool +_os_workgroup_attr_is_resolved(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_RESOLVED_INIT); +} + +static inline bool +_os_workgroup_client_attr_initialized(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT) || + (attr->sig == _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT); +} + +static inline bool +_os_workgroup_attr_is_propagating(os_workgroup_attr_t attr) +{ + return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_NONPROPAGATING) == 0; +} + +static inline bool +_os_workgroup_attr_is_differentiated(os_workgroup_attr_t attr) +{ + return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_UNDIFFERENTIATED) == 0; +} + +static inline bool +_os_workgroup_type_is_interval_type(os_workgroup_type_t wg_type) +{ + return (wg_type >= OS_WORKGROUP_INTERVAL_TYPE_DEFAULT) && + (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_ARKIT); +} + +static bool +_os_workgroup_type_is_audio_type(os_workgroup_type_t wg_type) +{ + return (wg_type == OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO) || + (wg_type == OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT); +} + +static inline bool +_os_workgroup_type_is_parallel_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_PARALLEL; +} + +static inline bool +_os_workgroup_type_is_default_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_DEFAULT; +} + + +static inline bool +_os_workgroup_has_backing_workinterval(os_workgroup_t wg) +{ + return wg->wi != NULL; +} + +static inline uint32_t +_wi_flags_to_wi_type(uint32_t wi_flags) +{ + return wi_flags & WORK_INTERVAL_TYPE_MASK; +} + +#if !TARGET_OS_SIMULATOR +static os_workgroup_type_t +_wi_flags_to_wg_type(uint32_t wi_flags) +{ + uint32_t type = _wi_flags_to_wi_type(wi_flags); + bool is_unrestricted = (wi_flags & WORK_INTERVAL_FLAG_UNRESTRICTED); + + switch (type) { + case WORK_INTERVAL_TYPE_DEFAULT: + /* Technically, this could be OS_WORKGROUP_INTERVAL_TYPE_DEFAULT + * as well but we can't know so we just assume it's a regular + * workgroup + */ + return OS_WORKGROUP_TYPE_DEFAULT; + case WORK_INTERVAL_TYPE_COREAUDIO: + return (is_unrestricted ? 
OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT : + OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO); + case WORK_INTERVAL_TYPE_COREANIMATION: + /* and WORK_INTERVAL_TYPE_CA_RENDER_SERVER */ + + /* We cannot distinguish between + * OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION and + * OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER since + * WORK_INTERVAL_TYPE_COREANIMATION and + * WORK_INTERVAL_TYPE_CA_RENDER_SERVER have the same value */ + return OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION; + case WORK_INTERVAL_TYPE_HID_DELIVERY: + return OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY; + case WORK_INTERVAL_TYPE_COREMEDIA: + return OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA; + case WORK_INTERVAL_TYPE_ARKIT: + return OS_WORKGROUP_INTERVAL_TYPE_ARKIT; + case WORK_INTERVAL_TYPE_CA_CLIENT: + return OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT; + default: + { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN DISPATCH: Invalid wi flags = %u", wi_flags); + os_crash(buf); + } + } +} +#endif + +static uint32_t +_wg_type_to_wi_flags(os_workgroup_type_t wg_type) +{ + switch (wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_DEFAULT: + return WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_UNRESTRICTED; + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + return (WORK_INTERVAL_TYPE_COREAUDIO | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + case OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION: + return WORK_INTERVAL_TYPE_COREANIMATION; + case OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER: + return WORK_INTERVAL_TYPE_CA_RENDER_SERVER; + case OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY: + return WORK_INTERVAL_TYPE_HID_DELIVERY; + case OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA: + return WORK_INTERVAL_TYPE_COREMEDIA; + case OS_WORKGROUP_INTERVAL_TYPE_ARKIT: + return (WORK_INTERVAL_TYPE_ARKIT | + WORK_INTERVAL_FLAG_FINISH_AT_DEADLINE); + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + return (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + case OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT: + return WORK_INTERVAL_TYPE_CA_CLIENT | WORK_INTERVAL_FLAG_UNRESTRICTED; + case OS_WORKGROUP_TYPE_DEFAULT: + /* Non-interval workgroup types */ + return WORK_INTERVAL_FLAG_UNRESTRICTED; + default: + os_crash("Creating an os_workgroup of unknown type"); + } +} + +static inline uint32_t +_wg_type_to_wi_type(os_workgroup_type_t wg_type) +{ + return _wi_flags_to_wi_type(_wg_type_to_wi_flags(wg_type)); +} + +static inline int +_os_workgroup_get_wg_wi_types_from_port(mach_port_t port, + os_workgroup_type_t *out_wg_type, uint32_t *out_wi_type) +{ + os_workgroup_type_t wg_type = OS_WORKGROUP_TYPE_DEFAULT; + uint32_t wi_type = WORK_INTERVAL_TYPE_DEFAULT; + +#if !TARGET_OS_SIMULATOR + uint32_t wi_flags = 0; + int ret = work_interval_get_flags_from_port(port, &wi_flags); + if (ret != 0) { + return ret; + } + wg_type = _wi_flags_to_wg_type(wi_flags); + wi_type = _wi_flags_to_wi_type(wi_flags); +#else + (void)port; +#endif + + if (out_wg_type) *out_wg_type = wg_type; + if (out_wi_type) *out_wi_type = wi_type; + + return 0; +} + +static work_interval_t +_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +{ + /* All workgroups are joinable */ + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; + + flags |= _wg_type_to_wi_flags(attr->wg_type); + + if (_os_workgroup_attr_is_differentiated(attr)) { + flags |= WORK_INTERVAL_FLAG_GROUP; + } + + work_interval_t wi; + int rv = work_interval_create(&wi, flags); + if (rv) { + return NULL; + } + + return wi; +} + +struct 
os_workgroup_workload_id_table_entry_s {
+	const char* wl_id;
+	os_workgroup_type_t wl_type;
+};
+
+#if !TARGET_OS_SIMULATOR
+static const struct os_workgroup_workload_id_table_entry_s
+		_os_workgroup_workload_id_table[] = {
+	{
+		.wl_id = "com.apple.coreaudio.hal.iothread",
+		.wl_type = OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO,
+	},
+	{
+		.wl_id = "com.apple.coreaudio.hal.clientthread",
+		.wl_type = OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT,
+	},
+};
+#endif // !TARGET_OS_SIMULATOR
+
+static os_workgroup_type_t
+_os_workgroup_lookup_type_from_workload_id(const char *workload_id)
+{
+	os_workgroup_type_t workload_type = OS_WORKGROUP_TYPE_DEFAULT;
+
+	if (!workload_id) {
+		DISPATCH_CLIENT_CRASH(0, "Workload identifier must not be NULL");
+	}
+#if !TARGET_OS_SIMULATOR
+	for (size_t i = 0; i < countof(_os_workgroup_workload_id_table); i++) {
+		if (!strcasecmp(workload_id, _os_workgroup_workload_id_table[i].wl_id)){
+			workload_type = _os_workgroup_workload_id_table[i].wl_type;
+			if (_os_workgroup_type_is_default_type(workload_type)) {
+				DISPATCH_INTERNAL_CRASH(i, "Invalid workload ID type");
+			}
+			break;
+		}
+	}
+#if OS_WORKGROUP_LOG_UNKNOWN_WORKLOAD_ID
+	if (_os_workgroup_type_is_default_type(workload_type)) {
+		_dispatch_log("WARNING: os_workgroup: Unknown workload ID \"%s\"",
+				workload_id);
+	}
+#endif
+#endif // !TARGET_OS_SIMULATOR
+	return workload_type;
+}
+
+static inline os_workgroup_attr_t
+_os_workgroup_workload_id_attr_resolve(const char *workload_id,
+		os_workgroup_attr_t attr,
+		const os_workgroup_attr_s *default_attr)
+{
+	/* N.B.: expects to be called with the attr pointer returned by
+	 * _os_workgroup_client_attr_resolve() (i.e. a mutable local copy) */
+	os_workgroup_type_t wl_type =
+			_os_workgroup_lookup_type_from_workload_id(workload_id);
+	if (_os_workgroup_type_is_default_type(wl_type)) {
+		/* Unknown workload ID, fall back to the attribute type */
+		return attr;
+	}
+	/* Require matching types between workload ID and attribute.
+	 * Use the workload ID type as the type implied by the default attribute */
+	if (attr->wg_type == default_attr->wg_type) {
+		attr->wg_type = wl_type;
+	} else if (wl_type != attr->wg_type) {
+		/* Workload ID and attribute type mismatch */
+		return NULL;
+	}
+	return attr;
+}
+
+static inline bool
+_os_workgroup_workload_id_is_valid_for_wi_type(const char *workload_id,
+		uint32_t wi_type)
+{
+	os_workgroup_type_t wl_type =
+			_os_workgroup_lookup_type_from_workload_id(workload_id);
+	if (_os_workgroup_type_is_default_type(wl_type)) {
+		/* Unknown workload ID, nothing to match */
+		return true;
+	}
+	/* Require matching workinterval types between workload ID and passed-in
+	 * type of port or workgroup object. 
*/ + if (_wg_type_to_wi_type(wl_type) != wi_type) { + return false; + } + return true; +} + +static inline bool +_os_workgroup_join_token_initialized(os_workgroup_join_token_t token) +{ + return (token->sig == _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT); +} + +static inline void +_os_workgroup_set_name(os_workgroup_t wg, const char *name) +{ + if (name) { + const char *tmp = _dispatch_strdup_if_mutable(name); + if (tmp != name) { + wg->wg_state |= OS_WORKGROUP_LABEL_NEEDS_FREE; + name = tmp; + } + } + wg->name = name; +} + +static inline bool +_os_workgroup_client_attr_is_valid(os_workgroup_attr_t attr) +{ + return (attr && _os_workgroup_client_attr_initialized(attr)); +} + +static inline os_workgroup_attr_t +_os_workgroup_client_attr_resolve(os_workgroup_attr_t attr, + os_workgroup_attr_t client_attr, + const os_workgroup_attr_s *default_attr) +{ + if (client_attr == NULL) { + *attr = *default_attr; + } else { + if (!_os_workgroup_client_attr_is_valid(client_attr)) { + return NULL; + } + + // Make a local copy of the attr + *attr = *client_attr; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = default_attr->wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = default_attr->wg_type; + } + break; + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + /* Nothing to do, the client built the attr up from scratch */ + break; + default: + return NULL; + } + + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + return attr; +} + +static inline bool +_start_time_is_in_past(os_clockid_t clock, uint64_t start) +{ + switch (clock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + return start <= mach_absolute_time(); + } +} + +struct os_workgroup_pthread_ctx_s { + os_workgroup_t wg; + void *(*start_routine)(void *); + void *arg; +}; + +static void * +_os_workgroup_pthread_start(void *wrapper_arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = wrapper_arg; + os_workgroup_t wg = ctx->wg; + void *(*start_routine)(void *) = ctx->start_routine; + void *arg = ctx->arg; + + free(ctx); + + os_workgroup_join_token_s token; + int rc = os_workgroup_join(wg, &token); + if (rc != 0) { + DISPATCH_CLIENT_CRASH(rc, "pthread_start os_workgroup_join failed"); + } + + void *result = start_routine(arg); + + os_workgroup_leave(wg, &token); + os_workgroup_dec_refcount(wg); + + return result; +} + +static int +_os_workgroup_pthread_create_with_workgroup(pthread_t *thread, + os_workgroup_t wg, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = _dispatch_calloc(1, sizeof(*ctx)); + + os_workgroup_inc_refcount(wg); + + ctx->wg = wg; + ctx->start_routine = start_routine; + ctx->arg = arg; + + int rc = pthread_create(thread, attr, _os_workgroup_pthread_start, ctx); + if (rc != 0) { + os_workgroup_dec_refcount(wg); + free(ctx); + } + + return rc; +} + +static const struct pthread_workgroup_functions_s _os_workgroup_pthread_functions = { + .pwgf_version = PTHREAD_WORKGROUP_FUNCTIONS_VERSION, + .pwgf_create_with_workgroup = _os_workgroup_pthread_create_with_workgroup, +}; + +void +_workgroup_init(void) +{ + pthread_install_workgroup_functions_np(&_os_workgroup_pthread_functions); +} + +#pragma mark Private functions + +int +os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, + os_workgroup_interval_type_t interval_type) +{ + int ret = 0; + if 
(_os_workgroup_client_attr_is_valid(attr) &&
+			_os_workgroup_type_is_interval_type(interval_type)) {
+		attr->wg_type = interval_type;
+	} else {
+		ret = EINVAL;
+	}
+	return ret;
+}
+
+int
+os_workgroup_attr_set_flags(os_workgroup_attr_t attr,
+		os_workgroup_attr_flags_t flags)
+{
+	int ret = 0;
+	if (_os_workgroup_client_attr_is_valid(attr)) {
+		attr->wg_attr_flags = flags;
+	} else {
+		ret = EINVAL;
+	}
+
+	return ret;
+}
+
+os_workgroup_t
+os_workgroup_interval_copy_current_4AudioToolbox(void)
+{
+	os_workgroup_t wg = _os_workgroup_get_current();
+
+	if (wg) {
+		if (_os_workgroup_type_is_audio_type(wg->wg_type)) {
+			wg = os_retain(wg);
+		} else {
+			wg = NULL;
+		}
+	}
+
+	return wg;
+}
+
+#pragma mark Public functions
+
+os_workgroup_t
+os_workgroup_create(const char *name, os_workgroup_attr_t attr)
+{
+	os_workgroup_t wg = NULL;
+	work_interval_t wi = NULL;
+
+	/* Resolve the input attributes */
+	os_workgroup_attr_s wga;
+	attr = _os_workgroup_client_attr_resolve(&wga, attr,
+			&_os_workgroup_attr_default);
+	if (attr == NULL) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* Do some sanity checks */
+	if (!_os_workgroup_type_is_default_type(attr->wg_type)) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* We don't support propagating workgroups yet */
+	if (_os_workgroup_attr_is_propagating(attr)) {
+		errno = ENOTSUP;
+		return NULL;
+	}
+
+	wi = _os_workgroup_create_work_interval(attr);
+	if (wi == NULL) {
+		return NULL;
+	}
+
+	wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS,
+			sizeof(struct os_workgroup_s));
+	wg->wi = wi;
+	wg->wg_state = OS_WORKGROUP_OWNER;
+	wg->wg_type = attr->wg_type;
+
+	_os_workgroup_set_name(wg, name);
+
+	return wg;
+}
+
+os_workgroup_interval_t
+os_workgroup_interval_create(const char *name, os_clockid_t clock,
+		os_workgroup_attr_t attr)
+{
+	os_workgroup_interval_t wgi = NULL;
+	work_interval_t wi = NULL;
+
+	/* Resolve the input attributes */
+	os_workgroup_attr_s wga;
+	attr = _os_workgroup_client_attr_resolve(&wga, attr,
+			&_os_workgroup_interval_attr_default);
+	if (attr == NULL) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* Do some sanity checks */
+	if (!_os_workgroup_type_is_interval_type(attr->wg_type)) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	if (!_os_workgroup_attr_is_differentiated(attr)) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* We don't support propagating workgroups yet */
+	if (_os_workgroup_attr_is_propagating(attr)) {
+		errno = ENOTSUP;
+		return NULL;
+	}
+
+	wi = _os_workgroup_create_work_interval(attr);
+	if (wi == NULL) {
+		return NULL;
+	}
+
+	wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS,
+			sizeof(struct os_workgroup_interval_s));
+	wgi->wi = wi;
+	wgi->clock = clock;
+	wgi->wii = work_interval_instance_alloc(wi);
+	wgi->wii_lock = OS_UNFAIR_LOCK_INIT;
+	wgi->wg_type = attr->wg_type;
+	wgi->wg_state = OS_WORKGROUP_OWNER;
+
+	_os_workgroup_set_name(wgi->_as_wg, name);
+
+	return wgi;
+}
+
+os_workgroup_t
+os_workgroup_create_with_workload_id(const char *name,
+		const char *workload_id, os_workgroup_attr_t attr)
+{
+	os_workgroup_t wg = NULL;
+	work_interval_t wi = NULL;
+
+	const os_workgroup_attr_s *default_attr =
+			&_os_workgroup_with_workload_id_attr_default;
+
+	/* Resolve the input attributes */
+	os_workgroup_attr_s wga;
+	attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr);
+	if (attr == NULL) {
+		_os_workgroup_error_log("Invalid attribute pointer");
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* Resolve workload ID */
+	attr = _os_workgroup_workload_id_attr_resolve(workload_id, 
attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. */ + if (attr->wg_attr_flags != default_attr->wg_attr_flags) { + _os_workgroup_error_log("Non-default attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_default_type(attr->wg_type)) { + _os_workgroup_error_log("Non-default workload type: %s (%hd)", + workload_id, attr->wg_type); + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + _os_workgroup_error_log("Unsupported attribute flags: 0x%x", + attr->wg_attr_flags); + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + wg->wi = wi; + wg->wg_state = OS_WORKGROUP_OWNER; + wg->wg_type = attr->wg_type; + + _os_workgroup_set_name(wg, name); + + return wg; +} + +os_workgroup_interval_t +os_workgroup_interval_create_with_workload_id(const char *name, + const char *workload_id, os_clockid_t clock, os_workgroup_attr_t attr) +{ + os_workgroup_interval_t wgi = NULL; + work_interval_t wi = NULL; + + const os_workgroup_attr_s *default_attr = + &_os_workgroup_interval_attr_default; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Invalid attribute pointer"); + errno = EINVAL; + return NULL; + } + + /* Resolve workload ID */ + attr = _os_workgroup_workload_id_attr_resolve(workload_id, attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. 
*/
+	if (attr->wg_attr_flags != default_attr->wg_attr_flags) {
+		_os_workgroup_error_log("Non-default attribute flags: 0x%x",
+				attr->wg_attr_flags);
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* Do some sanity checks */
+	if (!_os_workgroup_type_is_interval_type(attr->wg_type)) {
+		_os_workgroup_error_log("Invalid workload interval type: %s (%hd)",
+				workload_id, attr->wg_type);
+		errno = EINVAL;
+		return NULL;
+	}
+
+	if (!_os_workgroup_attr_is_differentiated(attr)) {
+		_os_workgroup_error_log("Invalid attribute flags: 0x%x",
+				attr->wg_attr_flags);
+		errno = EINVAL;
+		return NULL;
+	}
+
+	/* We don't support propagating workgroups yet */
+	if (_os_workgroup_attr_is_propagating(attr)) {
+		_os_workgroup_error_log("Unsupported attribute flags: 0x%x",
+				attr->wg_attr_flags);
+		errno = ENOTSUP;
+		return NULL;
+	}
+
+	wi = _os_workgroup_create_work_interval(attr);
+	if (wi == NULL) {
+		return NULL;
+	}
+
+	wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS,
+			sizeof(struct os_workgroup_interval_s));
+	wgi->wi = wi;
+	wgi->clock = clock;
+	wgi->wii = work_interval_instance_alloc(wi);
+	wgi->wii_lock = OS_UNFAIR_LOCK_INIT;
+	wgi->wg_type = attr->wg_type;
+	wgi->wg_state = OS_WORKGROUP_OWNER;
+
+	_os_workgroup_set_name(wgi->_as_wg, name);
+
+	return wgi;
+}
+
+int
+os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token,
+		os_workgroup_index * __unused id_out)
+{
+	return os_workgroup_join(wg, token);
+}
+
+void
+os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token)
+{
+	return os_workgroup_leave(wg, token);
+}
+
+#pragma mark Public functions
+
+os_workgroup_parallel_t
+os_workgroup_parallel_create(const char *name, os_workgroup_attr_t attr)
+{
+	os_workgroup_parallel_t wgp = NULL;
+
+	// Clients should only specify NULL attributes.
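+	// The resolution below open-codes the same steps as
+	// _os_workgroup_client_attr_resolve(), but against the parallel class
+	// defaults (nonpropagating, undifferentiated).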
+ os_workgroup_attr_s wga; + if (attr == NULL) { + wga = _os_workgroup_parallel_attr_default; + attr = &wga; + } else { + // Make a local copy of the attr + if (!_os_workgroup_client_attr_is_valid(attr)) { + errno = EINVAL; + return NULL; + } + + wga = *attr; + attr = &wga; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + { + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = _os_workgroup_parallel_attr_default.wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = _os_workgroup_parallel_attr_default.wg_type; + } + } + // Fallthrough + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + break; + default: + errno = EINVAL; + return NULL; + } + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_parallel_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wgp = (os_workgroup_t) _os_object_alloc(WORKGROUP_PARALLEL_CLASS, + sizeof(struct os_workgroup_parallel_s)); + wgp->wi = NULL; + wgp->wg_state = OS_WORKGROUP_OWNER; + wgp->wg_type = attr->wg_type; + + _os_workgroup_set_name(wgp, name); + + return wgp; +} + +int +os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) +{ + os_assert(wg != NULL); + os_assert(mach_port_out != NULL); + + *mach_port_out = MACH_PORT_NULL; + int rv = 0; + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + return EINVAL; + } + + if (!_os_workgroup_has_backing_workinterval(wg)) { + return EINVAL; + } + + if (_os_workgroup_is_configurable(wg_state)) { + rv = work_interval_copy_port(wg->wi, mach_port_out); + if (rv < 0) { + rv = errno; + } + return rv; + } + + kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, + MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + *mach_port_out = wg->port; + return rv; +} + +os_workgroup_t +os_workgroup_create_with_port(const char *name, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + errno = EINVAL; + return NULL; + } + + os_workgroup_type_t wg_type; + int ret = _os_workgroup_get_wg_wi_types_from_port(port, &wg_type, NULL); + if (ret != 0) { + return NULL; + } + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; + wg->wg_type = wg_type; + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workload_id_and_port(const char *name, + const char *workload_id, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + errno = EINVAL; + return NULL; + } + + os_workgroup_type_t wg_type; + uint32_t wi_type; + int ret = _os_workgroup_get_wg_wi_types_from_port(port, &wg_type, &wi_type); + if (ret != 0) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + return NULL; + } + + /* Validate workload ID is compatible with port workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, wi_type)) { + _os_workgroup_error_log("Mismatched workload ID and port " + "interval type: %s vs %hd", workload_id, wg_type); + errno = 
EINVAL; + return NULL; + } + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; + wg->wg_type = wg_type; + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workgroup(const char *name, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + if (_os_workgroup_is_configurable(wg_state)) { + int rv = work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + goto error; + } + } else { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + goto error; + } + new_wg->port = wg->port; + } + } + + return new_wg; + +error: + wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; +} + +os_workgroup_t +os_workgroup_create_with_workload_id_and_workgroup(const char *name, + const char *workload_id, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + _os_workgroup_error_log("Workgroup already cancelled"); + errno = EINVAL; + return NULL; + } + + /* Validate workload ID is compatible with workgroup workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, + _wg_type_to_wi_type(wg->wg_type))) { + _os_workgroup_error_log("Mismatched workload ID and workgroup " + "interval type: %s vs %hd", workload_id, wg->wg_type); + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + if (_os_workgroup_is_configurable(wg_state)) { + int rv = work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + _os_workgroup_error_log("Invalid workgroup work_interval"); + goto error; + } + } else { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + _os_workgroup_error_log("Invalid workgroup port 0x%x", wg->port); + goto error; + } + new_wg->port = wg->port; + } + } + + return new_wg; + +error: + wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; +} + +int +os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t __unused attr) +{ + os_assert(wg != NULL); + + qos_class_t qos = QOS_CLASS_USER_INTERACTIVE; + + switch (wg->wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + return pthread_time_constraint_max_parallelism(0); + default: + return pthread_qos_max_parallelism(qos, 0); + } +} + 
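+/*
+ * Illustrative sketch (not part of this file's implementation): how a client
+ * could combine os_workgroup_max_parallel_threads() with the join token
+ * protocol implemented below. The workgroup is assumed to have been created
+ * elsewhere, e.g. with os_workgroup_create() above.
+ *
+ *	static void *
+ *	worker(void *arg)
+ *	{
+ *		os_workgroup_t wg = (os_workgroup_t)arg;
+ *		os_workgroup_join_token_s token;
+ *
+ *		// Fails with EALREADY if this thread already joined a workgroup
+ *		if (os_workgroup_join(wg, &token) != 0) return NULL;
+ *		// ... workgroup-attributed work happens here ...
+ *		os_workgroup_leave(wg, &token); // balance the join, same thread
+ *		return NULL;
+ *	}
+ *
+ *	static void
+ *	run_workers(os_workgroup_t wg)
+ *	{
+ *		// Size the pool with the kernel's parallelism hint (assumed > 0)
+ *		int n = os_workgroup_max_parallel_threads(wg, NULL);
+ *		pthread_t threads[n];
+ *		for (int i = 0; i < n; i++) {
+ *			pthread_create(&threads[i], NULL, worker, wg);
+ *		}
+ *		for (int i = 0; i < n; i++) {
+ *			pthread_join(threads[i], NULL);
+ *		}
+ *	}
+ */
+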
+int +os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg) { + // We currently don't allow joining multiple workgroups at all, period + errno = EALREADY; + return errno; + } + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + errno = EINVAL; + return errno; + } + + int rv = 0; + + if (_os_workgroup_has_backing_workinterval(wg)) { + if (_os_workgroup_is_configurable(wg_state)) { + rv = work_interval_join(wg->wi); + } else { + rv = work_interval_join_port(wg->port); + } + } + + if (rv) { + rv = errno; + return rv; + } + + os_atomic_inc(&wg->joined_cnt, relaxed); + + bzero(token, sizeof(struct os_workgroup_join_token_s)); + token->sig = _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT; + + token->thread = _dispatch_thread_port(); + token->old_wg = cur_wg; /* should be null */ + token->new_wg = wg; + + _os_workgroup_set_current(wg); + return rv; +} + +void +os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + if (!_os_workgroup_join_token_initialized(token)) { + os_crash("Join token is corrupt"); + } + + if (token->thread != _dispatch_thread_port()) { + os_crash("Join token provided is for a different thread"); + } + + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if ((token->new_wg != cur_wg) || (cur_wg != wg)) { + os_crash("Join token provided is for a different workgroup than the " + "last one joined by thread"); + } + os_assert(token->old_wg == NULL); + + if (_os_workgroup_has_backing_workinterval(wg)) { + dispatch_assume(work_interval_leave() == 0); + } + uint32_t old_joined_cnt = os_atomic_dec_orig(&wg->joined_cnt, relaxed); + if (old_joined_cnt == 0) { + DISPATCH_INTERNAL_CRASH(0, "Joined count underflowed"); + } + _os_workgroup_set_current(NULL); +} + +int +os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable client_arena, + uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor) +{ + size_t arena_size; + // We overflowed, we can't allocate this + if (os_mul_and_add_overflow(sizeof(mach_port_t), max_workers, sizeof(struct os_workgroup_arena_s), &arena_size)) { + errno = ENOMEM; + return errno; + } + + os_workgroup_arena_t wg_arena = calloc(arena_size, 1); + if (wg_arena == NULL) { + errno = ENOMEM; + return errno; + } + wg_arena->max_workers = max_workers; + wg_arena->client_arena = client_arena; + wg_arena->destructor = destructor; + + _os_workgroup_atomic_flags old_state, new_state; + os_workgroup_arena_t old_arena = NULL; + + bool success = os_atomic_rmw_loop(&wg->wg_atomic_flags, old_state, new_state, relaxed, { + if (_wg_joined_cnt(old_state) > 0) { // We can't change the arena while it is in use + os_atomic_rmw_loop_give_up(break); + } + old_arena = _wg_arena(old_state); + + // Remove the old arena and put the new one in + new_state = old_state; + new_state &= ~OS_WORKGROUP_ARENA_MASK; + new_state |= (uint64_t) wg_arena; + }); + + if (!success) { + free(wg_arena); + errno = EBUSY; + return errno; + } + + if (old_arena) { + old_arena->destructor(old_arena->client_arena); + free(old_arena); + } + + return 0; +} + +void * +os_workgroup_get_working_arena(os_workgroup_t wg, os_workgroup_index *_Nullable index_out) +{ + if (_os_workgroup_get_current() != wg) { + os_crash("Thread is not a member of the workgroup"); + } + + /* At this point, we know that since this thread is a member of the wg, we + * won't have the arena replaced out from under us so we can modify it + * safely */ + 
dispatch_assert(wg->joined_cnt > 0); + + os_workgroup_arena_t arena = os_atomic_load(&wg->wg_arena, relaxed); + if (arena == NULL) { + return NULL; + } + + /* if the max_workers was 0 and the client wants an index, then they will + * fail */ + if (index_out != NULL && arena->max_workers == 0) { + os_crash("The arena associated with workgroup is not to be partitioned"); + } + + if (index_out) { + /* Find the index of the current thread in the arena */ + uint32_t found_index = 0; + bool found = false; + for (uint32_t i = 0; i < arena->max_workers; i++) { + if (arena->arena_indices[i] == _dispatch_thread_port()) { + found_index = i; + found = true; + break; + } + } + + if (!found) { + /* Current thread doesn't already have an index, give it one */ + found_index = os_atomic_inc_orig(&arena->next_worker_index, relaxed); + + if (found_index >= arena->max_workers) { + os_crash("Exceeded the maximum number of workers who can access the arena"); + } + arena->arena_indices[found_index] = _dispatch_thread_port(); + } + + *index_out = found_index; + } + + return arena->client_arena; +} + +void +os_workgroup_cancel(os_workgroup_t wg) +{ + os_atomic_or(&wg->wg_state, OS_WORKGROUP_CANCELED, relaxed); +} + +bool +os_workgroup_testcancel(os_workgroup_t wg) +{ + return os_atomic_load(&wg->wg_state, relaxed) & OS_WORKGROUP_CANCELED; +} + +int +os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, + uint64_t deadline, os_workgroup_interval_data_t __unused data) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg != wgi->_as_wg) { + os_crash("Thread is not a member of the workgroup"); + } + + if (deadline < start || (!_start_time_is_in_past(wgi->clock, start))) { + errno = EINVAL; + return errno; + } + + bool success = os_unfair_lock_trylock(&wgi->wii_lock); + if (!success) { + // Someone else is concurrently in a start, update or finish method. We + // can't make progress here + errno = EBUSY; + return errno; + } + + int rv = 0; + uint64_t old_state, new_state; + os_atomic_rmw_loop(&wgi->wg_state, old_state, new_state, relaxed, { + if (old_state & (OS_WORKGROUP_CANCELED | OS_WORKGROUP_INTERVAL_STARTED)) { + rv = EINVAL; + os_atomic_rmw_loop_give_up(break); + } + if (!_os_workgroup_is_configurable(old_state)) { + rv = EPERM; + os_atomic_rmw_loop_give_up(break); + } + new_state = old_state | OS_WORKGROUP_INTERVAL_STARTED; + }); + + if (rv) { + os_unfair_lock_unlock(&wgi->wii_lock); + errno = rv; + return rv; + } + + work_interval_instance_t wii = wgi->wii; + work_interval_instance_clear(wii); + + work_interval_instance_set_start(wii, start); + work_interval_instance_set_deadline(wii, deadline); + rv = work_interval_instance_start(wii); + if (rv != 0) { + /* If we failed to start the interval in the kernel, clear the started + * field */ + os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); + } + + os_unfair_lock_unlock(&wgi->wii_lock); + + return rv; +} + +int +os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, + os_workgroup_interval_data_t __unused data) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg != wgi->_as_wg) { + os_crash("Thread is not a member of the workgroup"); + } + + bool success = os_unfair_lock_trylock(&wgi->wii_lock); + if (!success) { + // Someone else is concurrently in a start, update or finish method. 
We
+		// can't make progress here
+		errno = EBUSY;
+		return errno;
+	}
+
+	uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed);
+	if (!_os_workgroup_is_configurable(wg_state)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EPERM;
+		return errno;
+	}
+
+	/* Note: We allow updating and finishing a workgroup_interval that has
+	 * already started even if the workgroup has been cancelled - since
+	 * cancellation happens asynchronously and doesn't care about ongoing
+	 * intervals. However a subsequent new interval cannot be started */
+	if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EINVAL;
+		return errno;
+	}
+
+	work_interval_instance_t wii = wgi->wii;
+	work_interval_instance_set_deadline(wii, deadline);
+	int rv = work_interval_instance_update(wii);
+	if (rv != 0) {
+		rv = errno;
+	}
+
+	os_unfair_lock_unlock(&wgi->wii_lock);
+	return rv;
+}
+
+int
+os_workgroup_interval_finish(os_workgroup_interval_t wgi,
+		os_workgroup_interval_data_t __unused data)
+{
+	os_workgroup_t cur_wg = _os_workgroup_get_current();
+	if (cur_wg != wgi->_as_wg) {
+		os_crash("Thread is not a member of the workgroup");
+	}
+
+	bool success = os_unfair_lock_trylock(&wgi->wii_lock);
+	if (!success) {
+		// Someone else is concurrently in a start, update or finish method. We
+		// can't make progress here
+		errno = EBUSY;
+		return errno;
+	}
+
+	uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed);
+	if (!_os_workgroup_is_configurable(wg_state)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EPERM;
+		return errno;
+	}
+	if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) {
+		os_unfair_lock_unlock(&wgi->wii_lock);
+		errno = EINVAL;
+		return errno;
+	}
+
+	work_interval_instance_t wii = wgi->wii;
+	uint64_t current_finish = 0;
+	switch (wgi->clock) {
+	case OS_CLOCK_MACH_ABSOLUTE_TIME:
+		current_finish = mach_absolute_time();
+		break;
+	}
+
+	work_interval_instance_set_finish(wii, current_finish);
+	int rv = work_interval_instance_finish(wii);
+	if (rv != 0) {
+		rv = errno;
+	} else {
+		/* If we succeeded in finishing, clear the started bit */
+		os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed);
+	}
+
+	os_unfair_lock_unlock(&wgi->wii_lock);
+	return rv;
+}
diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h
new file mode 100644
index 000000000..e19df6467
--- /dev/null
+++ b/src/workgroup_internal.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2020 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
+ * which are subject to change in future releases of Mac OS X. Any applications
+ * relying on these interfaces WILL break.
+ */
+
+#ifndef __OS_WORKGROUP_INTERNAL__
+#define __OS_WORKGROUP_INTERNAL__
+
+#include <os/workgroup_private.h>
+#include <os/lock.h>
+#include <mach/port.h>
+#include <sys/work_interval.h>
+
+void _os_workgroup_xref_dispose(os_workgroup_t wg);
+void _os_workgroup_dispose(os_workgroup_t wg);
+void _os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_interval_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size);
+
+#if !USE_OBJC
+void _os_workgroup_explicit_xref_dispose(os_workgroup_t wg);
+void _os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi);
+void _os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi);
+#endif
+
+extern pthread_key_t _os_workgroup_key;
+void _os_workgroup_tsd_cleanup(void *ctxt);
+
+void _workgroup_init(void);
+
+#if 1 || DISPATCH_DEBUG // log workload_id API adoption errors by default for now
+#define OS_WORKGROUP_LOG_ERRORS 1
+#endif
+
+#if 1 || DISPATCH_DEBUG // log workload_id lookup failures by default for now
+#define OS_WORKGROUP_LOG_UNKNOWN_WORKLOAD_ID 1
+#endif
+
+#if OS_WORKGROUP_LOG_ERRORS
+#define _os_workgroup_error_log(m, ...) \
+		_dispatch_log("BUG IN CLIENT of %s: " m, __func__, ##__VA_ARGS__);
+#else
+#define _os_workgroup_error_log(m, ...) (void)m;
+#endif
+
+/*
+ * os_workgroup_type_t is an internal representation that is a superset of the
+ * types of the various kinds of workgroups. Currently it only includes
+ * os_workgroup_interval_type_t and the types specified below.
+ *
+ * Making the workgroup type uint16_t means that we have a total of 64k types,
+ * which is plenty
+ */
+typedef uint16_t os_workgroup_type_t;
+#define OS_WORKGROUP_TYPE_DEFAULT 0x0
+#define OS_WORKGROUP_TYPE_PARALLEL 0x40
+
+/* To be set when the caller provided workgroup attribute has been expanded
+ * and resolved. */
+#define _OS_WORKGROUP_ATTR_RESOLVED_INIT 0x782618DA
+struct os_workgroup_attr_s {
+	uint32_t sig;
+	uint32_t wg_attr_flags;
+	os_workgroup_type_t wg_type;
+	uint16_t empty;
+	uint32_t reserved[13];
+};
+
+#define _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT 0x4D5F5A58
+struct os_workgroup_join_token_s {
+	uint32_t sig;
+	mach_port_t thread;
+	os_workgroup_t old_wg;
+	os_workgroup_t new_wg;
+	uint64_t reserved[2];
+};
+
+struct os_workgroup_interval_data_s {
+	uint32_t sig;
+	uint32_t reserved[14];
+};
+
+/* This is lazily allocated if the arena is used by clients */
+typedef struct os_workgroup_arena_s {
+	void *client_arena;
+	os_workgroup_working_arena_destructor_t destructor;
+	uint32_t max_workers; /* Client specified max size */
+	uint32_t next_worker_index;
+	mach_port_t arena_indices[0]; /* Dynamic depending on max_workers */
+} *os_workgroup_arena_t;
+
+#define OS_WORKGROUP_OWNER (1 << 0)
+#define OS_WORKGROUP_CANCELED (1 << 1)
+#define OS_WORKGROUP_LABEL_NEEDS_FREE (1 << 2)
+#define OS_WORKGROUP_INTERVAL_STARTED (1 << 3)
+
+
+/* Note that os_workgroup_type_t doesn't have to be in the wg_atomic_flags, we
+ * just put it there to pack the struct.
+ *
+ * We have to put the arena related state in an atomic because the
+ * joined_cnt is modified in a real time context as part of os_workgroup_join
+ * and os_workgroup_leave(). We cannot have a lock and so it needs to all be
+ * part of a single _os_workgroup_atomic_flags sized atomic state */
+
+#if !defined(__LP64__) || (__LP64__ && !defined(__arm64__))
+// For 32 bit watches (armv7), we can only do DCAS up to 64 bits so the union
+// type is for uint64_t.
+// +// 16 bits for tracking the type +// 16 bits for max number of threads which have joined a workgroup (64k is plenty) +// 32 bits for arena pointer +// ----- +// 64 bits +typedef uint64_t _os_workgroup_atomic_flags; + +typedef uint16_t os_joined_cnt_t; +#define OS_WORKGROUP_JOINED_COUNT_SHIFT 48 +#define OS_WORKGROUP_JOINED_COUNT_MASK (((uint64_t) 0xffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT) +#define OS_WORKGROUP_ARENA_MASK 0xffffffffull + +#define OS_WORKGROUP_HEADER_INTERNAL \ + DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \ + os_workgroup_arena_t wg_arena, \ + os_workgroup_type_t wg_type, \ + os_joined_cnt_t joined_cnt \ + ) +#else +// For all 64 bit systems (including arm64_32), we can do DCAS (or quad width +// CAS for arm64_32) so 128 bit union type works +// +// 16 bits for tracking the type +// 16 bits for empty +// 32 bits for max number of threads which have joined a workgroup +// 64 bits for arena pointer +// ----- +// 128 bits +typedef __uint128_t _os_workgroup_atomic_flags; + +typedef uint32_t os_joined_cnt_t; +#define OS_WORKGROUP_JOINED_COUNT_SHIFT 96 +#define OS_WORKGROUP_JOINED_COUNT_MASK (((__uint128_t) 0xffffffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT) +#define OS_WORKGROUP_ARENA_MASK 0xffffffffffffffffull + +#define OS_WORKGROUP_HEADER_INTERNAL \ + DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \ + os_workgroup_arena_t wg_arena, \ + os_workgroup_type_t wg_type, \ + const uint16_t empty, \ + os_joined_cnt_t joined_cnt \ + ) +#endif + +static inline os_joined_cnt_t +_wg_joined_cnt(_os_workgroup_atomic_flags wgaf) +{ + return (os_joined_cnt_t) (((wgaf & OS_WORKGROUP_JOINED_COUNT_MASK)) >> OS_WORKGROUP_JOINED_COUNT_SHIFT); +} + +static inline os_workgroup_arena_t +_wg_arena(_os_workgroup_atomic_flags wgaf) +{ + return (os_workgroup_arena_t) (wgaf & OS_WORKGROUP_ARENA_MASK); +} + +#define OS_WORKGROUP_HEADER \ + struct _os_object_s _as_os_obj[0]; \ + OS_OBJECT_STRUCT_HEADER(workgroup); \ + const char *name; \ + uint64_t volatile wg_state; \ + union { \ + work_interval_t wi; \ + mach_port_t port; \ + }; \ + OS_WORKGROUP_HEADER_INTERNAL; + +struct os_workgroup_s { + OS_WORKGROUP_HEADER +}; + +struct os_workgroup_interval_s { + struct os_workgroup_s _as_wg[0]; + OS_WORKGROUP_HEADER + os_clockid_t clock; + /* Needed to serialize updates to wii when there are multiple racey calls to + * os_workgroup_interval_update */ + os_unfair_lock wii_lock; + work_interval_instance_t wii; +}; + +struct os_workgroup_parallel_s { + OS_WORKGROUP_HEADER +}; + +_Static_assert(sizeof(struct os_workgroup_attr_s) == sizeof(struct os_workgroup_attr_opaque_s), + "Incorrect size of workgroup attribute structure"); +_Static_assert(sizeof(struct os_workgroup_join_token_s) == sizeof(struct os_workgroup_join_token_opaque_s), + "Incorrect size of workgroup join token structure"); +_Static_assert(sizeof(struct os_workgroup_interval_data_s) == sizeof(struct os_workgroup_interval_data_opaque_s), + "Incorrect size of workgroup interval data structure"); + +#endif /* __OS_WORKGROUP_INTERNAL__ */ diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean index 25a5711a2..1e2bff8c9 100644 --- a/xcodeconfig/libdispatch.clean +++ b/xcodeconfig/libdispatch.clean @@ -22,8 +22,13 @@ __MergedGlobals __dispatch_bug.last_seen __dispatch_bug_deprecated.last_seen __dispatch_bug_kevent_client.last_seen -__dispatch_bug_kevent_client.last_seen.37 -__dispatch_bug_kevent_client.last_seen.39 +#if defined(__x86_64__) +__dispatch_bug_kevent_client.last_seen.44 
+__dispatch_bug_kevent_client.last_seen.46 +#else +__dispatch_bug_kevent_client.last_seen.38 +__dispatch_bug_kevent_client.last_seen.40 +#endif __dispatch_bug_kevent_vanished.last_seen __dispatch_bug_mach_client.last_seen @@ -32,6 +37,7 @@ __dispatch_build __dispatch_child_of_unsafe_fork __dispatch_continuation_cache_limit +__dispatch_custom_workloop_root_queue __dispatch_data_empty __dispatch_host_time_data.0 __dispatch_host_time_data.1 diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index b10789292..53cd19f74 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent __OS_dispatch_queue_concurrent_vtable +_OBJC_CLASS_$_OS_dispatch_queue_cooperative +__OS_dispatch_queue_cooperative_vtable _OBJC_CLASS_$_OS_dispatch_queue_global __OS_dispatch_queue_global_vtable _OBJC_CLASS_$_OS_dispatch_queue_pthread_root @@ -63,6 +65,10 @@ __OS_dispatch_disk_vtable # os_object_t classes _OBJC_CLASS_$_OS_object _OBJC_CLASS_$_OS_voucher +_OBJC_CLASS_$_OS_os_eventlink +_OBJC_CLASS_$_OS_os_workgroup +_OBJC_CLASS_$_OS_os_workgroup_interval +_OBJC_CLASS_$_OS_os_workgroup_parallel #_OBJC_CLASS_$_OS_voucher_recipe # non-os_object_t classes _OBJC_CLASS_$_OS_dispatch_data @@ -75,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent +_OBJC_METACLASS_$_OS_dispatch_queue_cooperative _OBJC_METACLASS_$_OS_dispatch_queue_global _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root _OBJC_METACLASS_$_OS_dispatch_queue_main @@ -90,6 +97,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher +_OBJC_METACLASS_$_OS_os_eventlink +_OBJC_METACLASS_$_OS_os_workgroup +_OBJC_METACLASS_$_OS_os_workgroup_interval +_OBJC_METACLASS_$_OS_os_workgroup_parallel #_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty @@ -121,9 +132,11 @@ __dispatch_logv_pred __dispatch_mach_calendar_pred __dispatch_mach_host_port_pred __dispatch_mach_notify_port_pred +__dispatch_mach_notify_unote __dispatch_mach_xpc_hooks __dispatch_main_heap __dispatch_main_q_handle_pred +__dispatch_memorypressure_source __dispatch_mgr_sched_pred __dispatch_queue_serial_numbers __dispatch_root_queues_pred @@ -133,6 +146,7 @@ __firehose_task_buffer_pred __voucher_activity_debug_channel __voucher_libtrace_hooks __voucher_task_mach_voucher_pred +__voucher_process_can_use_arbitrary_personas_pred # 32bits __dispatch_mach_host_port @@ -154,3 +168,5 @@ __dispatch_io_fds __dispatch_io_devs_lockq __dispatch_io_fds_lockq __dispatch_io_init_pred +__voucher_activity_disabled.disabled +__voucher_process_can_use_arbitrary_personas diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 8ea917e20..c61d6b280 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent __OS_dispatch_queue_concurrent_vtable +_OBJC_CLASS_$_OS_dispatch_queue_cooperative +__OS_dispatch_queue_cooperative_vtable _OBJC_CLASS_$_OS_dispatch_queue_global __OS_dispatch_queue_global_vtable _OBJC_CLASS_$_OS_dispatch_queue_pthread_root @@ -63,6 +65,10 @@ __OS_dispatch_disk_vtable # os_object_t classes 
_OBJC_CLASS_$_OS_object _OBJC_CLASS_$_OS_voucher +_OBJC_CLASS_$_OS_os_eventlink +_OBJC_CLASS_$_OS_os_workgroup +_OBJC_CLASS_$_OS_os_workgroup_interval +_OBJC_CLASS_$_OS_os_workgroup_parallel #_OBJC_CLASS_$_OS_voucher_recipe # non-os_object_t classes _OBJC_CLASS_$_OS_dispatch_data @@ -75,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent +_OBJC_METACLASS_$_OS_dispatch_queue_cooperative _OBJC_METACLASS_$_OS_dispatch_queue_global _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root _OBJC_METACLASS_$_OS_dispatch_queue_main @@ -90,6 +97,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher +_OBJC_METACLASS_$_OS_os_eventlink +_OBJC_METACLASS_$_OS_os_workgroup +_OBJC_METACLASS_$_OS_os_workgroup_interval +_OBJC_METACLASS_$_OS_os_workgroup_parallel #_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 2e97d81f6..48f35f27a 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -18,166 +18,6 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -#include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" -#include "/AppleInternal/XcodeConfig/PlatformSupport.xcconfig" - -SDKROOT = macosx.internal -SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator -PRODUCT_NAME = libdispatch -EXECUTABLE_PREFIX = - -SDK_INSTALL_VARIANT = $(SDK_INSTALL_VARIANT_$(DRIVERKIT)) -SDK_INSTALL_VARIANT_1 = driverkit -SDK_INSTALL_VARIANT_ = default -SDK_INSTALL_ROOT = $(SDK_INSTALL_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_ROOT_driverkit = $(DRIVERKITROOT) -SDK_INSTALL_HEADERS_ROOT = $(SDK_INSTALL_HEADERS_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_HEADERS_ROOT_driverkit = $(SDK_INSTALL_ROOT)/$(SDK_RUNTIME_HEADERS_PREFIX) -SDK_RUNTIME_HEADERS_PREFIX = Runtime - -INSTALL_PATH = $(SDK_INSTALL_ROOT)/usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/os -HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src -LIBRARY_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/lib/system $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/local/lib -SYSTEM_HEADER_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include -SYSTEM_FRAMEWORK_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks - -INSTALLHDRS_SCRIPT_PHASE = YES -ALWAYS_SEARCH_USER_PATHS = NO -USE_HEADERMAP = NO -BUILD_VARIANTS = normal debug profile - -ONLY_ACTIVE_ARCH = NO -CLANG_LINK_OBJC_RUNTIME = NO -GCC_C_LANGUAGE_STANDARD = gnu11 -CLANG_CXX_LANGUAGE_STANDARD = gnu++11 -ENABLE_STRICT_OBJC_MSGSEND = YES -GCC_ENABLE_CPP_EXCEPTIONS = NO -GCC_STRICT_ALIASING = YES -GCC_SYMBOLS_PRIVATE_EXTERN = YES -GCC_ENABLE_PASCAL_STRINGS = NO -GCC_WARN_SHADOW = YES -GCC_WARN_64_TO_32_BIT_CONVERSION = YES -GCC_WARN_ABOUT_RETURN_TYPE = YES -GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES -GCC_WARN_ABOUT_MISSING_NEWLINE = YES 
-GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
-GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
-GCC_WARN_SIGN_COMPARE = YES
-GCC_WARN_STRICT_SELECTOR_MATCH = YES
-GCC_WARN_UNDECLARED_SELECTOR = YES
-GCC_WARN_UNINITIALIZED_AUTOS = YES
-GCC_WARN_UNKNOWN_PRAGMAS = YES
-GCC_WARN_UNUSED_FUNCTION = YES
-GCC_WARN_UNUSED_LABEL = YES
-GCC_WARN_UNUSED_PARAMETER = YES
-GCC_WARN_UNUSED_VARIABLE = YES
-CLANG_WARN_ASSIGN_ENUM = YES
-CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
-CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
-CLANG_WARN_EMPTY_BODY = YES
-CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
-CLANG_WARN_INFINITE_RECURSION = YES
-CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES
-CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES
-CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
-CLANG_WARN_SUSPICIOUS_MOVE = YES
-CLANG_WARN_UNREACHABLE_CODE = YES
-CLANG_WARN_UNGUARDED_AVAILABILITY = YES
-GCC_TREAT_WARNINGS_AS_ERRORS = YES
-GCC_OPTIMIZATION_LEVEL = s
-GCC_NO_COMMON_BLOCKS = YES
-GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1
-GCC_PREPROCESSOR_DEFINITIONS[sdk=driverkit*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0
-STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0
-
-WARNING_CFLAGS =
-
-// warnings we want
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wall
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wextra
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wmost
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Warray-bounds-pointer-arithmetic
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Watomic-properties
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcomma
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wconditional-uninitialized
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcovered-switch-default
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdate-time
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdeprecated
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdouble-promotion
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wduplicate-enum
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wexpansion-to-defined
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wfloat-equal
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Widiomatic-parentheses
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wignored-qualifiers
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wnullable-to-nonnull-conversion
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wobjc-interface-ivars
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wover-aligned
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpacked
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpointer-arith
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wselector
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wstatic-in-inline
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wsuper-class-method-mismatch
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wswitch
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wtautological-compare
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wunused
-
-// silenced warnings
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unknown-warning-option
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-pedantic
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-bad-function-cast
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++-compat
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat-pedantic
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-align
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-qual
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-disabled-macro-expansion
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-documentation-unknown-command
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-format-nonliteral
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-missing-variable-declarations
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-old-style-cast
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-padded
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-reserved-id-macro
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-shift-sign-overflow
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-undef
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unreachable-code-aggressive
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unused-macros
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-used-but-marked-unused
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-vla
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unguarded-availability-new
-WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-switch-enum // -Wswitch is enough, this forces explicit listing of all cases mandatory
-
-OTHER_CFLAGS = -fverbose-asm $(PLATFORM_CFLAGS)
-OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions
-OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
-OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1
-OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1
-GENERATE_PROFILING_CODE = NO
-DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION)
-SIM_SUFFIX[sdk=*simulator*] = _sim
-DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks
-UNWIND_LDFLAGS = -lunwind
-UNWIND_LDFLAGS[sdk=driverkit*] =
-OBJC_LDFLAGS = -Wl,-upward-lobjc
-OBJC_LDFLAGS[sdk=driverkit*] =
-LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin
-LIBDARWIN_LDFLAGS[sdk=*simulator*] =
-LIBDARWIN_LDFLAGS[sdk=driverkit*] =
-ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty
-ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order
-ORDER_LDFLAGS[sdk=driverkit*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order
-ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases
-OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(CR_LDFLAGS) $(UNWIND_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS)
-OTHER_MIGFLAGS = -novouchers -I$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/include -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/local/include
-
-OBJC_SOURCE_FILE_NAMES = *.m
-EXCLUDED_SOURCE_FILE_NAMES = $(EXCLUDED_SOURCE_FILE_NAMES_$(SDK_INSTALL_VARIANT))
-EXCLUDED_SOURCE_FILE_NAMES_driverkit = $(EXCLUDED_SOURCE_FILE_NAMES_default) $(OBJC_SOURCE_FILE_NAMES)
-
 COPY_HEADERS_RUN_UNIFDEF = YES
 COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__
diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh
index 1fb149b63..212bf74ab 100755
--- a/xcodescripts/install-headers.sh
+++ b/xcodescripts/install-headers.sh
@@ -25,8 +25,21 @@ fi
 mkdir -p "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" || true
 mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true
+
 cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
-cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_6}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_7}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}"
+
+
+cp -X "${SCRIPT_INPUT_FILE_8}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_9}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_10}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_11}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_12}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_13}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_14}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
+cp -X "${SCRIPT_INPUT_FILE_15}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}"
diff --git a/xcodescripts/postprocess-headers.sh b/xcodescripts/postprocess-headers.sh
index 41f466939..c521fbe96 100755
--- a/xcodescripts/postprocess-headers.sh
+++ b/xcodescripts/postprocess-headers.sh
@@ -19,3 +19,8 @@
 # @APPLE_APACHE_LICENSE_HEADER_END@
 #
+
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_1}" "${SCRIPT_INPUT_FILE_1}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_2}" "${SCRIPT_INPUT_FILE_2}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_3}" "${SCRIPT_INPUT_FILE_3}" || true
+unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_4}" "${SCRIPT_INPUT_FILE_4}" || true